Diffstat (limited to 'block')
 block/blk-core.c    | 21
 block/blk-mq-tag.c  | 14
 block/blk-mq-tag.h  |  1
 block/blk-mq.c      | 75
 block/blk-mq.h      |  1
 block/blk-timeout.c |  3
 6 files changed, 104 insertions(+), 11 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 30f6153a40c2..3ad405571dcc 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -473,6 +473,25 @@ void blk_queue_bypass_end(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
 
+void blk_set_queue_dying(struct request_queue *q)
+{
+        queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+
+        if (q->mq_ops)
+                blk_mq_wake_waiters(q);
+        else {
+                struct request_list *rl;
+
+                blk_queue_for_each_rl(rl, q) {
+                        if (rl->rq_pool) {
+                                wake_up(&rl->wait[BLK_RW_SYNC]);
+                                wake_up(&rl->wait[BLK_RW_ASYNC]);
+                        }
+                }
+        }
+}
+EXPORT_SYMBOL_GPL(blk_set_queue_dying);
+
 /**
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
@@ -486,7 +505,7 @@ void blk_cleanup_queue(struct request_queue *q)
 
         /* mark @q DYING, no new request or merges will be allowed afterwards */
         mutex_lock(&q->sysfs_lock);
-        queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+        blk_set_queue_dying(q);
         spin_lock_irq(lock);
 
         /*
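The new blk_set_queue_dying() helper is what lets a driver fail pending I/O promptly instead of leaving sleepers blocked until teardown. A minimal sketch of where it would sit in a hypothetical driver's removal path (all mydev_* names are illustrative, not from this patch):

/*
 * Hypothetical teardown sketch: mark the queue dying up front so tag
 * waiters (blk-mq) and request_list sleepers (legacy path) are woken
 * immediately, rather than blocking until blk_cleanup_queue().
 */
static void mydev_remove(struct mydev *dev)
{
        blk_set_queue_dying(dev->queue);  /* wakes waiters via blk_mq_wake_waiters() */

        mydev_shutdown_hw(dev);           /* illustrative: quiesce the hardware */

        blk_cleanup_queue(dev->queue);    /* sets DYING again (harmless) and drains */
}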
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 32e8dbb9ad1c..60c9d4a93fe4 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -68,9 +68,9 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 }
 
 /*
- * Wakeup all potentially sleeping on normal (non-reserved) tags
+ * Wakeup all potentially sleeping on tags
  */
-static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
+void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
 {
         struct blk_mq_bitmap_tags *bt;
         int i, wake_index;
@@ -85,6 +85,12 @@ static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
 
                 wake_index = bt_index_inc(wake_index);
         }
+
+        if (include_reserve) {
+                bt = &tags->breserved_tags;
+                if (waitqueue_active(&bt->bs[0].wait))
+                        wake_up(&bt->bs[0].wait);
+        }
 }
 
 /*
@@ -100,7 +106,7 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 
         atomic_dec(&tags->active_queues);
 
-        blk_mq_tag_wakeup_all(tags);
+        blk_mq_tag_wakeup_all(tags, false);
 }
 
 /*
@@ -584,7 +590,7 @@ int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
          * static and should never need resizing.
          */
         bt_update_count(&tags->bitmap_tags, tdepth);
-        blk_mq_tag_wakeup_all(tags);
+        blk_mq_tag_wakeup_all(tags, false);
         return 0;
 }
 
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 6206ed17ef76..a6fa0fc9d41a 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -54,6 +54,7 @@ extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
 extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
 extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
 extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
+extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
 
 enum {
         BLK_MQ_TAG_CACHE_MIN    = 1,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index da1ab5641227..2f95747c287e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -107,7 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
         wake_up_all(&q->mq_freeze_wq);
 }
 
-static void blk_mq_freeze_queue_start(struct request_queue *q)
+void blk_mq_freeze_queue_start(struct request_queue *q)
 {
         bool freeze;
 
@@ -120,6 +120,7 @@ static void blk_mq_freeze_queue_start(struct request_queue *q)
                 blk_mq_run_queues(q, false);
         }
 }
+EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
 
 static void blk_mq_freeze_queue_wait(struct request_queue *q)
 {
@@ -136,7 +137,7 @@ void blk_mq_freeze_queue(struct request_queue *q)
         blk_mq_freeze_queue_wait(q);
 }
 
-static void blk_mq_unfreeze_queue(struct request_queue *q)
+void blk_mq_unfreeze_queue(struct request_queue *q)
 {
         bool wake;
 
@@ -149,6 +150,24 @@ static void blk_mq_unfreeze_queue(struct request_queue *q)
                 wake_up_all(&q->mq_freeze_wq);
         }
 }
+EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
+
+void blk_mq_wake_waiters(struct request_queue *q)
+{
+        struct blk_mq_hw_ctx *hctx;
+        unsigned int i;
+
+        queue_for_each_hw_ctx(q, hctx, i)
+                if (blk_mq_hw_queue_mapped(hctx))
+                        blk_mq_tag_wakeup_all(hctx->tags, true);
+
+        /*
+         * If we are called because the queue has now been marked as
+         * dying, we need to ensure that processes currently waiting on
+         * the queue are notified as well.
+         */
+        wake_up_all(&q->mq_freeze_wq);
+}
 
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 {
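With blk_mq_freeze_queue_start() and blk_mq_unfreeze_queue() now exported, a blk-mq driver can quiesce a queue around a controller reset without reaching into block-layer internals. A minimal sketch under the same hypothetical mydev naming:

/* Minimal sketch, assuming a hypothetical mydev driver. */
static void mydev_reset(struct mydev *dev)
{
        /* Kill the percpu usage ref: new submitters block on entry. */
        blk_mq_freeze_queue_start(dev->queue);

        mydev_reinit_hw(dev);             /* illustrative reset work */

        /* Restore the ref and wake everyone parked on mq_freeze_wq. */
        blk_mq_unfreeze_queue(dev->queue);
}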
@@ -258,8 +277,10 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
                 ctx = alloc_data.ctx;
         }
         blk_mq_put_ctx(ctx);
-        if (!rq)
+        if (!rq) {
+                blk_mq_queue_exit(q);
                 return ERR_PTR(-EWOULDBLOCK);
+        }
         return rq;
 }
 EXPORT_SYMBOL(blk_mq_alloc_request);
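This hunk fixes a reference leak: on allocation failure, the queue usage reference taken by blk_mq_queue_enter() was never dropped. The caller-side contract is unchanged; a sketch of the expected error handling:

/* Caller-side sketch: the error contract is unchanged by the fix. */
struct request *rq;

rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL, false);
if (IS_ERR(rq))
        return PTR_ERR(rq);     /* typically -EWOULDBLOCK when no tag is free */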
@@ -383,6 +404,12 @@ void blk_mq_complete_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
+int blk_mq_request_started(struct request *rq)
+{
+        return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+}
+EXPORT_SYMBOL_GPL(blk_mq_request_started);
+
 void blk_mq_start_request(struct request *rq)
 {
         struct request_queue *q = rq->q;
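blk_mq_request_started() gives drivers a safe way to tell whether a request ever reached them, for example when cancelling outstanding commands at teardown. A hypothetical cancel callback, assuming the contemporary blk_mq_tag_busy_iter(hctx, fn, priv) iterator and the busy_iter_fn signature visible in blk_mq_check_expired() below (the iterator itself is an assumption, not part of this patch):

/*
 * Hypothetical driver-side cancel callback.  Requests the driver never
 * saw (not yet started) are left to the dying-queue check added to
 * blk_mq_check_expired() further below.
 */
static void mydev_cancel_rq(struct blk_mq_hw_ctx *hctx, struct request *rq,
                            void *priv, bool reserved)
{
        if (!blk_mq_request_started(rq))
                return;

        rq->errors = -EIO;
        blk_mq_complete_request(rq);
}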
@@ -500,12 +527,38 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
 }
 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
 
+void blk_mq_cancel_requeue_work(struct request_queue *q)
+{
+        cancel_work_sync(&q->requeue_work);
+}
+EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
+
 void blk_mq_kick_requeue_list(struct request_queue *q)
 {
         kblockd_schedule_work(&q->requeue_work);
 }
 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
 
+void blk_mq_abort_requeue_list(struct request_queue *q)
+{
+        unsigned long flags;
+        LIST_HEAD(rq_list);
+
+        spin_lock_irqsave(&q->requeue_lock, flags);
+        list_splice_init(&q->requeue_list, &rq_list);
+        spin_unlock_irqrestore(&q->requeue_lock, flags);
+
+        while (!list_empty(&rq_list)) {
+                struct request *rq;
+
+                rq = list_first_entry(&rq_list, struct request, queuelist);
+                list_del_init(&rq->queuelist);
+                rq->errors = -EIO;
+                blk_mq_end_request(rq, rq->errors);
+        }
+}
+EXPORT_SYMBOL(blk_mq_abort_requeue_list);
+
 static inline bool is_flush_request(struct request *rq,
                                     struct blk_flush_queue *fq, unsigned int tag)
 {
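Together, blk_mq_cancel_requeue_work() and blk_mq_abort_requeue_list() let a driver dispose of requests parked on the requeue list once the hardware is gone for good. A hypothetical surprise-removal sketch:

/*
 * Hypothetical surprise-removal path: requests parked on the requeue
 * list can never be reissued, so stop the requeue worker and fail them
 * instead of leaving them stranded.
 */
static void mydev_dead(struct mydev *dev)
{
        blk_set_queue_dying(dev->queue);
        blk_mq_cancel_requeue_work(dev->queue); /* no further requeue runs */
        blk_mq_abort_requeue_list(dev->queue);  /* ends parked requests with -EIO */
}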
@@ -566,13 +619,24 @@ void blk_mq_rq_timed_out(struct request *req, bool reserved)
                 break;
         }
 }
 
 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
                 struct request *rq, void *priv, bool reserved)
 {
         struct blk_mq_timeout_data *data = priv;
 
-        if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+        if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
+                /*
+                 * If a request wasn't started before the queue was
+                 * marked dying, kill it here or it'll go unnoticed.
+                 */
+                if (unlikely(blk_queue_dying(rq->q))) {
+                        rq->errors = -EIO;
+                        blk_mq_complete_request(rq);
+                }
+                return;
+        }
+        if (rq->cmd_flags & REQ_NO_TIMEOUT)
                 return;
 
         if (time_after_eq(jiffies, rq->deadline)) {
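REQ_NO_TIMEOUT, introduced alongside this change, opts a request out of timeout handling entirely; both blk_mq_check_expired() here and blk_add_timer() in blk-timeout.c below honor it. A sketch of how a driver might mark a command that legitimately stays outstanding indefinitely, such as an async event notification (allocation details illustrative):

/* Sketch: opt a long-lived command out of timeout handling. */
struct request *rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL, false);

if (IS_ERR(rq))
        return PTR_ERR(rq);

rq->cmd_flags |= REQ_NO_TIMEOUT;        /* skipped by the timeout machinery */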
@@ -1601,7 +1665,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
         hctx->queue = q;
         hctx->queue_num = hctx_idx;
         hctx->flags = set->flags;
-        hctx->cmd_size = set->cmd_size;
 
         blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
                                         blk_mq_hctx_notify, hctx);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 206230e64f79..4f4f943c22c3 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -32,6 +32,7 @@ void blk_mq_free_queue(struct request_queue *q);
 void blk_mq_clone_flush_request(struct request *flush_rq,
                 struct request *orig_rq);
 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
+void blk_mq_wake_waiters(struct request_queue *q);
 
 /*
  * CPU hotplug helpers
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 56c025894cdf..246dfb16c3d9 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -190,6 +190,9 @@ void blk_add_timer(struct request *req)
         struct request_queue *q = req->q;
         unsigned long expiry;
 
+        if (req->cmd_flags & REQ_NO_TIMEOUT)
+                return;
+
         /* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
         if (!q->mq_ops && !q->rq_timed_out_fn)
                 return;