Diffstat (limited to 'block/blk-mq-sched.c')
-rw-r--r--  block/blk-mq-sched.c | 58
1 file changed, 46 insertions(+), 12 deletions(-)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 1f5b692526ae..0ded5e846335 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -68,6 +68,45 @@ static void blk_mq_sched_assign_ioc(struct request_queue *q,
 	__blk_mq_sched_assign_ioc(q, rq, bio, ioc);
 }
 
+/*
+ * Mark a hardware queue as needing a restart. For shared queues, maintain
+ * a count of how many hardware queues are marked for restart.
+ */
+static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
+{
+	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+		return;
+
+	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
+		struct request_queue *q = hctx->queue;
+
+		if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+			atomic_inc(&q->shared_hctx_restart);
+	} else
+		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+}
+
+static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
+{
+	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+		return false;
+
+	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
+		struct request_queue *q = hctx->queue;
+
+		if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+			atomic_dec(&q->shared_hctx_restart);
+	} else
+		clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+
+	if (blk_mq_hctx_has_pending(hctx)) {
+		blk_mq_run_hw_queue(hctx, true);
+		return true;
+	}
+
+	return false;
+}
+
 struct request *blk_mq_sched_get_request(struct request_queue *q,
 					 struct bio *bio,
 					 unsigned int op,
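Note: the pairing above is the heart of the change. The SCHED_RESTART bit stays per-hctx, but for shared tag sets a queue-wide counter tracks how many hctxs are marked, and it is adjusted only by whichever caller actually flips the bit. A minimal userspace sketch of that pattern, modeled with C11 atomics rather than the kernel's bitops; struct hctx, SCHED_RESTART, and shared_restart here are illustrative stand-ins, not the kernel API:

/*
 * Sketch of the mark/restart pairing, using C11 atomics in place of
 * test_and_set_bit()/test_and_clear_bit() and atomic_inc()/atomic_dec().
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define SCHED_RESTART (1u << 0)   /* models BLK_MQ_S_SCHED_RESTART */

struct hctx {
	atomic_uint state;        /* models hctx->state */
	bool shared;              /* models BLK_MQ_F_TAG_SHARED */
};

static atomic_int shared_restart; /* models q->shared_hctx_restart */

static void mark_restart(struct hctx *h)
{
	if (atomic_load(&h->state) & SCHED_RESTART)
		return;
	if (h->shared) {
		/* count the queue only if we are the one setting the bit */
		if (!(atomic_fetch_or(&h->state, SCHED_RESTART) & SCHED_RESTART))
			atomic_fetch_add(&shared_restart, 1);
	} else {
		atomic_fetch_or(&h->state, SCHED_RESTART);
	}
}

static bool restart(struct hctx *h)
{
	if (!(atomic_load(&h->state) & SCHED_RESTART))
		return false;
	if (h->shared) {
		/* decrement only if we are the one clearing the bit */
		if (atomic_fetch_and(&h->state, ~SCHED_RESTART) & SCHED_RESTART)
			atomic_fetch_sub(&shared_restart, 1);
	} else {
		atomic_fetch_and(&h->state, ~SCHED_RESTART);
	}
	/* the kernel re-runs the hw queue here, and returns true only
	 * if blk_mq_hctx_has_pending() saw queued requests */
	return true;
}

int main(void)
{
	struct hctx h = { .shared = true };

	mark_restart(&h);
	mark_restart(&h);          /* second call is a no-op */
	printf("marked queues: %d\n", atomic_load(&shared_restart)); /* 1 */
	restart(&h);
	printf("marked queues: %d\n", atomic_load(&shared_restart)); /* 0 */
	return 0;
}

Because the counter is touched only when the bit actually transitions, concurrent markers and restarters cannot double-count a queue, so a zero counter reliably means no hctx is marked.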
@@ -266,18 +305,6 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
 	return true;
 }
 
-static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
-{
-	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
-		clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-		if (blk_mq_hctx_has_pending(hctx)) {
-			blk_mq_run_hw_queue(hctx, true);
-			return true;
-		}
-	}
-	return false;
-}
-
 /**
  * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
  * @pos:	loop cursor.
@@ -309,6 +336,13 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
 	unsigned int i, j;
 
 	if (set->flags & BLK_MQ_F_TAG_SHARED) {
+		/*
+		 * If this is 0, then we know that no hardware queues
+		 * have RESTART marked. We're done.
+		 */
+		if (!atomic_read(&queue->shared_hctx_restart))
+			return;
+
 		rcu_read_lock();
 		list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
 					   tag_set_list) {
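The guard added in this hunk is what the counter buys: for shared tag sets, the restart path otherwise walks every queue on set->tag_list even when no hctx is marked, and with the counter that common case collapses to a single atomic read. Continuing the illustrative userspace sketch from the first hunk (sched_restart below is a stand-in that reuses restart(), shared_restart, and struct hctx from that sketch; it is not the kernel function):

/* Fast path modeled after the hunk above. */
static void sched_restart(struct hctx *h)
{
	if (h->shared) {
		/* O(1) exit when no queue is marked, instead of
		 * walking every queue that shares the tag set. */
		if (atomic_load(&shared_restart) == 0)
			return;
		/* ...otherwise iterate the shared queues round-robin,
		 * calling restart() on each marked hctx until one
		 * makes progress... */
	} else {
		restart(h);
	}
}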
