 block/blk-mq.c | 55 ++++++++++++++++---------------------------------------
 1 file changed, 16 insertions(+), 39 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e931a0e8e73d..729169d022fc 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1036,10 +1036,11 @@ void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
 EXPORT_SYMBOL(blk_mq_delay_queue);
 
 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
-					    struct blk_mq_ctx *ctx,
 					    struct request *rq,
 					    bool at_head)
 {
+	struct blk_mq_ctx *ctx = rq->mq_ctx;
+
 	trace_block_rq_insert(hctx->queue, rq);
 
 	if (at_head)
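Note: after this hunk the helper takes its software queue from the request itself, so callers can no longer pass a ctx that disagrees with rq->mq_ctx. A sketch of the resulting function; the list_add()/list_add_tail() tail is outside the hunk and is assumed from the surrounding blk-mq code of this era:

static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
					    struct request *rq,
					    bool at_head)
{
	/* ctx now always comes from the request itself */
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	trace_block_rq_insert(hctx->queue, rq);

	if (at_head)
		list_add(&rq->queuelist, &ctx->rq_list);
	else
		list_add_tail(&rq->queuelist, &ctx->rq_list);
}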
@@ -1053,20 +1054,16 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
 
-	__blk_mq_insert_req_list(hctx, ctx, rq, at_head);
+	__blk_mq_insert_req_list(hctx, rq, at_head);
 	blk_mq_hctx_mark_pending(hctx, ctx);
 }
 
 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
 		bool async)
 {
+	struct blk_mq_ctx *ctx = rq->mq_ctx;
 	struct request_queue *q = rq->q;
 	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
-
-	current_ctx = blk_mq_get_ctx(q);
-	if (!cpu_online(ctx->cpu))
-		rq->mq_ctx = ctx = current_ctx;
 
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
@@ -1076,8 +1073,6 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
 
 	if (run_queue)
 		blk_mq_run_hw_queue(hctx, async);
-
-	blk_mq_put_ctx(current_ctx);
 }
 
 static void blk_mq_insert_requests(struct request_queue *q,
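Note: with the cpu_online() fallback gone, blk_mq_insert_request() never substitutes the current CPU's software queue for the one recorded in rq->mq_ctx, so the paired get/put of current_ctx goes away too. Roughly the resulting function; the locked __blk_mq_insert_request() call in the middle sits between the two hunks above and is assumed, not shown by this diff:

void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
		bool async)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;

	/* map to the hw queue serving the request's original ctx */
	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	spin_lock(&ctx->lock);
	__blk_mq_insert_request(hctx, rq, at_head);
	spin_unlock(&ctx->lock);

	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}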
@@ -1088,14 +1083,9 @@ static void blk_mq_insert_requests(struct request_queue *q,
 
 {
 	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *current_ctx;
 
 	trace_block_unplug(q, depth, !from_schedule);
 
-	current_ctx = blk_mq_get_ctx(q);
-
-	if (!cpu_online(ctx->cpu))
-		ctx = current_ctx;
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
 	/*
@@ -1107,15 +1097,14 @@ static void blk_mq_insert_requests(struct request_queue *q,
 		struct request *rq;
 
 		rq = list_first_entry(list, struct request, queuelist);
+		BUG_ON(rq->mq_ctx != ctx);
 		list_del_init(&rq->queuelist);
-		rq->mq_ctx = ctx;
-		__blk_mq_insert_req_list(hctx, ctx, rq, false);
+		__blk_mq_insert_req_list(hctx, rq, false);
 	}
 	blk_mq_hctx_mark_pending(hctx, ctx);
 	spin_unlock(&ctx->lock);
 
 	blk_mq_run_hw_queue(hctx, from_schedule);
-	blk_mq_put_ctx(current_ctx);
 }
 
 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
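Note: blk_mq_insert_requests() used to rewrite rq->mq_ctx on each request it drained; it now asserts the invariant instead, since plug lists are already split per ctx before this function runs. The dispatch loop after the hunk, sketched with the enclosing while/spin_lock assumed from context:

	spin_lock(&ctx->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		/* every request on a per-ctx plug list must carry that ctx */
		BUG_ON(rq->mq_ctx != ctx);
		list_del_init(&rq->queuelist);
		__blk_mq_insert_req_list(hctx, rq, false);
	}
	blk_mq_hctx_mark_pending(hctx, ctx);
	spin_unlock(&ctx->lock);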
@@ -1630,16 +1619,17 @@ static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
 	return 0;
 }
 
+/*
+ * 'cpu' is going away. splice any existing rq_list entries from this
+ * software queue to the hw queue dispatch list, and ensure that it
+ * gets run.
+ */
 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
 {
-	struct request_queue *q = hctx->queue;
 	struct blk_mq_ctx *ctx;
 	LIST_HEAD(tmp);
 
-	/*
-	 * Move ctx entries to new CPU, if this one is going away.
-	 */
-	ctx = __blk_mq_get_ctx(q, cpu);
+	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
 
 	spin_lock(&ctx->lock);
 	if (!list_empty(&ctx->rq_list)) {
@@ -1651,24 +1641,11 @@ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
 	if (list_empty(&tmp))
 		return NOTIFY_OK;
 
-	ctx = blk_mq_get_ctx(q);
-	spin_lock(&ctx->lock);
-
-	while (!list_empty(&tmp)) {
-		struct request *rq;
-
-		rq = list_first_entry(&tmp, struct request, queuelist);
-		rq->mq_ctx = ctx;
-		list_move_tail(&rq->queuelist, &ctx->rq_list);
-	}
-
-	hctx = q->mq_ops->map_queue(q, ctx->cpu);
-	blk_mq_hctx_mark_pending(hctx, ctx);
-
-	spin_unlock(&ctx->lock);
+	spin_lock(&hctx->lock);
+	list_splice_tail_init(&tmp, &hctx->dispatch);
+	spin_unlock(&hctx->lock);
 
 	blk_mq_run_hw_queue(hctx, true);
-	blk_mq_put_ctx(ctx);
 	return NOTIFY_OK;
 }
 
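Note: the CPU-offline notifier no longer reassigns requests to the current CPU's software queue (which required rewriting rq->mq_ctx); it splices them onto the hardware queue's dispatch list under hctx->lock and kicks the queue. The whole notifier after the two hunks above, reconstructed as a sketch; the splice/clear-pending block in the middle is assumed from context the diff truncates:

static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
{
	struct blk_mq_ctx *ctx;
	LIST_HEAD(tmp);

	ctx = __blk_mq_get_ctx(hctx->queue, cpu);

	/* drain the dead CPU's software queue under its lock */
	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_list)) {
		list_splice_init(&ctx->rq_list, &tmp);
		blk_mq_hctx_clear_pending(hctx, ctx);
	}
	spin_unlock(&ctx->lock);

	if (list_empty(&tmp))
		return NOTIFY_OK;

	/* hand the requests straight to the hw queue's dispatch list */
	spin_lock(&hctx->lock);
	list_splice_tail_init(&tmp, &hctx->dispatch);
	spin_unlock(&hctx->lock);

	blk_mq_run_hw_queue(hctx, true);
	return NOTIFY_OK;
}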