author	Christoph Hellwig <hch@lst.de>	2014-04-15 15:59:10 -0400
committer	Jens Axboe <axboe@fb.com>	2014-04-15 16:03:03 -0400
commit	e9b267d91f6ddbc694cb40aa962b0b2cec03971d (patch)
tree	6b9fac74ad40dfc7b57f701a8ae0454c42a71554 /block/blk-mq.c
parent	8727af4b9d45c7503042e3fbd926c1a173876e9c (diff)
blk-mq: add ->init_request and ->exit_request methods
The current blk_mq_init_commands/blk_mq_free_commands interface has two problems:

1) Because only the constructor is passed to blk_mq_init_commands, there is no easy way to clean up when a command initialization fails. The current code simply leaks the allocations done in the constructor.

2) There is no good place to call blk_mq_free_commands: before blk_cleanup_queue there is no guarantee that all outstanding commands have completed, so we can't free them yet. After blk_cleanup_queue the queue has usually been freed. This can be worked around by grabbing an unconditional reference before calling blk_cleanup_queue and dropping it after blk_mq_free_commands is done, but that's not exactly pretty and driver writers are guaranteed to get it wrong sooner or later.

Both issues are easily fixed by making the request constructor and destructor normal blk_mq_ops methods.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
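With this interface, a driver no longer calls blk_mq_init_commands/blk_mq_free_commands itself: it supplies the request constructor and destructor in its blk_mq_ops, and blk_mq_init_rq_map()/blk_mq_free_rq_map() invoke them with the driver_data pointer, the hardware context, the request, and its index. A minimal driver-side sketch follows; it is illustrative only and not part of this patch: the my_* names are hypothetical, and it assumes a per-request payload reserved via the registration's cmd_size and reached through the blk_mq_rq_to_pdu() helper.

#include <linux/blk-mq.h>
#include <linux/slab.h>

/* Hypothetical per-request driver state living in the request payload. */
struct my_cmd {
        void *bounce;           /* e.g. a small per-request buffer */
};

/* Request constructor: called once per request while the tag map is set
 * up.  A non-zero return makes blk_mq_init_rq_map() fail and unwind
 * through ->exit_request, so leave the payload in a state the destructor
 * can cope with (here: bounce is NULL on failure). */
static int my_init_request(void *driver_data, struct blk_mq_hw_ctx *hctx,
                           struct request *rq, unsigned int index)
{
        struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

        cmd->bounce = kzalloc(512, GFP_KERNEL);
        if (!cmd->bounce)
                return -ENOMEM;
        return 0;
}

/* Request destructor: called from blk_mq_free_rq_map() for each populated
 * request, both at queue teardown and when ->init_request fails part-way. */
static void my_exit_request(void *driver_data, struct blk_mq_hw_ctx *hctx,
                            struct request *rq, unsigned int index)
{
        struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

        kfree(cmd->bounce);     /* kfree(NULL) is a no-op */
}

static struct blk_mq_ops my_mq_ops = {
        /* .queue_rq, .map_queue, etc. omitted; a real driver supplies them. */
        .init_request   = my_init_request,
        .exit_request   = my_exit_request,
};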
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--	block/blk-mq.c	105
1 file changed, 32 insertions, 73 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e644feec068c..48d2d8495f5e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1031,74 +1031,20 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
         blk_mq_put_ctx(ctx);
 }
 
-static int blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
-                                   int (*init)(void *, struct blk_mq_hw_ctx *,
-                                        struct request *, unsigned int),
-                                   void *data)
+static void blk_mq_free_rq_map(struct blk_mq_hw_ctx *hctx, void *driver_data)
 {
-        unsigned int i;
-        int ret = 0;
-
-        for (i = 0; i < hctx->queue_depth; i++) {
-                struct request *rq = hctx->rqs[i];
-
-                ret = init(data, hctx, rq, i);
-                if (ret)
-                        break;
-        }
-
-        return ret;
-}
-
-int blk_mq_init_commands(struct request_queue *q,
-                         int (*init)(void *, struct blk_mq_hw_ctx *,
-                                        struct request *, unsigned int),
-                         void *data)
-{
-        struct blk_mq_hw_ctx *hctx;
-        unsigned int i;
-        int ret = 0;
-
-        queue_for_each_hw_ctx(q, hctx, i) {
-                ret = blk_mq_init_hw_commands(hctx, init, data);
-                if (ret)
-                        break;
-        }
-
-        return ret;
-}
-EXPORT_SYMBOL(blk_mq_init_commands);
-
-static void blk_mq_free_hw_commands(struct blk_mq_hw_ctx *hctx,
-                                    void (*free)(void *, struct blk_mq_hw_ctx *,
-                                        struct request *, unsigned int),
-                                    void *data)
-{
-        unsigned int i;
+        struct page *page;
 
-        for (i = 0; i < hctx->queue_depth; i++) {
-                struct request *rq = hctx->rqs[i];
+        if (hctx->rqs && hctx->queue->mq_ops->exit_request) {
+                int i;
 
-                free(data, hctx, rq, i);
+                for (i = 0; i < hctx->queue_depth; i++) {
+                        if (!hctx->rqs[i])
+                                continue;
+                        hctx->queue->mq_ops->exit_request(driver_data, hctx,
+                                        hctx->rqs[i], i);
+                }
         }
-}
-
-void blk_mq_free_commands(struct request_queue *q,
-                          void (*free)(void *, struct blk_mq_hw_ctx *,
-                                        struct request *, unsigned int),
-                          void *data)
-{
-        struct blk_mq_hw_ctx *hctx;
-        unsigned int i;
-
-        queue_for_each_hw_ctx(q, hctx, i)
-                blk_mq_free_hw_commands(hctx, free, data);
-}
-EXPORT_SYMBOL(blk_mq_free_commands);
-
-static void blk_mq_free_rq_map(struct blk_mq_hw_ctx *hctx)
-{
-        struct page *page;
 
         while (!list_empty(&hctx->page_list)) {
                 page = list_first_entry(&hctx->page_list, struct page, lru);
@@ -1123,10 +1069,12 @@ static size_t order_to_size(unsigned int order)
 }
 
 static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx,
-                unsigned int reserved_tags, int node)
+                struct blk_mq_reg *reg, void *driver_data, int node)
 {
+        unsigned int reserved_tags = reg->reserved_tags;
         unsigned int i, j, entries_per_page, max_order = 4;
         size_t rq_size, left;
+        int error;
 
         INIT_LIST_HEAD(&hctx->page_list);
 
@@ -1175,14 +1123,23 @@ static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx,
                 for (j = 0; j < to_do; j++) {
                         hctx->rqs[i] = p;
                         blk_rq_init(hctx->queue, hctx->rqs[i]);
+                        if (reg->ops->init_request) {
+                                error = reg->ops->init_request(driver_data,
+                                                hctx, hctx->rqs[i], i);
+                                if (error)
+                                        goto err_rq_map;
+                        }
+
                         p += rq_size;
                         i++;
                 }
         }
 
-        if (i < (reserved_tags + BLK_MQ_TAG_MIN))
+        if (i < (reserved_tags + BLK_MQ_TAG_MIN)) {
+                error = -ENOMEM;
                 goto err_rq_map;
-        else if (i != hctx->queue_depth) {
+        }
+        if (i != hctx->queue_depth) {
                 hctx->queue_depth = i;
                 pr_warn("%s: queue depth set to %u because of low memory\n",
                                         __func__, i);
@@ -1190,12 +1147,14 @@ static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx,
 
         hctx->tags = blk_mq_init_tags(hctx->queue_depth, reserved_tags, node);
         if (!hctx->tags) {
-err_rq_map:
-                blk_mq_free_rq_map(hctx);
-                return -ENOMEM;
+                error = -ENOMEM;
+                goto err_rq_map;
         }
 
         return 0;
+err_rq_map:
+        blk_mq_free_rq_map(hctx, driver_data);
+        return error;
 }
 
 static int blk_mq_init_hw_queues(struct request_queue *q,
@@ -1228,7 +1187,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q,
                                 blk_mq_hctx_notify, hctx);
         blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
 
-                if (blk_mq_init_rq_map(hctx, reg->reserved_tags, node))
+                if (blk_mq_init_rq_map(hctx, reg, driver_data, node))
                         break;
 
                 /*
@@ -1268,7 +1227,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q,
                         reg->ops->exit_hctx(hctx, j);
 
                 blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
-                blk_mq_free_rq_map(hctx);
+                blk_mq_free_rq_map(hctx, driver_data);
                 kfree(hctx->ctxs);
         }
 
@@ -1455,7 +1414,7 @@ void blk_mq_free_queue(struct request_queue *q)
         queue_for_each_hw_ctx(q, hctx, i) {
                 kfree(hctx->ctx_map);
                 kfree(hctx->ctxs);
-                blk_mq_free_rq_map(hctx);
+                blk_mq_free_rq_map(hctx, q->queuedata);
                 blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
                 if (q->mq_ops->exit_hctx)
                         q->mq_ops->exit_hctx(hctx, i);