Diffstat (limited to 'block/blk-flush.c')
 -rw-r--r--  block/blk-flush.c | 101 ++++++++++++++-----------------------
 1 file changed, 37 insertions(+), 64 deletions(-)
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 9288aaf35c21..66e2b697f5db 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -130,20 +130,26 @@ static void blk_flush_restore_request(struct request *rq)
 	blk_clear_rq_complete(rq);
 }
 
-static void mq_flush_data_run(struct work_struct *work)
+static void mq_flush_run(struct work_struct *work)
 {
 	struct request *rq;
 
-	rq = container_of(work, struct request, mq_flush_data);
+	rq = container_of(work, struct request, mq_flush_work);
 
 	memset(&rq->csd, 0, sizeof(rq->csd));
 	blk_mq_run_request(rq, true, false);
 }
 
-static void blk_mq_flush_data_insert(struct request *rq)
+static bool blk_flush_queue_rq(struct request *rq)
 {
-	INIT_WORK(&rq->mq_flush_data, mq_flush_data_run);
-	kblockd_schedule_work(rq->q, &rq->mq_flush_data);
+	if (rq->q->mq_ops) {
+		INIT_WORK(&rq->mq_flush_work, mq_flush_run);
+		kblockd_schedule_work(rq->q, &rq->mq_flush_work);
+		return false;
+	} else {
+		list_add_tail(&rq->queuelist, &rq->q->queue_head);
+		return true;
+	}
 }
 
 /**
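The new blk_flush_queue_rq() above becomes the single place that decides how a flush-sequence request is issued: on blk-mq it is handed to kblockd through a work item and the helper returns false (the caller has nothing to run itself), while on the legacy path it is appended to q->queue_head and true is returned so the caller knows the queue still needs a run. The standalone C sketch below only models that return-value convention; the toy_* types and helpers are invented for illustration and are not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

struct toy_queue { bool mq_ops; };		/* stands in for q->mq_ops != NULL */
struct toy_request { struct toy_queue *q; };

static void toy_defer_to_worker(struct toy_request *rq)
{
	printf("blk-mq: flush rq %p handed to a work item\n", (void *)rq);
}

static void toy_add_to_queue_head(struct toy_request *rq)
{
	printf("legacy: flush rq %p appended to queue_head\n", (void *)rq);
}

/* Mirrors blk_flush_queue_rq(): true means "caller must still run the queue". */
static bool toy_flush_queue_rq(struct toy_request *rq)
{
	if (rq->q->mq_ops) {
		toy_defer_to_worker(rq);
		return false;
	}
	toy_add_to_queue_head(rq);
	return true;
}

int main(void)
{
	struct toy_queue legacy = { .mq_ops = false }, mq = { .mq_ops = true };
	struct toy_request r1 = { .q = &legacy }, r2 = { .q = &mq };

	printf("legacy queued=%d\n", toy_flush_queue_rq(&r1));	/* 1: run the queue */
	printf("blk-mq queued=%d\n", toy_flush_queue_rq(&r2));	/* 0: worker runs it */
	return 0;
}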
@@ -187,12 +193,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
 
 	case REQ_FSEQ_DATA:
 		list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
-		if (q->mq_ops)
-			blk_mq_flush_data_insert(rq);
-		else {
-			list_add(&rq->queuelist, &q->queue_head);
-			queued = true;
-		}
+		queued = blk_flush_queue_rq(rq);
 		break;
 
 	case REQ_FSEQ_DONE:
@@ -216,9 +217,6 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
 	}
 
 	kicked = blk_kick_flush(q);
-	/* blk_mq_run_flush will run queue */
-	if (q->mq_ops)
-		return queued;
 	return kicked | queued;
 }
 
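Because blk_flush_queue_rq() returns false on blk-mq, the early return removed above is no longer needed: on blk-mq both queued (set in the REQ_FSEQ_DATA case) and kicked (now propagated from the same helper by blk_kick_flush(), see the last hunk) are false, so kicked | queued gives the same answer the old "return queued;" did. A self-contained model of that boolean algebra; the model_* names are invented and only the return values mirror the patch.

#include <stdbool.h>
#include <stdio.h>

static bool model_flush_queue_rq(bool mq)		/* blk_flush_queue_rq() */
{
	return !mq;					/* false on blk-mq, true on legacy */
}

static bool model_kick_flush(bool mq, bool issue)	/* blk_kick_flush() */
{
	return issue ? model_flush_queue_rq(mq) : false;
}

static bool model_complete_seq(bool mq, bool data_step, bool issue)
{
	bool queued = data_step ? model_flush_queue_rq(mq) : false;
	bool kicked = model_kick_flush(mq, issue);

	return kicked | queued;				/* no blk-mq special case */
}

int main(void)
{
	for (int mq = 0; mq <= 1; mq++)
		for (int d = 0; d <= 1; d++)
			for (int i = 0; i <= 1; i++)
				printf("mq=%d data=%d issue=%d -> run queue: %d\n",
				       mq, d, i, model_complete_seq(mq, d, i));
	return 0;
}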
@@ -230,10 +228,9 @@ static void flush_end_io(struct request *flush_rq, int error)
 	struct request *rq, *n;
 	unsigned long flags = 0;
 
-	if (q->mq_ops) {
-		blk_mq_free_request(flush_rq);
+	if (q->mq_ops)
 		spin_lock_irqsave(&q->mq_flush_lock, flags);
-	}
+
 	running = &q->flush_queue[q->flush_running_idx];
 	BUG_ON(q->flush_pending_idx == q->flush_running_idx);
 
@@ -263,49 +260,14 @@ static void flush_end_io(struct request *flush_rq, int error)
 	 * kblockd.
 	 */
 	if (queued || q->flush_queue_delayed) {
-		if (!q->mq_ops)
-			blk_run_queue_async(q);
-		else
-			/*
-			 * This can be optimized to only run queues with requests
-			 * queued if necessary.
-			 */
-			blk_mq_run_queues(q, true);
+		WARN_ON(q->mq_ops);
+		blk_run_queue_async(q);
 	}
 	q->flush_queue_delayed = 0;
 	if (q->mq_ops)
 		spin_unlock_irqrestore(&q->mq_flush_lock, flags);
 }
 
-static void mq_flush_work(struct work_struct *work)
-{
-	struct request_queue *q;
-	struct request *rq;
-
-	q = container_of(work, struct request_queue, mq_flush_work);
-
-	/* We don't need set REQ_FLUSH_SEQ, it's for consistency */
-	rq = blk_mq_alloc_request(q, WRITE_FLUSH|REQ_FLUSH_SEQ,
-		__GFP_WAIT|GFP_ATOMIC, true);
-	rq->cmd_type = REQ_TYPE_FS;
-	rq->end_io = flush_end_io;
-
-	blk_mq_run_request(rq, true, false);
-}
-
-/*
- * We can't directly use q->flush_rq, because it doesn't have tag and is not in
- * hctx->rqs[]. so we must allocate a new request, since we can't sleep here,
- * so offload the work to workqueue.
- *
- * Note: we assume a flush request finished in any hardware queue will flush
- * the whole disk cache.
- */
-static void mq_run_flush(struct request_queue *q)
-{
-	kblockd_schedule_work(q, &q->mq_flush_work);
-}
-
 /**
  * blk_kick_flush - consider issuing flush request
  * @q: request_queue being kicked
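The two helpers deleted above existed only because the blk-mq path could not use q->flush_rq directly (it had no tag and was not in hctx->rqs[]) and could not sleep to allocate a request in the completion path, so every flush was bounced to a workqueue that allocated a fresh request. The rework in the next hunk re-initialises the single preallocated flush request in place instead, which also removes the blk_mq_free_request() call dropped from flush_end_io() earlier. A minimal userspace sketch of that allocate-per-flush versus reuse-in-place difference; every name in it is invented for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct model_rq { int tag; };

/* Old scheme: each flush allocated (and later freed) a fresh request, which
 * could sleep and therefore had to run from a work item. */
static void old_issue_flush(void)
{
	struct model_rq *rq = malloc(sizeof(*rq));	/* blk_mq_alloc_request() */

	rq->tag = -1;					/* tag assigned at allocation */
	printf("old: issued heap rq %p\n", (void *)rq);
	free(rq);					/* blk_mq_free_request() */
}

/* New scheme: one preallocated request is re-initialised in place, so the
 * whole sequence can run in atomic context with no allocation at all. */
static struct model_rq flush_rq;			/* plays the role of q->flush_rq */

static void new_issue_flush(int borrowed_tag)
{
	memset(&flush_rq, 0, sizeof(flush_rq));		/* blk_mq_rq_init()/blk_rq_init() */
	flush_rq.tag = borrowed_tag;			/* see the next hunk */
	printf("new: issued preallocated rq, tag %d\n", flush_rq.tag);
}

int main(void)
{
	old_issue_flush();
	new_issue_flush(5);
	return 0;
}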
@@ -340,19 +302,31 @@ static bool blk_kick_flush(struct request_queue *q)
 	 * different from running_idx, which means flush is in flight.
 	 */
 	q->flush_pending_idx ^= 1;
+
 	if (q->mq_ops) {
-		mq_run_flush(q);
-		return true;
+		struct blk_mq_ctx *ctx = first_rq->mq_ctx;
+		struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+		blk_mq_rq_init(hctx, q->flush_rq);
+		q->flush_rq->mq_ctx = ctx;
+
+		/*
+		 * Reuse the tag value from the first waiting request,
+		 * with blk-mq the tag is generated during request
+		 * allocation and drivers can rely on it being inside
+		 * the range they asked for.
+		 */
+		q->flush_rq->tag = first_rq->tag;
+	} else {
+		blk_rq_init(q, q->flush_rq);
 	}
 
-	blk_rq_init(q, &q->flush_rq);
-	q->flush_rq.cmd_type = REQ_TYPE_FS;
-	q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
-	q->flush_rq.rq_disk = first_rq->rq_disk;
-	q->flush_rq.end_io = flush_end_io;
+	q->flush_rq->cmd_type = REQ_TYPE_FS;
+	q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
+	q->flush_rq->rq_disk = first_rq->rq_disk;
+	q->flush_rq->end_io = flush_end_io;
 
-	list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
-	return true;
+	return blk_flush_queue_rq(q->flush_rq);
 }
 
 static void flush_data_end_io(struct request *rq, int error)
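In the blk-mq branch above the preallocated q->flush_rq has no tag of its own, so it borrows the tag of the first request still parked on the pending flush list; that request is not in flight while the flush runs, and, as the added comment says, a tag handed out at request allocation is guaranteed to lie inside the range the driver asked for. A small standalone sketch of the borrowing idea, using invented types rather than the real blk-mq structures.

#include <assert.h>
#include <stdio.h>

struct model_rq { int tag; };

#define QUEUE_DEPTH 4				/* tags the driver asked for: 0..3 */

static struct model_rq flush_rq = { .tag = -1 };	/* no tag of its own */

static void kick_flush(const struct model_rq *first_waiting)
{
	flush_rq.tag = first_waiting->tag;	/* q->flush_rq->tag = first_rq->tag */

	/* The driver may index per-tag data with this value, so it must lie
	 * inside the range it configured. */
	assert(flush_rq.tag >= 0 && flush_rq.tag < QUEUE_DEPTH);
	printf("flush issued with borrowed tag %d\n", flush_rq.tag);
}

int main(void)
{
	struct model_rq first = { .tag = 2 };	/* tag handed out at allocation */

	kick_flush(&first);
	return 0;
}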
@@ -558,5 +532,4 @@ EXPORT_SYMBOL(blkdev_issue_flush);
 void blk_mq_init_flush(struct request_queue *q)
 {
 	spin_lock_init(&q->mq_flush_lock);
-	INIT_WORK(&q->mq_flush_work, mq_flush_work);
 }
