Diffstat (limited to 'block/blk-flush.c')
 block/blk-flush.c | 147 ++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 98 insertions(+), 49 deletions(-)
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 3cb5e9e7108a..20badd7b9d1b 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -28,7 +28,7 @@
  *
  * The actual execution of flush is double buffered. Whenever a request
  * needs to execute PRE or POSTFLUSH, it queues at
- * q->flush_queue[q->flush_pending_idx]. Once certain criteria are met, a
+ * fq->flush_queue[fq->flush_pending_idx]. Once certain criteria are met, a
  * flush is issued and the pending_idx is toggled. When the flush
  * completes, all the requests which were pending are proceeded to the next
  * step. This allows arbitrary merging of different types of FLUSH/FUA
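
Note: the patch reads against a new type that this file never defines; it lives in
block/blk.h and is not part of this diff. A sketch of struct blk_flush_queue as it
can be reconstructed from the field accesses below (the 1-bit widths on the index
and delayed fields are an assumption, inferred from the `^= 1` toggling and the
0/1 assignments; treat the exact layout as approximate):

        struct blk_flush_queue {
                unsigned int            flush_queue_delayed:1;  /* see flush_end_io() */
                unsigned int            flush_pending_idx:1;    /* toggled by blk_kick_flush() */
                unsigned int            flush_running_idx:1;    /* toggled by flush_end_io() */
                unsigned long           flush_pending_since;    /* jiffies, for the C2/C3 timeout */
                struct list_head        flush_queue[2];         /* double-buffered pending lists */
                struct list_head        flush_data_in_flight;
                struct request          *flush_rq;              /* preallocated flush request */
                spinlock_t              mq_flush_lock;          /* blk-mq only, see blk_alloc_flush_queue() */
        };
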
@@ -91,7 +91,8 @@ enum {
 	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
 };
 
-static bool blk_kick_flush(struct request_queue *q);
+static bool blk_kick_flush(struct request_queue *q,
+			   struct blk_flush_queue *fq);
 
 static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
 {
@@ -126,8 +127,6 @@ static void blk_flush_restore_request(struct request *rq)
 	/* make @rq a normal request */
 	rq->cmd_flags &= ~REQ_FLUSH_SEQ;
 	rq->end_io = rq->flush.saved_end_io;
-
-	blk_clear_rq_complete(rq);
 }
 
 static bool blk_flush_queue_rq(struct request *rq, bool add_front)
@@ -150,6 +149,7 @@ static bool blk_flush_queue_rq(struct request *rq, bool add_front)
 /**
  * blk_flush_complete_seq - complete flush sequence
  * @rq: FLUSH/FUA request being sequenced
+ * @fq: flush queue
  * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
  * @error: whether an error occurred
  *
@@ -157,16 +157,17 @@ static bool blk_flush_queue_rq(struct request *rq, bool add_front)
  * completion and trigger the next step.
  *
  * CONTEXT:
- * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
+ * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
  *
  * RETURNS:
  * %true if requests were added to the dispatch queue, %false otherwise.
  */
-static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
-				   int error)
+static bool blk_flush_complete_seq(struct request *rq,
+				   struct blk_flush_queue *fq,
+				   unsigned int seq, int error)
 {
 	struct request_queue *q = rq->q;
-	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
+	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
 	bool queued = false, kicked;
 
 	BUG_ON(rq->flush.seq & seq);
@@ -182,12 +183,12 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
 	case REQ_FSEQ_POSTFLUSH:
 		/* queue for flush */
 		if (list_empty(pending))
-			q->flush_pending_since = jiffies;
+			fq->flush_pending_since = jiffies;
 		list_move_tail(&rq->flush.list, pending);
 		break;
 
 	case REQ_FSEQ_DATA:
-		list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
+		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
 		queued = blk_flush_queue_rq(rq, true);
 		break;
 
@@ -202,7 +203,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
 		list_del_init(&rq->flush.list);
 		blk_flush_restore_request(rq);
 		if (q->mq_ops)
-			blk_mq_end_io(rq, error);
+			blk_mq_end_request(rq, error);
 		else
 			__blk_end_request_all(rq, error);
 		break;
@@ -211,7 +212,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
 		BUG();
 	}
 
-	kicked = blk_kick_flush(q);
+	kicked = blk_kick_flush(q, fq);
 	return kicked | queued;
 }
 
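For context, blk_flush_complete_seq() walks each request through the sequence bits
defined near the top of this file. That enum is unchanged by the patch, so the diff
only shows its FLUSH_PENDING_TIMEOUT member as context; quoted here from the
surrounding file for reference (comment wording approximate):

        enum {
                REQ_FSEQ_PREFLUSH       = (1 << 0), /* pre-flushing in progress */
                REQ_FSEQ_DATA           = (1 << 1), /* data write in progress */
                REQ_FSEQ_POSTFLUSH      = (1 << 2), /* post-flushing in progress */
                REQ_FSEQ_DONE           = (1 << 3),

                REQ_FSEQ_ACTIONS        = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
                                          REQ_FSEQ_POSTFLUSH,

                FLUSH_PENDING_TIMEOUT   = 5 * HZ,
        };

Worked example: a pure FLUSH with no data, on a device advertising REQ_FLUSH, gets
policy == REQ_FSEQ_PREFLUSH from blk_flush_policy(), so blk_insert_flush() below
immediately completes REQ_FSEQ_ACTIONS & ~policy == REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH
and only the preflush step is actually issued.
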
@@ -222,17 +223,18 @@ static void flush_end_io(struct request *flush_rq, int error)
 	bool queued = false;
 	struct request *rq, *n;
 	unsigned long flags = 0;
+	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
 
 	if (q->mq_ops) {
-		spin_lock_irqsave(&q->mq_flush_lock, flags);
-		q->flush_rq->tag = -1;
+		spin_lock_irqsave(&fq->mq_flush_lock, flags);
+		flush_rq->tag = -1;
 	}
 
-	running = &q->flush_queue[q->flush_running_idx];
-	BUG_ON(q->flush_pending_idx == q->flush_running_idx);
+	running = &fq->flush_queue[fq->flush_running_idx];
+	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);
 
 	/* account completion of the flush request */
-	q->flush_running_idx ^= 1;
+	fq->flush_running_idx ^= 1;
 
 	if (!q->mq_ops)
 		elv_completed_request(q, flush_rq);
@@ -242,7 +244,7 @@ static void flush_end_io(struct request *flush_rq, int error)
 		unsigned int seq = blk_flush_cur_seq(rq);
 
 		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
-		queued |= blk_flush_complete_seq(rq, seq, error);
+		queued |= blk_flush_complete_seq(rq, fq, seq, error);
 	}
 
 	/*
@@ -256,71 +258,81 @@ static void flush_end_io(struct request *flush_rq, int error)
 	 * directly into request_fn may confuse the driver. Always use
 	 * kblockd.
 	 */
-	if (queued || q->flush_queue_delayed) {
+	if (queued || fq->flush_queue_delayed) {
 		WARN_ON(q->mq_ops);
 		blk_run_queue_async(q);
 	}
-	q->flush_queue_delayed = 0;
+	fq->flush_queue_delayed = 0;
 	if (q->mq_ops)
-		spin_unlock_irqrestore(&q->mq_flush_lock, flags);
+		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 }
 
 /**
  * blk_kick_flush - consider issuing flush request
  * @q: request_queue being kicked
+ * @fq: flush queue
  *
  * Flush related states of @q have changed, consider issuing flush request.
  * Please read the comment at the top of this file for more info.
  *
  * CONTEXT:
- * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
+ * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
  *
  * RETURNS:
  * %true if flush was issued, %false otherwise.
  */
-static bool blk_kick_flush(struct request_queue *q)
+static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
 {
-	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
+	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
 	struct request *first_rq =
 		list_first_entry(pending, struct request, flush.list);
+	struct request *flush_rq = fq->flush_rq;
 
 	/* C1 described at the top of this file */
-	if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending))
+	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
 		return false;
 
 	/* C2 and C3 */
-	if (!list_empty(&q->flush_data_in_flight) &&
+	if (!list_empty(&fq->flush_data_in_flight) &&
 	    time_before(jiffies,
-			q->flush_pending_since + FLUSH_PENDING_TIMEOUT))
+			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
 		return false;
 
 	/*
 	 * Issue flush and toggle pending_idx. This makes pending_idx
 	 * different from running_idx, which means flush is in flight.
 	 */
-	q->flush_pending_idx ^= 1;
+	fq->flush_pending_idx ^= 1;
 
-	blk_rq_init(q, q->flush_rq);
-	if (q->mq_ops)
-		blk_mq_clone_flush_request(q->flush_rq, first_rq);
+	blk_rq_init(q, flush_rq);
+
+	/*
+	 * Borrow tag from the first request since they can't
+	 * be in flight at the same time.
+	 */
+	if (q->mq_ops) {
+		flush_rq->mq_ctx = first_rq->mq_ctx;
+		flush_rq->tag = first_rq->tag;
+	}
 
-	q->flush_rq->cmd_type = REQ_TYPE_FS;
-	q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
-	q->flush_rq->rq_disk = first_rq->rq_disk;
-	q->flush_rq->end_io = flush_end_io;
+	flush_rq->cmd_type = REQ_TYPE_FS;
+	flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
+	flush_rq->rq_disk = first_rq->rq_disk;
+	flush_rq->end_io = flush_end_io;
 
-	return blk_flush_queue_rq(q->flush_rq, false);
+	return blk_flush_queue_rq(flush_rq, false);
 }
 
 static void flush_data_end_io(struct request *rq, int error)
 {
 	struct request_queue *q = rq->q;
+	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
 
 	/*
 	 * After populating an empty queue, kick it to avoid stall. Read
 	 * the comment in flush_end_io().
 	 */
-	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
+	if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
 		blk_run_queue_async(q);
 }
 
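Several hunks above resolve the flush queue through blk_get_flush_queue(), another
helper added outside this file. A plausible minimal form, assuming the legacy path
keeps a single flush queue at q->fq and blk-mq hangs one off each hardware context
as hctx->fq (both member names are assumptions, not shown in this diff):

        static inline struct blk_flush_queue *
        blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
        {
                struct blk_mq_hw_ctx *hctx;

                if (!q->mq_ops)
                        return q->fq;   /* legacy: one flush queue per request_queue */

                hctx = q->mq_ops->map_queue(q, ctx->cpu);
                return hctx->fq;        /* blk-mq: per-hw-context flush queue */
        }

This shape also explains why flush_data_end_io() above can pass NULL for the ctx
argument: on the legacy path, !q->mq_ops returns before ctx is ever dereferenced.
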
@@ -328,20 +340,20 @@ static void mq_flush_data_end_io(struct request *rq, int error)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx;
+	struct blk_mq_ctx *ctx = rq->mq_ctx;
 	unsigned long flags;
+	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
 
-	ctx = rq->mq_ctx;
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
 	/*
 	 * After populating an empty queue, kick it to avoid stall. Read
 	 * the comment in flush_end_io().
 	 */
-	spin_lock_irqsave(&q->mq_flush_lock, flags);
-	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
+	spin_lock_irqsave(&fq->mq_flush_lock, flags);
+	if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
 		blk_mq_run_hw_queue(hctx, true);
-	spin_unlock_irqrestore(&q->mq_flush_lock, flags);
+	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 }
 
 /**
@@ -361,6 +373,7 @@ void blk_insert_flush(struct request *rq)
 	struct request_queue *q = rq->q;
 	unsigned int fflags = q->flush_flags;	/* may change, cache */
 	unsigned int policy = blk_flush_policy(fflags, rq);
+	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
 
 	/*
 	 * @policy now records what operations need to be done. Adjust
@@ -378,7 +391,7 @@ void blk_insert_flush(struct request *rq)
 	 */
 	if (!policy) {
 		if (q->mq_ops)
-			blk_mq_end_io(rq, 0);
+			blk_mq_end_request(rq, 0);
 		else
 			__blk_end_bidi_request(rq, 0, 0, 0);
 		return;
@@ -411,14 +424,14 @@ void blk_insert_flush(struct request *rq)
 	if (q->mq_ops) {
 		rq->end_io = mq_flush_data_end_io;
 
-		spin_lock_irq(&q->mq_flush_lock);
-		blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
-		spin_unlock_irq(&q->mq_flush_lock);
+		spin_lock_irq(&fq->mq_flush_lock);
+		blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
+		spin_unlock_irq(&fq->mq_flush_lock);
 		return;
 	}
 	rq->end_io = flush_data_end_io;
 
-	blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
+	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
 }
 
 /**
@@ -474,7 +487,43 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 }
 EXPORT_SYMBOL(blkdev_issue_flush);
 
-void blk_mq_init_flush(struct request_queue *q)
+struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
+		int node, int cmd_size)
+{
+	struct blk_flush_queue *fq;
+	int rq_sz = sizeof(struct request);
+
+	fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node);
+	if (!fq)
+		goto fail;
+
+	if (q->mq_ops) {
+		spin_lock_init(&fq->mq_flush_lock);
+		rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
+	}
+
+	fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
+	if (!fq->flush_rq)
+		goto fail_rq;
+
+	INIT_LIST_HEAD(&fq->flush_queue[0]);
+	INIT_LIST_HEAD(&fq->flush_queue[1]);
+	INIT_LIST_HEAD(&fq->flush_data_in_flight);
+
+	return fq;
+
+ fail_rq:
+	kfree(fq);
+ fail:
+	return NULL;
+}
+
+void blk_free_flush_queue(struct blk_flush_queue *fq)
 {
-	spin_lock_init(&q->mq_flush_lock);
+	/* bio based request queue hasn't flush queue */
+	if (!fq)
+		return;
+
+	kfree(fq->flush_rq);
+	kfree(fq);
 }
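
blk_alloc_flush_queue() replaces the old blk_mq_init_flush() and must now be paired
with blk_free_flush_queue() at teardown. A sketch of the expected call pattern; the
q->fq member and the exact call sites (queue init and blk_cleanup_queue()) are
assumptions based on how the API reads, not part of this diff:

        /* at queue setup (legacy path: no driver pdu, so cmd_size == 0) */
        q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, 0);
        if (!q->fq)
                return NULL;    /* propagate the allocation failure */

        /* at teardown; safe for bio-based queues, which never allocate an fq */
        blk_free_flush_queue(q->fq);

Design note: under blk-mq the cmd_size argument reserves the driver's per-request
payload behind the preallocated flush request, rounded up to a cache line
(round_up(rq_sz + cmd_size, cache_line_size()) above), mirroring how regular
blk-mq requests are laid out.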