Diffstat (limited to 'drivers/block/ll_rw_blk.c')
-rw-r--r--	drivers/block/ll_rw_blk.c	23
1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 4e2b1b06b411..d2a66fd309c3 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -353,6 +353,8 @@ static void blk_pre_flush_end_io(struct request *flush_rq)
 	struct request *rq = flush_rq->end_io_data;
 	request_queue_t *q = rq->q;
 
+	elv_completed_request(q, flush_rq);
+
 	rq->flags |= REQ_BAR_PREFLUSH;
 
 	if (!flush_rq->errors)
@@ -369,6 +371,8 @@ static void blk_post_flush_end_io(struct request *flush_rq)
 	struct request *rq = flush_rq->end_io_data;
 	request_queue_t *q = rq->q;
 
+	elv_completed_request(q, flush_rq);
+
 	rq->flags |= REQ_BAR_POSTFLUSH;
 
 	q->end_flush_fn(q, flush_rq);
@@ -408,8 +412,6 @@ struct request *blk_start_pre_flush(request_queue_t *q, struct request *rq)
 	if (!list_empty(&rq->queuelist))
 		blkdev_dequeue_request(rq);
 
-	elv_deactivate_request(q, rq);
-
 	flush_rq->end_io_data = rq;
 	flush_rq->end_io = blk_pre_flush_end_io;
 
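The three hunks above relocate the elevator's completion accounting for barrier requests: elv_completed_request() is now invoked from the pre/post flush end_io handlers, and the explicit elv_deactivate_request() call in blk_start_pre_flush() goes away. As a reading aid only, here is a sketch of blk_pre_flush_end_io() after the change, reconstructed purely from the context and added lines of the first hunk; everything past the !flush_rq->errors test lies outside the hunk and is elided.

static void blk_pre_flush_end_io(struct request *flush_rq)
{
	struct request *rq = flush_rq->end_io_data;
	request_queue_t *q = rq->q;

	/* new: hand the completed flush request to the elevator's
	 * completion hook before touching the original request */
	elv_completed_request(q, flush_rq);

	rq->flags |= REQ_BAR_PREFLUSH;

	if (!flush_rq->errors) {
		/* ... remainder of the function is not visible in this hunk ... */
	}
}

blk_post_flush_end_io() gains the same call in the same position, per the second hunk.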
@@ -1040,6 +1042,7 @@ EXPORT_SYMBOL(blk_queue_invalidate_tags);
 static char *rq_flags[] = {
 	"REQ_RW",
 	"REQ_FAILFAST",
+	"REQ_SORTED",
 	"REQ_SOFTBARRIER",
 	"REQ_HARDBARRIER",
 	"REQ_CMD",
@@ -2456,6 +2459,8 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
 	if (unlikely(--req->ref_count))
 		return;
 
+	elv_completed_request(q, req);
+
 	req->rq_status = RQ_INACTIVE;
 	req->rl = NULL;
 
@@ -2466,8 +2471,6 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
 	if (rl) {
 		int rw = rq_data_dir(req);
 
-		elv_completed_request(q, req);
-
 		BUG_ON(!list_empty(&req->queuelist));
 
 		blk_free_request(q, req);
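The two __blk_put_request() hunks move the same hook: elv_completed_request() now runs right after the final reference is dropped, before the request is marked RQ_INACTIVE, instead of only inside the if (rl) branch. In other words, the elevator hears about every request that is actually being freed, not just those allocated from a request_list. A sketch of the resulting flow, assembled from the hunk context only; the unchanged parts of the function are elided:

static void __blk_put_request(request_queue_t *q, struct request *req)
{
	/* ... */
	if (unlikely(--req->ref_count))
		return;

	/* now unconditional: runs for every request that is really
	 * going away, regardless of how it was allocated */
	elv_completed_request(q, req);

	req->rq_status = RQ_INACTIVE;
	req->rl = NULL;

	/* ... ('rl' is set up earlier in the function, outside these hunks) ... */
	if (rl) {
		int rw = rq_data_dir(req);

		BUG_ON(!list_empty(&req->queuelist));

		blk_free_request(q, req);
		/* ... */
	}
}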
@@ -2477,14 +2480,14 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
 
 void blk_put_request(struct request *req)
 {
+	unsigned long flags;
+	request_queue_t *q = req->q;
+
 	/*
-	 * if req->rl isn't set, this request didnt originate from the
-	 * block layer, so it's safe to just disregard it
+	 * Gee, IDE calls in w/ NULL q.  Fix IDE and remove the
+	 * following if (q) test.
 	 */
-	if (req->rl) {
-		unsigned long flags;
-		request_queue_t *q = req->q;
-
+	if (q) {
 		spin_lock_irqsave(q->queue_lock, flags);
 		__blk_put_request(q, req);
 		spin_unlock_irqrestore(q->queue_lock, flags);
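Finally, blk_put_request() keys off the queue pointer rather than req->rl: the lock/put/unlock sequence is skipped only when req->q is NULL (the IDE case called out in the new comment). For orientation, the function as it reads after this hunk, reconstructed from the added and context lines; anything past the spin_unlock_irqrestore() call is outside the hunk and elided:

void blk_put_request(struct request *req)
{
	unsigned long flags;
	request_queue_t *q = req->q;

	/*
	 * Gee, IDE calls in w/ NULL q.  Fix IDE and remove the
	 * following if (q) test.
	 */
	if (q) {
		spin_lock_irqsave(q->queue_lock, flags);
		__blk_put_request(q, req);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
	/* ... anything after this point lies outside the hunk ... */
}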