path: root/block/elevator.c
author    David Woodhouse <David.Woodhouse@intel.com>    2008-08-09 11:42:20 -0400
committer Jens Axboe <jens.axboe@oracle.com>    2008-10-09 02:56:02 -0400
commit e17fc0a1ccf88f6d4dcb363729f3141b0958c325 (patch)
tree 0a7c2dc1c3159c2af14d87c67ca83e158b2c78b5 /block/elevator.c
parent d30a2605be9d5132d95944916e8f578fcfe4f976 (diff)
Allow elevators to sort/merge discard requests
But blkdev_issue_discard() still emits requests which are interpreted as soft barriers, because naïve callers might otherwise issue subsequent writes to those same sectors, which might cross on the queue (if they're reallocated quickly enough).

Callers still _can_ issue non-barrier discard requests, but they have to take care of queue ordering for themselves.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
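For illustration only (not part of this commit): a minimal sketch of what the barrier vs. non-barrier distinction described above looks like at bio submission time, assuming the 2.6.27/28-era flag and field names. submit_discard() and discard_end_io() are hypothetical helpers, not kernel APIs, and real callers would also split the range against the queue's limits the way blkdev_issue_discard() does.

/*
 * Sketch only: a barrier discard (what blkdev_issue_discard() emits)
 * keeps later writes to the same sectors from crossing it on the
 * queue; a bare discard leaves that ordering to the caller.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>

static void discard_end_io(struct bio *bio, int err)
{
        bio_put(bio);                   /* drop the reference on completion */
}

static void submit_discard(struct block_device *bdev, sector_t sector,
                           unsigned nr_sects, int barrier)
{
        struct bio *bio = bio_alloc(GFP_KERNEL, 0);

        if (!bio)
                return;

        bio->bi_bdev   = bdev;
        bio->bi_sector = sector;
        bio->bi_size   = nr_sects << 9; /* no data pages, just a sector range */
        bio->bi_end_io = discard_end_io;

        if (barrier)
                submit_bio((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER), bio);
        else
                submit_bio(1 << BIO_RW_DISCARD, bio);
}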
Diffstat (limited to 'block/elevator.c')
-rw-r--r--  block/elevator.c  12
1 files changed, 10 insertions, 2 deletions
diff --git a/block/elevator.c b/block/elevator.c
index ed6f8f32d27e..4f5127054e3f 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -75,6 +75,12 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
                 return 0;
 
         /*
+         * Don't merge file system requests and discard requests
+         */
+        if (bio_discard(bio) != bio_discard(rq->bio))
+                return 0;
+
+        /*
          * different data direction or already started, don't merge
          */
         if (bio_data_dir(bio) != rq_data_dir(rq))
@@ -438,6 +444,8 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
         list_for_each_prev(entry, &q->queue_head) {
                 struct request *pos = list_entry_rq(entry);
 
+                if (blk_discard_rq(rq) != blk_discard_rq(pos))
+                        break;
                 if (rq_data_dir(rq) != rq_data_dir(pos))
                         break;
                 if (pos->cmd_flags & stop_flags)
@@ -607,7 +615,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
                 break;
 
         case ELEVATOR_INSERT_SORT:
-                BUG_ON(!blk_fs_request(rq));
+                BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
                 rq->cmd_flags |= REQ_SORTED;
                 q->nr_sorted++;
                 if (rq_mergeable(rq)) {
@@ -692,7 +700,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
                  * this request is scheduling boundary, update
                  * end_sector
                  */
-                if (blk_fs_request(rq)) {
+                if (blk_fs_request(rq) || blk_discard_rq(rq)) {
                         q->end_sector = rq_end_sector(rq);
                         q->boundary_rq = rq;
                 }
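The hunks above key off two small helpers that are defined elsewhere in the tree rather than in this file. Approximately as they read in the headers of this era (quoted from memory, so treat the exact spellings as an assumption):

/* Not part of this diff: bio_discard() tests the bio's rw flags,
 * blk_discard_rq() tests the request's cmd_flags. */
#define bio_discard(bio)        ((bio)->bi_rw & (1 << BIO_RW_DISCARD))
#define blk_discard_rq(rq)      ((rq)->cmd_flags & REQ_DISCARD)

With those in place, elv_rq_merge_ok() refuses to merge a discard bio into a filesystem request (and vice versa), and elv_dispatch_sort() stops scanning as soon as it hits a request of the other type, so a discard is never sorted past an ordinary read or write.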