author     David Woodhouse <David.Woodhouse@intel.com>    2008-08-05 13:01:53 -0400
committer  Jens Axboe <jens.axboe@oracle.com>             2008-10-09 02:56:01 -0400
commit     fb2dce862d9f9a68e6b9374579056ec9eca02a63 (patch)
tree       888e0fd7248c9329fa1aa3981043a2dc2457d488 /block/blk-core.c
parent     d628eaef310533767ce68664873869c2d7f78f09 (diff)
Add 'discard' request handling
Some block devices benefit from a hint that they can forget the contents
of certain sectors. Add basic support for this to the block core, along
with a 'blkdev_issue_discard()' helper function which issues such
requests.
The caller doesn't get to provide an end_io function, since
blkdev_issue_discard() will automatically split the request up into
multiple bios if appropriate. Neither does the function wait for
completion -- it's expected that callers won't care about when, or even
_if_, the request completes. It's only a hint to the device anyway. By
definition, the file system doesn't _care_ about these sectors any more.
[With feedback from OGAWA Hirofumi <hirofumi@mail.parknet.co.jp> and
Jens Axboe <jens.axboe@oracle.com>]
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
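As a usage illustration only (not part of this commit), here is a minimal sketch of how a filesystem might call the new helper when it frees an extent. The (bdev, start sector, sector count) argument list and the myfs_* naming are assumptions; the helper itself is introduced elsewhere in this series, not in the blk-core.c hunks below.

#include <linux/blkdev.h>

/*
 * Hypothetical caller (not part of this patch): pass a just-freed range
 * of sectors to the device as a discard hint.  blkdev_issue_discard()
 * splits the range into bios itself and does not wait for completion,
 * so the only error worth distinguishing is "not supported".  The
 * (bdev, start, nr_sects) argument list is assumed from the rest of
 * this series; myfs_discard_extent() is an invented name.
 */
static void myfs_discard_extent(struct block_device *bdev,
				sector_t start, unsigned nr_sects)
{
	int err = blkdev_issue_discard(bdev, start, nr_sects);

	if (err && err != -EOPNOTSUPP)
		printk(KERN_DEBUG "myfs: discard of %llu+%u failed: %d\n",
		       (unsigned long long)start, nr_sects, err);
}

Because the request is only a hint, the -EOPNOTSUPP returned by a queue without a prepare_discard_fn (see the __make_request and generic_make_request hunks below) can simply be ignored.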
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c | 28
1 file changed, 20 insertions(+), 8 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index a496727df7ef..1e143c4f9d34 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1079,6 +1079,10 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	 */
 	if (unlikely(bio_barrier(bio)))
 		req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+	if (unlikely(bio_discard(bio))) {
+		req->cmd_flags |= (REQ_SOFTBARRIER | REQ_DISCARD);
+		req->q->prepare_discard_fn(req->q, req);
+	}
 
 	if (bio_sync(bio))
 		req->cmd_flags |= REQ_RW_SYNC;
@@ -1095,7 +1099,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 static int __make_request(struct request_queue *q, struct bio *bio)
 {
 	struct request *req;
-	int el_ret, nr_sectors, barrier, err;
+	int el_ret, nr_sectors, barrier, discard, err;
 	const unsigned short prio = bio_prio(bio);
 	const int sync = bio_sync(bio);
 	int rw_flags;
@@ -1115,6 +1119,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 		goto end_io;
 	}
 
+	discard = bio_discard(bio);
+	if (unlikely(discard) && !q->prepare_discard_fn) {
+		err = -EOPNOTSUPP;
+		goto end_io;
+	}
+
 	spin_lock_irq(q->queue_lock);
 
 	if (unlikely(barrier) || elv_queue_empty(q))
@@ -1405,7 +1415,8 @@ end_io:
 
 	if (bio_check_eod(bio, nr_sectors))
 		goto end_io;
-	if (bio_empty_barrier(bio) && !q->prepare_flush_fn) {
+	if ((bio_empty_barrier(bio) && !q->prepare_flush_fn) ||
+	    (bio_discard(bio) && !q->prepare_discard_fn)) {
 		err = -EOPNOTSUPP;
 		goto end_io;
 	}
@@ -1487,7 +1498,6 @@ void submit_bio(int rw, struct bio *bio)
 	 * go through the normal accounting stuff before submission.
 	 */
 	if (bio_has_data(bio)) {
-
 		if (rw & WRITE) {
 			count_vm_events(PGPGOUT, count);
 		} else {
@@ -1881,7 +1891,7 @@ static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
 	struct request_queue *q = rq->q;
 	unsigned long flags = 0UL;
 
-	if (bio_has_data(rq->bio)) {
+	if (bio_has_data(rq->bio) || blk_discard_rq(rq)) {
 		if (__end_that_request_first(rq, error, nr_bytes))
 			return 1;
 
@@ -1939,7 +1949,7 @@ EXPORT_SYMBOL_GPL(blk_end_request);
  **/
 int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
 {
-	if (bio_has_data(rq->bio) &&
+	if ((bio_has_data(rq->bio) || blk_discard_rq(rq)) &&
 	    __end_that_request_first(rq, error, nr_bytes))
 		return 1;
 
@@ -2012,12 +2022,14 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 	   we want BIO_RW_AHEAD (bit 1) to imply REQ_FAILFAST (bit 1). */
 	rq->cmd_flags |= (bio->bi_rw & 3);
 
-	rq->nr_phys_segments = bio_phys_segments(q, bio);
-	rq->nr_hw_segments = bio_hw_segments(q, bio);
+	if (bio_has_data(bio)) {
+		rq->nr_phys_segments = bio_phys_segments(q, bio);
+		rq->nr_hw_segments = bio_hw_segments(q, bio);
+		rq->buffer = bio_data(bio);
+	}
 	rq->current_nr_sectors = bio_cur_sectors(bio);
 	rq->hard_cur_sectors = rq->current_nr_sectors;
 	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
-	rq->buffer = bio_data(bio);
 	rq->data_len = bio->bi_size;
 
 	rq->bio = rq->biotail = bio;
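For illustration only, a sketch of the driver side of the new hook: the init_request_from_bio() hunk above calls q->prepare_discard_fn(q, req), and both __make_request() and generic_make_request() reject discards with -EOPNOTSUPP when the hook is absent. The int return type, the mydrv_* naming, and the blk_queue_set_discard() registration helper are assumptions based on the rest of this series; a driver could equally assign the queue field directly.

#include <linux/blkdev.h>

/*
 * Hypothetical driver-side prepare_discard_fn (not part of this patch).
 * It runs once init_request_from_bio() has flagged the request with
 * REQ_SOFTBARRIER | REQ_DISCARD, giving the driver a chance to turn the
 * payload-less request into whatever its hardware expects.
 */
static int mydrv_prepare_discard(struct request_queue *q, struct request *req)
{
	/*
	 * A real driver would mark the request as its own "forget these
	 * sectors" command here (or attach a payload for devices that
	 * need one).  Returning 0 accepts the request unchanged.
	 */
	return 0;
}

/*
 * At queue setup time the driver points the queue at the hook, e.g. via
 * the setter assumed to be added elsewhere in this series:
 *
 *	blk_queue_set_discard(q, mydrv_prepare_discard);
 */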