diff options
author | David Woodhouse <David.Woodhouse@intel.com> | 2008-08-05 13:01:53 -0400 |
---|---|---|
committer | Jens Axboe <jens.axboe@oracle.com> | 2008-10-09 02:56:01 -0400 |
commit | fb2dce862d9f9a68e6b9374579056ec9eca02a63 (patch) | |
tree | 888e0fd7248c9329fa1aa3981043a2dc2457d488 /include/linux/blkdev.h | |
parent | d628eaef310533767ce68664873869c2d7f78f09 (diff) |
Add 'discard' request handling
Some block devices benefit from a hint that they can forget the contents
of certain sectors. Add basic support for this to the block core, along
with a 'blkdev_issue_discard()' helper function which issues such
requests.
The caller doesn't get to provide an end_io function, since
blkdev_issue_discard() will automatically split the request up into
multiple bios if appropriate. Neither does the function wait for
completion -- it's expected that callers won't care about when, or even
_if_, the request completes. It's only a hint to the device anyway. By
definition, the file system doesn't _care_ about these sectors any more.
[With feedback from OGAWA Hirofumi <hirofumi@mail.parknet.co.jp> and
Jens Axboe <jens.axboe@oracle.com>]
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r-- | include/linux/blkdev.h | 16 |
1 files changed, 16 insertions, 0 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index e0ba018f5e88..26ececbbebe2 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -89,6 +89,7 @@ enum { | |||
89 | enum rq_flag_bits { | 89 | enum rq_flag_bits { |
90 | __REQ_RW, /* not set, read. set, write */ | 90 | __REQ_RW, /* not set, read. set, write */ |
91 | __REQ_FAILFAST, /* no low level driver retries */ | 91 | __REQ_FAILFAST, /* no low level driver retries */ |
92 | __REQ_DISCARD, /* request to discard sectors */ | ||
92 | __REQ_SORTED, /* elevator knows about this request */ | 93 | __REQ_SORTED, /* elevator knows about this request */ |
93 | __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ | 94 | __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ |
94 | __REQ_HARDBARRIER, /* may not be passed by drive either */ | 95 | __REQ_HARDBARRIER, /* may not be passed by drive either */ |
@@ -111,6 +112,7 @@ enum rq_flag_bits { | |||
111 | }; | 112 | }; |
112 | 113 | ||
113 | #define REQ_RW (1 << __REQ_RW) | 114 | #define REQ_RW (1 << __REQ_RW) |
115 | #define REQ_DISCARD (1 << __REQ_DISCARD) | ||
114 | #define REQ_FAILFAST (1 << __REQ_FAILFAST) | 116 | #define REQ_FAILFAST (1 << __REQ_FAILFAST) |
115 | #define REQ_SORTED (1 << __REQ_SORTED) | 117 | #define REQ_SORTED (1 << __REQ_SORTED) |
116 | #define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) | 118 | #define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) |
@@ -252,6 +254,7 @@ typedef void (request_fn_proc) (struct request_queue *q); | |||
252 | typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); | 254 | typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); |
253 | typedef int (prep_rq_fn) (struct request_queue *, struct request *); | 255 | typedef int (prep_rq_fn) (struct request_queue *, struct request *); |
254 | typedef void (unplug_fn) (struct request_queue *); | 256 | typedef void (unplug_fn) (struct request_queue *); |
257 | typedef int (prepare_discard_fn) (struct request_queue *, struct request *); | ||
255 | 258 | ||
256 | struct bio_vec; | 259 | struct bio_vec; |
257 | struct bvec_merge_data { | 260 | struct bvec_merge_data { |
@@ -307,6 +310,7 @@ struct request_queue | |||
307 | make_request_fn *make_request_fn; | 310 | make_request_fn *make_request_fn; |
308 | prep_rq_fn *prep_rq_fn; | 311 | prep_rq_fn *prep_rq_fn; |
309 | unplug_fn *unplug_fn; | 312 | unplug_fn *unplug_fn; |
313 | prepare_discard_fn *prepare_discard_fn; | ||
310 | merge_bvec_fn *merge_bvec_fn; | 314 | merge_bvec_fn *merge_bvec_fn; |
311 | prepare_flush_fn *prepare_flush_fn; | 315 | prepare_flush_fn *prepare_flush_fn; |
312 | softirq_done_fn *softirq_done_fn; | 316 | softirq_done_fn *softirq_done_fn; |
@@ -546,6 +550,7 @@ enum { | |||
546 | #define blk_sorted_rq(rq) ((rq)->cmd_flags & REQ_SORTED) | 550 | #define blk_sorted_rq(rq) ((rq)->cmd_flags & REQ_SORTED) |
547 | #define blk_barrier_rq(rq) ((rq)->cmd_flags & REQ_HARDBARRIER) | 551 | #define blk_barrier_rq(rq) ((rq)->cmd_flags & REQ_HARDBARRIER) |
548 | #define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA) | 552 | #define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA) |
553 | #define blk_discard_rq(rq) ((rq)->cmd_flags & REQ_DISCARD) | ||
549 | #define blk_bidi_rq(rq) ((rq)->next_rq != NULL) | 554 | #define blk_bidi_rq(rq) ((rq)->next_rq != NULL) |
550 | #define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors) | 555 | #define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors) |
551 | /* rq->queuelist of dequeued request must be list_empty() */ | 556 | /* rq->queuelist of dequeued request must be list_empty() */ |
@@ -796,6 +801,7 @@ extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); | |||
796 | extern void blk_queue_dma_alignment(struct request_queue *, int); | 801 | extern void blk_queue_dma_alignment(struct request_queue *, int); |
797 | extern void blk_queue_update_dma_alignment(struct request_queue *, int); | 802 | extern void blk_queue_update_dma_alignment(struct request_queue *, int); |
798 | extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); | 803 | extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); |
804 | extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *); | ||
799 | extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); | 805 | extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); |
800 | extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *); | 806 | extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *); |
801 | extern int blk_do_ordered(struct request_queue *, struct request **); | 807 | extern int blk_do_ordered(struct request_queue *, struct request **); |
@@ -837,6 +843,16 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, | |||
837 | } | 843 | } |
838 | 844 | ||
839 | extern int blkdev_issue_flush(struct block_device *, sector_t *); | 845 | extern int blkdev_issue_flush(struct block_device *, sector_t *); |
846 | extern int blkdev_issue_discard(struct block_device *, sector_t sector, | ||
847 | unsigned nr_sects); | ||
848 | |||
849 | static inline int sb_issue_discard(struct super_block *sb, | ||
850 | sector_t block, unsigned nr_blocks) | ||
851 | { | ||
852 | block <<= (sb->s_blocksize_bits - 9); | ||
853 | nr_blocks <<= (sb->s_blocksize_bits - 9); | ||
854 | return blkdev_issue_discard(sb->s_bdev, block, nr_blocks); | ||
855 | } | ||
840 | 856 | ||
841 | /* | 857 | /* |
842 | * command filter functions | 858 | * command filter functions |