about summary refs log tree commit diff stats
path: root/include
diff options
context:
space:
mode:
author	Tejun Heo <tj@kernel.org>	2010-09-03 05:56:16 -0400
committer	Jens Axboe <jaxboe@fusionio.com>	2010-09-10 06:35:36 -0400
commit	4913efe456c987057e5d36a3f0a55422a9072cae (patch)
tree	295f04a7214e1933df3301dd42c12ff3f282a22c /include
parent	6958f145459ca7ad9715024de97445addacb8510 (diff)
block: deprecate barrier and replace blk_queue_ordered() with blk_queue_flush()
Barrier is deemed too heavy and will soon be replaced by FLUSH/FUA
requests.  Deprecate barrier.  All REQ_HARDBARRIERs are failed with
-EOPNOTSUPP and blk_queue_ordered() is replaced with simpler
blk_queue_flush().

blk_queue_flush() takes combinations of REQ_FLUSH and FUA.  If a
device has write cache and can flush it, it should set REQ_FLUSH.  If
the device can handle FUA writes, it should also set REQ_FUA.

All blk_queue_ordered() users are converted.

* ORDERED_DRAIN is mapped to 0 which is the default value.
* ORDERED_DRAIN_FLUSH is mapped to REQ_FLUSH.
* ORDERED_DRAIN_FLUSH_FUA is mapped to REQ_FLUSH | REQ_FUA.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Boaz Harrosh <bharrosh@panasas.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Chris Wright <chrisw@sous-sol.org>
Cc: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Geert Uytterhoeven <Geert.Uytterhoeven@sonycom.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Alasdair G Kergon <agk@redhat.com>
Cc: Pierre Ossman <drzeus@drzeus.cx>
Cc: Stefan Weinhuber <wein@de.ibm.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'include')
-rw-r--r--include/linux/blkdev.h6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 7077bc0d6138..e97911d4dec3 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -355,8 +355,10 @@ struct request_queue
 	struct blk_trace *blk_trace;
 #endif
 	/*
-	 * reserved for flush operations
+	 * for flush operations
 	 */
+	unsigned int flush_flags;
+
 	unsigned int ordered, next_ordered, ordseq;
 	int orderr, ordcolor;
 	struct request pre_flush_rq, bar_rq, post_flush_rq;
@@ -865,8 +867,8 @@ extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
+extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern int blk_queue_ordered(struct request_queue *, unsigned);
 extern bool blk_do_ordered(struct request_queue *, struct request **);
 extern unsigned blk_ordered_cur_seq(struct request_queue *);
 extern unsigned blk_ordered_req_seq(struct request *);