diff options
author:    Tejun Heo <tj@kernel.org>         2010-09-03 05:56:16 -0400
committer: Jens Axboe <jaxboe@fusionio.com>  2010-09-10 06:35:36 -0400
commit:    4913efe456c987057e5d36a3f0a55422a9072cae (patch)
tree:      295f04a7214e1933df3301dd42c12ff3f282a22c /block
parent:    6958f145459ca7ad9715024de97445addacb8510 (diff)
block: deprecate barrier and replace blk_queue_ordered() with blk_queue_flush()
Barrier is deemed too heavy and will soon be replaced by FLUSH/FUA
requests. Deprecate barrier. All REQ_HARDBARRIERs are failed with
-EOPNOTSUPP and blk_queue_ordered() is replaced with simpler
blk_queue_flush().
blk_queue_flush() takes combinations of REQ_FLUSH and REQ_FUA. If a
device has write cache and can flush it, it should set REQ_FLUSH. If
the device can handle FUA writes, it should also set REQ_FUA.
All blk_queue_ordered() users are converted.
* ORDERED_DRAIN is mapped to 0 which is the default value.
* ORDERED_DRAIN_FLUSH is mapped to REQ_FLUSH.
* ORDERED_DRAIN_FLUSH_FUA is mapped to REQ_FLUSH | REQ_FUA.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Boaz Harrosh <bharrosh@panasas.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Chris Wright <chrisw@sous-sol.org>
Cc: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Geert Uytterhoeven <Geert.Uytterhoeven@sonycom.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Alasdair G Kergon <agk@redhat.com>
Cc: Pierre Ossman <drzeus@drzeus.cx>
Cc: Stefan Weinhuber <wein@de.ibm.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block')
 block/blk-barrier.c  | 29 +++-------------------
 block/blk-core.c     |  6 ++++--
 block/blk-settings.c | 20 ++++++++++++++++++++
 3 files changed, 24 insertions(+), 31 deletions(-)
diff --git a/block/blk-barrier.c b/block/blk-barrier.c index c807e9ca3a68..ed0aba5463ab 100644 --- a/block/blk-barrier.c +++ b/block/blk-barrier.c | |||
@@ -9,35 +9,6 @@ | |||
9 | 9 | ||
10 | #include "blk.h" | 10 | #include "blk.h" |
11 | 11 | ||
12 | /** | ||
13 | * blk_queue_ordered - does this queue support ordered writes | ||
14 | * @q: the request queue | ||
15 | * @ordered: one of QUEUE_ORDERED_* | ||
16 | * | ||
17 | * Description: | ||
18 | * For journalled file systems, doing ordered writes on a commit | ||
19 | * block instead of explicitly doing wait_on_buffer (which is bad | ||
20 | * for performance) can be a big win. Block drivers supporting this | ||
21 | * feature should call this function and indicate so. | ||
22 | * | ||
23 | **/ | ||
24 | int blk_queue_ordered(struct request_queue *q, unsigned ordered) | ||
25 | { | ||
26 | if (ordered != QUEUE_ORDERED_NONE && | ||
27 | ordered != QUEUE_ORDERED_DRAIN && | ||
28 | ordered != QUEUE_ORDERED_DRAIN_FLUSH && | ||
29 | ordered != QUEUE_ORDERED_DRAIN_FUA) { | ||
30 | printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered); | ||
31 | return -EINVAL; | ||
32 | } | ||
33 | |||
34 | q->ordered = ordered; | ||
35 | q->next_ordered = ordered; | ||
36 | |||
37 | return 0; | ||
38 | } | ||
39 | EXPORT_SYMBOL(blk_queue_ordered); | ||
40 | |||
41 | /* | 12 | /* |
42 | * Cache flushing for ordered writes handling | 13 | * Cache flushing for ordered writes handling |
43 | */ | 14 | */ |
diff --git a/block/blk-core.c b/block/blk-core.c index ee1a1e7e63cc..f06354183b29 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -1203,11 +1203,13 @@ static int __make_request(struct request_queue *q, struct bio *bio) | |||
1203 | const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK; | 1203 | const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK; |
1204 | int rw_flags; | 1204 | int rw_flags; |
1205 | 1205 | ||
1206 | if ((bio->bi_rw & REQ_HARDBARRIER) && | 1206 | /* REQ_HARDBARRIER is no more */ |
1207 | (q->next_ordered == QUEUE_ORDERED_NONE)) { | 1207 | if (WARN_ONCE(bio->bi_rw & REQ_HARDBARRIER, |
1208 | "block: HARDBARRIER is deprecated, use FLUSH/FUA instead\n")) { | ||
1208 | bio_endio(bio, -EOPNOTSUPP); | 1209 | bio_endio(bio, -EOPNOTSUPP); |
1209 | return 0; | 1210 | return 0; |
1210 | } | 1211 | } |
1212 | |||
1211 | /* | 1213 | /* |
1212 | * low level driver can indicate that it wants pages above a | 1214 | * low level driver can indicate that it wants pages above a |
1213 | * certain limit bounced to low memory (ie for highmem, or even | 1215 | * certain limit bounced to low memory (ie for highmem, or even |
diff --git a/block/blk-settings.c b/block/blk-settings.c index a234f4bf1d6f..9b18afcfe925 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c | |||
@@ -794,6 +794,26 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask) | |||
794 | } | 794 | } |
795 | EXPORT_SYMBOL(blk_queue_update_dma_alignment); | 795 | EXPORT_SYMBOL(blk_queue_update_dma_alignment); |
796 | 796 | ||
797 | /** | ||
798 | * blk_queue_flush - configure queue's cache flush capability | ||
799 | * @q: the request queue for the device | ||
800 | * @flush: 0, REQ_FLUSH or REQ_FLUSH | REQ_FUA | ||
801 | * | ||
802 | * Tell block layer cache flush capability of @q. If it supports | ||
803 | * flushing, REQ_FLUSH should be set. If it supports bypassing | ||
804 | * write cache for individual writes, REQ_FUA should be set. | ||
805 | */ | ||
806 | void blk_queue_flush(struct request_queue *q, unsigned int flush) | ||
807 | { | ||
808 | WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA)); | ||
809 | |||
810 | if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA))) | ||
811 | flush &= ~REQ_FUA; | ||
812 | |||
813 | q->flush_flags = flush & (REQ_FLUSH | REQ_FUA); | ||
814 | } | ||
815 | EXPORT_SYMBOL_GPL(blk_queue_flush); | ||
816 | |||
797 | static int __init blk_settings_init(void) | 817 | static int __init blk_settings_init(void) |
798 | { | 818 | { |
799 | blk_max_low_pfn = max_low_pfn - 1; | 819 | blk_max_low_pfn = max_low_pfn - 1; |