author     Tejun Heo <tj@kernel.org>           2010-09-03 05:56:16 -0400
committer  Jens Axboe <jaxboe@fusionio.com>    2010-09-10 06:35:36 -0400
commit     dd4c133f387c48f526022860ad70354637a80f4c
tree       7b741150d37d327b69e483468278d9de3a43a4e1  /block/blk-flush.c
parent     8839a0e055d9abd6c011d533373a8dd266cad011
block: rename barrier/ordered to flush
With ordering requirements dropped, "barrier" and "ordered" are
misnomers.  Now all the block layer does is sequence FLUSH and FUA.
Rename them to flush.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
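
The rename is easier to follow with the sequencing mechanism pulled out on its own: the queue walks through PREFLUSH -> DATA -> POSTFLUSH by setting bits in flush_seq, and the current step is always the lowest bit not yet set (1 << ffz(flush_seq)).  Below is a minimal userspace sketch of that idea, not kernel code: the QUEUE_FSEQ_* names and bit values mirror the patch as an assumption, while the ffz_emul() helper (emulating the kernel's ffz() with POSIX ffs()) and the main() harness are purely illustrative.

/*
 * Userspace sketch of the flush sequencing in this patch (illustrative,
 * not kernel code).  Step bits assumed to mirror QUEUE_FSEQ_* values.
 */
#include <stdio.h>
#include <strings.h>    /* ffs() */

#define QUEUE_FSEQ_STARTED    (1 << 0)
#define QUEUE_FSEQ_PREFLUSH   (1 << 1)
#define QUEUE_FSEQ_DATA       (1 << 2)
#define QUEUE_FSEQ_POSTFLUSH  (1 << 3)
#define QUEUE_FSEQ_DONE       (1 << 4)

/* lowest clear bit, emulating the kernel's ffz() via POSIX ffs() */
static unsigned ffz_emul(unsigned x)
{
        return ffs(~x) - 1;
}

/* analogue of blk_flush_cur_seq(): the step currently in flight */
static unsigned cur_seq(unsigned seq)
{
        return seq ? 1u << ffz_emul(seq) : 0;
}

int main(void)
{
        /*
         * Start a sequence with POSTFLUSH pre-marked done, the way
         * blk_do_flush() ORs skipped steps into its "skip" mask.
         */
        unsigned seq = QUEUE_FSEQ_STARTED | QUEUE_FSEQ_POSTFLUSH;

        while (cur_seq(seq) != QUEUE_FSEQ_DONE) {
                printf("running step 0x%x\n", cur_seq(seq));
                seq |= cur_seq(seq);    /* step complete, advance */
        }
        printf("flush sequence done\n");
        return 0;
}

Because each state is a bit rather than a counter, any subset of steps can be marked complete in a single call (as blk_flush_complete_seq() does for the skip mask), and the same lowest-clear-bit walk naturally lands on the next remaining step.
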
Diffstat (limited to 'block/blk-flush.c')
-rw-r--r--   block/blk-flush.c   98
1 file changed, 48 insertions(+), 50 deletions(-)
diff --git a/block/blk-flush.c b/block/blk-flush.c
index e8b2e5c091b1..dd873225da97 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -9,41 +9,38 @@
 
 #include "blk.h"
 
-static struct request *queue_next_ordseq(struct request_queue *q);
+static struct request *queue_next_fseq(struct request_queue *q);
 
-/*
- * Cache flushing for ordered writes handling
- */
-unsigned blk_ordered_cur_seq(struct request_queue *q)
+unsigned blk_flush_cur_seq(struct request_queue *q)
 {
-	if (!q->ordseq)
+	if (!q->flush_seq)
 		return 0;
-	return 1 << ffz(q->ordseq);
+	return 1 << ffz(q->flush_seq);
 }
 
-static struct request *blk_ordered_complete_seq(struct request_queue *q,
+static struct request *blk_flush_complete_seq(struct request_queue *q,
 						unsigned seq, int error)
 {
 	struct request *next_rq = NULL;
 
-	if (error && !q->orderr)
-		q->orderr = error;
+	if (error && !q->flush_err)
+		q->flush_err = error;
 
-	BUG_ON(q->ordseq & seq);
-	q->ordseq |= seq;
+	BUG_ON(q->flush_seq & seq);
+	q->flush_seq |= seq;
 
-	if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE) {
-		/* not complete yet, queue the next ordered sequence */
-		next_rq = queue_next_ordseq(q);
+	if (blk_flush_cur_seq(q) != QUEUE_FSEQ_DONE) {
+		/* not complete yet, queue the next flush sequence */
+		next_rq = queue_next_fseq(q);
 	} else {
-		/* complete this barrier request */
-		__blk_end_request_all(q->orig_bar_rq, q->orderr);
-		q->orig_bar_rq = NULL;
-		q->ordseq = 0;
+		/* complete this flush request */
+		__blk_end_request_all(q->orig_flush_rq, q->flush_err);
+		q->orig_flush_rq = NULL;
+		q->flush_seq = 0;
 
-		/* dispatch the next barrier if there's one */
-		if (!list_empty(&q->pending_barriers)) {
-			next_rq = list_entry_rq(q->pending_barriers.next);
+		/* dispatch the next flush if there's one */
+		if (!list_empty(&q->pending_flushes)) {
+			next_rq = list_entry_rq(q->pending_flushes.next);
 			list_move(&next_rq->queuelist, &q->queue_head);
 		}
 	}
@@ -53,19 +50,19 @@ static struct request *blk_ordered_complete_seq(struct request_queue *q,
 static void pre_flush_end_io(struct request *rq, int error)
 {
 	elv_completed_request(rq->q, rq);
-	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
+	blk_flush_complete_seq(rq->q, QUEUE_FSEQ_PREFLUSH, error);
 }
 
-static void bar_end_io(struct request *rq, int error)
+static void flush_data_end_io(struct request *rq, int error)
 {
 	elv_completed_request(rq->q, rq);
-	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
+	blk_flush_complete_seq(rq->q, QUEUE_FSEQ_DATA, error);
 }
 
 static void post_flush_end_io(struct request *rq, int error)
 {
 	elv_completed_request(rq->q, rq);
-	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
+	blk_flush_complete_seq(rq->q, QUEUE_FSEQ_POSTFLUSH, error);
 }
 
 static void queue_flush(struct request_queue *q, struct request *rq,
@@ -74,34 +71,34 @@ static void queue_flush(struct request_queue *q, struct request *rq,
 	blk_rq_init(q, rq);
 	rq->cmd_type = REQ_TYPE_FS;
 	rq->cmd_flags = REQ_FLUSH;
-	rq->rq_disk = q->orig_bar_rq->rq_disk;
+	rq->rq_disk = q->orig_flush_rq->rq_disk;
 	rq->end_io = end_io;
 
 	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
 
-static struct request *queue_next_ordseq(struct request_queue *q)
+static struct request *queue_next_fseq(struct request_queue *q)
 {
-	struct request *rq = &q->bar_rq;
+	struct request *rq = &q->flush_rq;
 
-	switch (blk_ordered_cur_seq(q)) {
-	case QUEUE_ORDSEQ_PREFLUSH:
+	switch (blk_flush_cur_seq(q)) {
+	case QUEUE_FSEQ_PREFLUSH:
 		queue_flush(q, rq, pre_flush_end_io);
 		break;
 
-	case QUEUE_ORDSEQ_BAR:
+	case QUEUE_FSEQ_DATA:
 		/* initialize proxy request and queue it */
 		blk_rq_init(q, rq);
-		init_request_from_bio(rq, q->orig_bar_rq->bio);
+		init_request_from_bio(rq, q->orig_flush_rq->bio);
 		rq->cmd_flags &= ~REQ_HARDBARRIER;
 		if (q->ordered & QUEUE_ORDERED_DO_FUA)
 			rq->cmd_flags |= REQ_FUA;
-		rq->end_io = bar_end_io;
+		rq->end_io = flush_data_end_io;
 
 		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 		break;
 
-	case QUEUE_ORDSEQ_POSTFLUSH:
+	case QUEUE_FSEQ_POSTFLUSH:
 		queue_flush(q, rq, post_flush_end_io);
 		break;
 
@@ -111,19 +108,20 @@ static struct request *queue_next_ordseq(struct request_queue *q)
 	return rq;
 }
 
-struct request *blk_do_ordered(struct request_queue *q, struct request *rq)
+struct request *blk_do_flush(struct request_queue *q, struct request *rq)
 {
 	unsigned skip = 0;
 
 	if (!(rq->cmd_flags & REQ_HARDBARRIER))
 		return rq;
 
-	if (q->ordseq) {
+	if (q->flush_seq) {
 		/*
-		 * Barrier is already in progress and they can't be
-		 * processed in parallel.  Queue for later processing.
+		 * Sequenced flush is already in progress and they
+		 * can't be processed in parallel.  Queue for later
+		 * processing.
 		 */
-		list_move_tail(&rq->queuelist, &q->pending_barriers);
+		list_move_tail(&rq->queuelist, &q->pending_flushes);
 		return NULL;
 	}
 
@@ -138,11 +136,11 @@ struct request *blk_do_ordered(struct request_queue *q, struct request *rq)
 	}
 
 	/*
-	 * Start a new ordered sequence
+	 * Start a new flush sequence
 	 */
-	q->orderr = 0;
+	q->flush_err = 0;
 	q->ordered = q->next_ordered;
-	q->ordseq |= QUEUE_ORDSEQ_STARTED;
+	q->flush_seq |= QUEUE_FSEQ_STARTED;
 
 	/*
 	 * For an empty barrier, there's no actual BAR request, which
@@ -154,19 +152,19 @@ struct request *blk_do_ordered(struct request_queue *q, struct request *rq)
 
 	/* stash away the original request */
 	blk_dequeue_request(rq);
-	q->orig_bar_rq = rq;
+	q->orig_flush_rq = rq;
 
 	if (!(q->ordered & QUEUE_ORDERED_DO_PREFLUSH))
-		skip |= QUEUE_ORDSEQ_PREFLUSH;
+		skip |= QUEUE_FSEQ_PREFLUSH;
 
 	if (!(q->ordered & QUEUE_ORDERED_DO_BAR))
-		skip |= QUEUE_ORDSEQ_BAR;
+		skip |= QUEUE_FSEQ_DATA;
 
 	if (!(q->ordered & QUEUE_ORDERED_DO_POSTFLUSH))
-		skip |= QUEUE_ORDSEQ_POSTFLUSH;
+		skip |= QUEUE_FSEQ_POSTFLUSH;
 
 	/* complete skipped sequences and return the first sequence */
-	return blk_ordered_complete_seq(q, skip, 0);
+	return blk_flush_complete_seq(q, skip, 0);
 }
 
 static void bio_end_empty_barrier(struct bio *bio, int err)