Diffstat (limited to 'block/blk-flush.c')
 block/blk-flush.c | 85 ++++++++++++++++++++++++++---------------------
 1 file changed, 46 insertions(+), 39 deletions(-)
diff --git a/block/blk-flush.c b/block/blk-flush.c
index dd873225da97..452c552e9ead 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -1,5 +1,5 @@
 /*
- * Functions related to barrier IO handling
+ * Functions to sequence FLUSH and FUA writes.
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -9,6 +9,15 @@
 
 #include "blk.h"
 
+/* FLUSH/FUA sequences */
+enum {
+	QUEUE_FSEQ_STARTED	= (1 << 0), /* flushing in progress */
+	QUEUE_FSEQ_PREFLUSH	= (1 << 1), /* pre-flushing in progress */
+	QUEUE_FSEQ_DATA		= (1 << 2), /* data write in progress */
+	QUEUE_FSEQ_POSTFLUSH	= (1 << 3), /* post-flushing in progress */
+	QUEUE_FSEQ_DONE		= (1 << 4),
+};
+
 static struct request *queue_next_fseq(struct request_queue *q);
 
 unsigned blk_flush_cur_seq(struct request_queue *q)
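
The five QUEUE_FSEQ_* bits above form an ordered progression: each completed
stage sets its bit, and the current stage is the lowest bit still clear. A
minimal userspace sketch of that walk, assuming the same bit layout; cur_seq()
is a hypothetical stand-in for this file's blk_flush_cur_seq():

	#include <stdio.h>

	enum {
		FSEQ_STARTED   = 1 << 0,
		FSEQ_PREFLUSH  = 1 << 1,
		FSEQ_DATA      = 1 << 2,
		FSEQ_POSTFLUSH = 1 << 3,
		FSEQ_DONE      = 1 << 4,
	};

	/* lowest clear bit of seq, i.e. the next stage to run */
	static unsigned cur_seq(unsigned seq)
	{
		return seq ? (~seq & (seq + 1)) : 0;
	}

	int main(void)
	{
		unsigned seq = FSEQ_STARTED;	/* sequence started */

		printf("%#x\n", cur_seq(seq));	/* 0x2: PREFLUSH */
		seq |= FSEQ_PREFLUSH;		/* pre-flush completed */
		printf("%#x\n", cur_seq(seq));	/* 0x4: DATA */
		seq |= FSEQ_DATA;		/* data write completed */
		printf("%#x\n", cur_seq(seq));	/* 0x8: POSTFLUSH */
		return 0;
	}

Skipped stages can be marked done up front by pre-setting their bits, which is
exactly what the skip mask in blk_do_flush() below does.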
@@ -79,6 +88,7 @@ static void queue_flush(struct request_queue *q, struct request *rq,
 
 static struct request *queue_next_fseq(struct request_queue *q)
 {
+	struct request *orig_rq = q->orig_flush_rq;
 	struct request *rq = &q->flush_rq;
 
 	switch (blk_flush_cur_seq(q)) {
@@ -87,12 +97,11 @@ static struct request *queue_next_fseq(struct request_queue *q)
 		break;
 
 	case QUEUE_FSEQ_DATA:
-		/* initialize proxy request and queue it */
+		/* initialize proxy request, inherit FLUSH/FUA and queue it */
 		blk_rq_init(q, rq);
-		init_request_from_bio(rq, q->orig_flush_rq->bio);
-		rq->cmd_flags &= ~REQ_HARDBARRIER;
-		if (q->ordered & QUEUE_ORDERED_DO_FUA)
-			rq->cmd_flags |= REQ_FUA;
+		init_request_from_bio(rq, orig_rq->bio);
+		rq->cmd_flags &= ~(REQ_FLUSH | REQ_FUA);
+		rq->cmd_flags |= orig_rq->cmd_flags & (REQ_FLUSH | REQ_FUA);
 		rq->end_io = flush_data_end_io;
 
 		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
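
The two new cmd_flags lines are a clear-then-inherit idiom: the proxy request
first drops the raw FLUSH/FUA bits that init_request_from_bio() copied from
the bio, then takes the originator's already-adjusted bits (blk_do_flush()
below clears REQ_FLUSH, and REQ_FUA too when the device lacks FUA, before
stashing the request). As a hypothetical helper, not a kernel API, the
pattern is:

	/* clear the masked bits in dst, then inherit them from src */
	static inline unsigned int inherit_flags(unsigned int dst,
						 unsigned int src,
						 unsigned int mask)
	{
		dst &= ~mask;		/* e.g. REQ_FLUSH | REQ_FUA */
		dst |= src & mask;
		return dst;
	}

This makes the data write carry the caller's own FUA intent rather than the
queue-wide QUEUE_ORDERED_DO_FUA policy the old code applied.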
@@ -110,60 +119,58 @@ static struct request *queue_next_fseq(struct request_queue *q)
 
 struct request *blk_do_flush(struct request_queue *q, struct request *rq)
 {
+	unsigned int fflags = q->flush_flags; /* may change, cache it */
+	bool has_flush = fflags & REQ_FLUSH, has_fua = fflags & REQ_FUA;
+	bool do_preflush = has_flush && (rq->cmd_flags & REQ_FLUSH);
+	bool do_postflush = has_flush && !has_fua && (rq->cmd_flags & REQ_FUA);
 	unsigned skip = 0;
 
-	if (!(rq->cmd_flags & REQ_HARDBARRIER))
+	/*
+	 * Special case.  If there's data but flush is not necessary,
+	 * the request can be issued directly.
+	 *
+	 * Flush w/o data should be able to be issued directly too but
+	 * currently some drivers assume that rq->bio contains
+	 * non-zero data if it isn't NULL and empty FLUSH requests
+	 * getting here usually have bios without data.
+	 */
+	if (blk_rq_sectors(rq) && !do_preflush && !do_postflush) {
+		rq->cmd_flags &= ~REQ_FLUSH;
+		if (!has_fua)
+			rq->cmd_flags &= ~REQ_FUA;
 		return rq;
+	}
 
+	/*
+	 * Sequenced flushes can't be processed in parallel.  If
+	 * another one is already in progress, queue for later
+	 * processing.
+	 */
 	if (q->flush_seq) {
-		/*
-		 * Sequenced flush is already in progress and they
-		 * can't be processed in parallel.  Queue for later
-		 * processing.
-		 */
 		list_move_tail(&rq->queuelist, &q->pending_flushes);
 		return NULL;
 	}
 
-	if (unlikely(q->next_ordered == QUEUE_ORDERED_NONE)) {
-		/*
-		 * Queue ordering not supported.  Terminate
-		 * with prejudice.
-		 */
-		blk_dequeue_request(rq);
-		__blk_end_request_all(rq, -EOPNOTSUPP);
-		return NULL;
-	}
-
 	/*
 	 * Start a new flush sequence
 	 */
 	q->flush_err = 0;
-	q->ordered = q->next_ordered;
 	q->flush_seq |= QUEUE_FSEQ_STARTED;
 
-	/*
-	 * For an empty barrier, there's no actual BAR request, which
-	 * in turn makes POSTFLUSH unnecessary.  Mask them off.
-	 */
-	if (!blk_rq_sectors(rq))
-		q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
-				QUEUE_ORDERED_DO_POSTFLUSH);
-
-	/* stash away the original request */
+	/* adjust FLUSH/FUA of the original request and stash it away */
+	rq->cmd_flags &= ~REQ_FLUSH;
+	if (!has_fua)
+		rq->cmd_flags &= ~REQ_FUA;
 	blk_dequeue_request(rq);
 	q->orig_flush_rq = rq;
 
-	if (!(q->ordered & QUEUE_ORDERED_DO_PREFLUSH))
+	/* skip unneeded sequences and return the first one */
+	if (!do_preflush)
 		skip |= QUEUE_FSEQ_PREFLUSH;
-
-	if (!(q->ordered & QUEUE_ORDERED_DO_BAR))
+	if (!blk_rq_sectors(rq))
 		skip |= QUEUE_FSEQ_DATA;
-
-	if (!(q->ordered & QUEUE_ORDERED_DO_POSTFLUSH))
+	if (!do_postflush)
 		skip |= QUEUE_FSEQ_POSTFLUSH;
-
-	/* complete skipped sequences and return the first sequence */
 	return blk_flush_complete_seq(q, skip, 0);
 }
 
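For context on the new do_preflush/do_postflush tests: q->flush_flags is the
capability set the driver advertises, so a REQ_FLUSH request only gets a
PREFLUSH stage on a device with a volatile cache, and a REQ_FUA write on a
device without native FUA is emulated as the data write plus a POSTFLUSH.
A sketch of the driver-side declaration via blk_queue_flush(), added in the
same series; vdev_setup_queue() is a hypothetical driver hook:

	/* device has a volatile write cache and honors FUA natively */
	static void vdev_setup_queue(struct request_queue *q)
	{
		blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
	}

Passing only REQ_FLUSH would instead leave has_fua false above, so FUA writes
keep their data stage and pick up QUEUE_FSEQ_POSTFLUSH.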