author     Tejun Heo <tj@kernel.org>            2008-11-27 23:32:05 -0500
committer  Jens Axboe <jens.axboe@oracle.com>   2008-12-29 02:28:45 -0500
commit     8f11b3e99a1136fcbb67316c3260f085299c0bff (patch)
tree       bb9b12fe23aceac19e24f674786612d0fcad2142
parent     f671620e7d895af221bdfeda751d54fa55ed9546 (diff)
block: make barrier completion more robust
Barrier completion had the following assumptions.

* start_ordered() couldn't finish the whole sequence properly.  If
  all actions are to be skipped, q->ordseq is set correctly but the
  actual completion was never triggered, thus hanging the barrier
  request.

* Drain completion in elv_completed_request() assumed that there's
  always at least one request in the queue when drain completes.

Both assumptions hold today, but they need to be removed to improve
the empty barrier implementation.  This patch makes the following
changes.

* Make start_ordered() use blk_ordered_complete_seq() to mark skipped
  steps complete and notify __elv_next_request() that it should fetch
  the next request if the whole barrier has completed inside
  start_ordered().

* Make the drain completion path in elv_completed_request() check
  whether the queue is empty.  An empty queue also indicates drain
  completion.

* While at it, convert the 0/1 return from blk_do_ordered() to
  false/true.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
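For context, the consumer of these return values is the
__elv_next_request() named in the message above: a false return from
blk_do_ordered() now means the peeked request is gone (e.g. the whole
barrier sequence completed inside start_ordered()) and the dispatch
loop should look at the queue head again.  A minimal sketch of that
loop, assuming the block-layer internals of this era; this is not the
verbatim kernel source:

/*
 * Simplified sketch of the dispatch-loop caller, __elv_next_request()
 * (block/blk.h around 2.6.28).  Special cases and locking omitted.
 */
static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		while (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			/*
			 * true:  dispatch rq (blk_do_ordered() may have
			 *        set it to NULL to block dispatch while
			 *        the barrier sequence is in progress).
			 * false: rq was consumed by the barrier
			 *        machinery; peek the queue head again.
			 */
			if (blk_do_ordered(q, &rq))
				return rq;
		}

		/* Nothing queued: ask the elevator for more requests. */
		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
			return NULL;
	}
}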
-rw-r--r--  block/blk-barrier.c      45
-rw-r--r--  block/elevator.c         10
-rw-r--r--  include/linux/blkdev.h    4
3 files changed, 36 insertions(+), 23 deletions(-)
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 1efabf829c53..b03d88013e1e 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -88,7 +88,7 @@ unsigned blk_ordered_req_seq(struct request *rq)
 	return QUEUE_ORDSEQ_DONE;
 }
 
-void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
+bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 {
 	struct request *rq;
 
@@ -99,7 +99,7 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 	q->ordseq |= seq;
 
 	if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
-		return;
+		return false;
 
 	/*
 	 * Okay, sequence complete.
@@ -109,6 +109,8 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 
 	if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
 		BUG();
+
+	return true;
 }
 
 static void pre_flush_end_io(struct request *rq, int error)
@@ -151,9 +153,11 @@ static void queue_flush(struct request_queue *q, unsigned which)
 	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
 
-static inline struct request *start_ordered(struct request_queue *q,
-					    struct request *rq)
+static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 {
+	struct request *rq = *rqp;
+	unsigned skip = 0;
+
 	q->orderr = 0;
 	q->ordered = q->next_ordered;
 	q->ordseq |= QUEUE_ORDSEQ_STARTED;
@@ -177,7 +181,7 @@ static inline struct request *start_ordered(struct request_queue *q,
 		queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
 		rq = &q->post_flush_rq;
 	} else
-		q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
+		skip |= QUEUE_ORDSEQ_POSTFLUSH;
 
 	if (q->ordered & QUEUE_ORDERED_DO_BAR) {
 		rq = &q->bar_rq;
@@ -193,35 +197,40 @@ static inline struct request *start_ordered(struct request_queue *q,
 
 		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 	} else
-		q->ordseq |= QUEUE_ORDSEQ_BAR;
+		skip |= QUEUE_ORDSEQ_BAR;
 
 	if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
 		queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
 		rq = &q->pre_flush_rq;
 	} else
-		q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;
+		skip |= QUEUE_ORDSEQ_PREFLUSH;
 
 	if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight)
 		rq = NULL;
 	else
-		q->ordseq |= QUEUE_ORDSEQ_DRAIN;
+		skip |= QUEUE_ORDSEQ_DRAIN;
+
+	*rqp = rq;
 
-	return rq;
+	/*
+	 * Complete skipped sequences.  If whole sequence is complete,
+	 * return false to tell elevator that this request is gone.
+	 */
+	return !blk_ordered_complete_seq(q, skip, 0);
 }
 
-int blk_do_ordered(struct request_queue *q, struct request **rqp)
+bool blk_do_ordered(struct request_queue *q, struct request **rqp)
 {
 	struct request *rq = *rqp;
 	const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
 
 	if (!q->ordseq) {
 		if (!is_barrier)
-			return 1;
+			return true;
 
-		if (q->next_ordered != QUEUE_ORDERED_NONE) {
-			*rqp = start_ordered(q, rq);
-			return 1;
-		} else {
+		if (q->next_ordered != QUEUE_ORDERED_NONE)
+			return start_ordered(q, rqp);
+		else {
 			/*
 			 * Queue ordering not supported.  Terminate
 			 * with prejudice.
@@ -231,7 +240,7 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
 					      blk_rq_bytes(rq)))
 				BUG();
 			*rqp = NULL;
-			return 0;
+			return false;
 		}
 	}
 
@@ -242,7 +251,7 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
 	/* Special requests are not subject to ordering rules. */
 	if (!blk_fs_request(rq) &&
 	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
-		return 1;
+		return true;
 
 	if (q->ordered & QUEUE_ORDERED_BY_TAG) {
 		/* Ordered by tag.  Blocking the next barrier is enough. */
@@ -255,7 +264,7 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
 		*rqp = NULL;
 	}
 
-	return 1;
+	return true;
 }
 
 static void bio_end_empty_barrier(struct bio *bio, int err)
diff --git a/block/elevator.c b/block/elevator.c
index 86836dd179c0..261ffaaf47bd 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -944,10 +944,14 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 	 * drained for flush sequence.
 	 */
 	if (unlikely(q->ordseq)) {
-		struct request *first_rq = list_entry_rq(q->queue_head.next);
-		if (q->in_flight == 0 &&
+		struct request *next = NULL;
+
+		if (!list_empty(&q->queue_head))
+			next = list_entry_rq(q->queue_head.next);
+
+		if (!q->in_flight &&
 		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
-		    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
+		    (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
 			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
 			blk_start_queueing(q);
 		}
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b044267009ed..3c7078e0129d 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -866,10 +866,10 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
-extern int blk_do_ordered(struct request_queue *, struct request **);
+extern bool blk_do_ordered(struct request_queue *, struct request **);
 extern unsigned blk_ordered_cur_seq(struct request_queue *);
 extern unsigned blk_ordered_req_seq(struct request *);
-extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
+extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);
 
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);