Diffstat (limited to 'block')
-rw-r--r--	block/blk-barrier.c	32
-rw-r--r--	block/blk-core.c	21
-rw-r--r--	block/blk.h	7
3 files changed, 23 insertions, 37 deletions
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index ed0aba5463ab..f1be85ba2bb5 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -110,9 +110,9 @@ static void queue_flush(struct request_queue *q, unsigned which)
 	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
 
-static inline bool start_ordered(struct request_queue *q, struct request **rqp)
+static inline struct request *start_ordered(struct request_queue *q,
+					    struct request *rq)
 {
-	struct request *rq = *rqp;
 	unsigned skip = 0;
 
 	q->orderr = 0;
@@ -149,11 +149,9 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 
 	/* initialize proxy request and queue it */
 	blk_rq_init(q, rq);
-	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
-		rq->cmd_flags |= REQ_WRITE;
+	init_request_from_bio(rq, q->orig_bar_rq->bio);
 	if (q->ordered & QUEUE_ORDERED_DO_FUA)
 		rq->cmd_flags |= REQ_FUA;
-	init_request_from_bio(rq, q->orig_bar_rq->bio);
 	rq->end_io = bar_end_io;
 
 	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
@@ -171,27 +169,26 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 	else
 		skip |= QUEUE_ORDSEQ_DRAIN;
 
-	*rqp = rq;
-
 	/*
 	 * Complete skipped sequences. If whole sequence is complete,
-	 * return false to tell elevator that this request is gone.
+	 * return %NULL to tell elevator that this request is gone.
 	 */
-	return !blk_ordered_complete_seq(q, skip, 0);
+	if (blk_ordered_complete_seq(q, skip, 0))
+		rq = NULL;
+	return rq;
 }
 
-bool blk_do_ordered(struct request_queue *q, struct request **rqp)
+struct request *blk_do_ordered(struct request_queue *q, struct request *rq)
 {
-	struct request *rq = *rqp;
 	const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
 				(rq->cmd_flags & REQ_HARDBARRIER);
 
 	if (!q->ordseq) {
 		if (!is_barrier)
-			return true;
+			return rq;
 
 		if (q->next_ordered != QUEUE_ORDERED_NONE)
-			return start_ordered(q, rqp);
+			return start_ordered(q, rq);
 		else {
 			/*
 			 * Queue ordering not supported. Terminate
@@ -199,8 +196,7 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
 			 */
 			blk_dequeue_request(rq);
 			__blk_end_request_all(rq, -EOPNOTSUPP);
-			*rqp = NULL;
-			return false;
+			return NULL;
 		}
 	}
 
@@ -211,14 +207,14 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
 	/* Special requests are not subject to ordering rules. */
 	if (rq->cmd_type != REQ_TYPE_FS &&
 	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
-		return true;
+		return rq;
 
 	/* Ordered by draining. Wait for turn. */
 	WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
 	if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
-		*rqp = NULL;
+		rq = ERR_PTR(-EAGAIN);
 
-	return true;
+	return rq;
 }
 
 static void bio_end_empty_barrier(struct bio *bio, int err)
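
The hunks above change the calling convention of start_ordered() and blk_do_ordered(): instead of returning a bool and passing the request back through a struct request ** argument, blk_do_ordered() now returns the request to dispatch, NULL when the request has been consumed by the ordering machinery, or ERR_PTR(-EAGAIN) when the request must wait for its turn in the drain sequence. As a rough illustration only (this helper is not part of the patch; its name and structure are hypothetical), a caller under the new convention looks like:

/* Hypothetical caller sketch -- mirrors __elv_next_request() in block/blk.h below. */
static struct request *fetch_ordered(struct request_queue *q, struct request *rq)
{
	rq = blk_do_ordered(q, rq);
	if (!rq)
		return NULL;		/* consumed: sequence complete or terminated with -EOPNOTSUPP */
	if (IS_ERR(rq))
		return NULL;		/* ERR_PTR(-EAGAIN): ordered by draining, wait for turn */
	return rq;			/* dispatch this request */
}
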
diff --git a/block/blk-core.c b/block/blk-core.c
index f06354183b29..f8d37a8e2c55 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1037,22 +1037,6 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 }
 EXPORT_SYMBOL(blk_insert_request);
 
-/*
- * add-request adds a request to the linked list.
- * queue lock is held and interrupts disabled, as we muck with the
- * request queue list.
- */
-static inline void add_request(struct request_queue *q, struct request *req)
-{
-	drive_stat_acct(req, 1);
-
-	/*
-	 * elevator indicated where it wants this request to be
-	 * inserted at elevator_merge time
-	 */
-	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
-}
-
 static void part_round_stats_single(int cpu, struct hd_struct *part,
 				    unsigned long now)
 {
@@ -1316,7 +1300,10 @@ get_rq:
 	req->cpu = blk_cpu_to_group(smp_processor_id());
 	if (queue_should_plug(q) && elv_queue_empty(q))
 		blk_plug_device(q);
-	add_request(q, req);
+
+	/* insert the request into the elevator */
+	drive_stat_acct(req, 1);
+	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
 out:
 	if (unplug || !queue_should_plug(q))
 		__generic_unplug_device(q);
diff --git a/block/blk.h b/block/blk.h
index 6e7dc87141e4..874eb4ea8093 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -51,6 +51,8 @@ static inline void blk_clear_rq_complete(struct request *rq)
  */
 #define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
 
+struct request *blk_do_ordered(struct request_queue *q, struct request *rq);
+
 static inline struct request *__elv_next_request(struct request_queue *q)
 {
 	struct request *rq;
@@ -58,8 +60,9 @@ static inline struct request *__elv_next_request(struct request_queue *q)
 	while (1) {
 		while (!list_empty(&q->queue_head)) {
 			rq = list_entry_rq(q->queue_head.next);
-			if (blk_do_ordered(q, &rq))
-				return rq;
+			rq = blk_do_ordered(q, rq);
+			if (rq)
+				return !IS_ERR(rq) ? rq : NULL;
 		}
 
 		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
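
One consequence worth noting (not stated in the patch itself, and assuming the usual blk_fetch_request()/blk_peek_request() path into __elv_next_request()): since __elv_next_request() maps an ERR_PTR() result back to NULL, a driver looping on blk_fetch_request() behaves exactly as before while a barrier sequence is draining -- it simply sees an empty queue. A hypothetical driver request_fn, unchanged by this patch:

/*
 * Hypothetical request_fn sketch: while the barrier sequence drains,
 * blk_fetch_request() keeps returning NULL because __elv_next_request()
 * turns the ERR_PTR(-EAGAIN) from blk_do_ordered() into NULL.
 */
static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		/* ... process the request ... */
		__blk_end_request_all(rq, 0);
	}
}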