author     Jens Axboe <jaxboe@fusionio.com>          2011-03-10 02:58:35 -0500
committer  Jens Axboe <jaxboe@fusionio.com>          2011-03-10 02:58:35 -0500
commit     4c63f5646e405b5010cc9499419060bf2e838f5b (patch)
tree       df91ba315032c8ec4aafeb3ab96fdfa7c6c656e1 /block
parent     cafb0bfca1a73efd6d8a4a6a6a716e6134b96c24 (diff)
parent     69d60eb96ae8a73cf9b79cf28051caf973006011 (diff)
Merge branch 'for-2.6.39/stack-plug' into for-2.6.39/core
Conflicts:
	block/blk-core.c
	block/blk-flush.c
	drivers/md/raid1.c
	drivers/md/raid10.c
	drivers/md/raid5.c
	fs/nilfs2/btnode.c
	fs/nilfs2/mdt.c
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c         | 540
-rw-r--r--  block/blk-exec.c         |   4
-rw-r--r--  block/blk-flush.c        |   6
-rw-r--r--  block/blk-settings.c     |   8
-rw-r--r--  block/blk-throttle.c     |   4
-rw-r--r--  block/blk.h              |   2
-rw-r--r--  block/cfq-iosched.c      |   8
-rw-r--r--  block/deadline-iosched.c |   9
-rw-r--r--  block/elevator.c         |  49
-rw-r--r--  block/noop-iosched.c     |   8
10 files changed, 323 insertions, 315 deletions
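
For context, the on-stack plugging series merged here replaces the old per-queue plug (blk_plug_device()/blk_unplug()) with a plug that lives on the submitter's stack and is flushed when the submitter is done. A minimal, hedged sketch of the caller-side pattern follows; submit_bio_batch() is an invented name, while blk_start_plug(), blk_finish_plug(), bio_list_pop() and generic_make_request() are the interfaces visible in the diffs below.

#include <linux/bio.h>		/* struct bio_list, bio_list_pop() */
#include <linux/blkdev.h>	/* struct blk_plug and the plug API */

/*
 * Illustrative only: submit a batch of already-built bios behind an
 * on-stack plug.  Requests collect on current->plug and are merged,
 * sorted per queue and dispatched when the plug is finished, instead
 * of plugging/unplugging the device queue by hand.
 */
static void submit_bio_batch(struct bio_list *bios)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(bios)))
		generic_make_request(bio);
	blk_finish_plug(&plug);
}

The blk-throttle.c hunk further down makes exactly this conversion, replacing blk_unplug(q) with a blk_start_plug()/blk_finish_plug() pair around its generic_make_request() loop.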
diff --git a/block/blk-core.c b/block/blk-core.c
index 74d496ccf4d7..e1fcf7a24668 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/writeback.h> | 27 | #include <linux/writeback.h> |
28 | #include <linux/task_io_accounting_ops.h> | 28 | #include <linux/task_io_accounting_ops.h> |
29 | #include <linux/fault-inject.h> | 29 | #include <linux/fault-inject.h> |
30 | #include <linux/list_sort.h> | ||
30 | 31 | ||
31 | #define CREATE_TRACE_POINTS | 32 | #define CREATE_TRACE_POINTS |
32 | #include <trace/events/block.h> | 33 | #include <trace/events/block.h> |
@@ -198,135 +199,43 @@ void blk_dump_rq_flags(struct request *rq, char *msg) | |||
198 | EXPORT_SYMBOL(blk_dump_rq_flags); | 199 | EXPORT_SYMBOL(blk_dump_rq_flags); |
199 | 200 | ||
200 | /* | 201 | /* |
201 | * "plug" the device if there are no outstanding requests: this will | 202 | * Make sure that plugs that were pending when this function was entered, |
202 | * force the transfer to start only after we have put all the requests | 203 | * are now complete and requests pushed to the queue. |
203 | * on the list. | 204 | */ |
204 | * | 205 | static inline void queue_sync_plugs(struct request_queue *q) |
205 | * This is called with interrupts off and no requests on the queue and | ||
206 | * with the queue lock held. | ||
207 | */ | ||
208 | void blk_plug_device(struct request_queue *q) | ||
209 | { | 206 | { |
210 | WARN_ON(!irqs_disabled()); | ||
211 | |||
212 | /* | 207 | /* |
213 | * don't plug a stopped queue, it must be paired with blk_start_queue() | 208 | * If the current process is plugged and has barriers submitted, |
214 | * which will restart the queueing | 209 | * we will livelock if we don't unplug first. |
215 | */ | 210 | */ |
216 | if (blk_queue_stopped(q)) | 211 | blk_flush_plug(current); |
217 | return; | ||
218 | |||
219 | if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) { | ||
220 | mod_timer(&q->unplug_timer, jiffies + q->unplug_delay); | ||
221 | trace_block_plug(q); | ||
222 | } | ||
223 | } | ||
224 | EXPORT_SYMBOL(blk_plug_device); | ||
225 | |||
226 | /** | ||
227 | * blk_plug_device_unlocked - plug a device without queue lock held | ||
228 | * @q: The &struct request_queue to plug | ||
229 | * | ||
230 | * Description: | ||
231 | * Like @blk_plug_device(), but grabs the queue lock and disables | ||
232 | * interrupts. | ||
233 | **/ | ||
234 | void blk_plug_device_unlocked(struct request_queue *q) | ||
235 | { | ||
236 | unsigned long flags; | ||
237 | |||
238 | spin_lock_irqsave(q->queue_lock, flags); | ||
239 | blk_plug_device(q); | ||
240 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
241 | } | ||
242 | EXPORT_SYMBOL(blk_plug_device_unlocked); | ||
243 | |||
244 | /* | ||
245 | * remove the queue from the plugged list, if present. called with | ||
246 | * queue lock held and interrupts disabled. | ||
247 | */ | ||
248 | int blk_remove_plug(struct request_queue *q) | ||
249 | { | ||
250 | WARN_ON(!irqs_disabled()); | ||
251 | |||
252 | if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q)) | ||
253 | return 0; | ||
254 | |||
255 | del_timer(&q->unplug_timer); | ||
256 | return 1; | ||
257 | } | 212 | } |
258 | EXPORT_SYMBOL(blk_remove_plug); | ||
259 | 213 | ||
260 | /* | 214 | static void blk_delay_work(struct work_struct *work) |
261 | * remove the plug and let it rip.. | ||
262 | */ | ||
263 | void __generic_unplug_device(struct request_queue *q) | ||
264 | { | 215 | { |
265 | if (unlikely(blk_queue_stopped(q))) | 216 | struct request_queue *q; |
266 | return; | ||
267 | if (!blk_remove_plug(q) && !blk_queue_nonrot(q)) | ||
268 | return; | ||
269 | 217 | ||
270 | q->request_fn(q); | 218 | q = container_of(work, struct request_queue, delay_work.work); |
219 | spin_lock_irq(q->queue_lock); | ||
220 | __blk_run_queue(q, false); | ||
221 | spin_unlock_irq(q->queue_lock); | ||
271 | } | 222 | } |
272 | 223 | ||
273 | /** | 224 | /** |
274 | * generic_unplug_device - fire a request queue | 225 | * blk_delay_queue - restart queueing after defined interval |
275 | * @q: The &struct request_queue in question | 226 | * @q: The &struct request_queue in question |
227 | * @msecs: Delay in msecs | ||
276 | * | 228 | * |
277 | * Description: | 229 | * Description: |
278 | * Linux uses plugging to build bigger requests queues before letting | 230 | * Sometimes queueing needs to be postponed for a little while, to allow |
279 | * the device have at them. If a queue is plugged, the I/O scheduler | 231 | * resources to come back. This function will make sure that queueing is |
280 | * is still adding and merging requests on the queue. Once the queue | 232 | * restarted around the specified time. |
281 | * gets unplugged, the request_fn defined for the queue is invoked and | 233 | */ |
282 | * transfers started. | 234 | void blk_delay_queue(struct request_queue *q, unsigned long msecs) |
283 | **/ | ||
284 | void generic_unplug_device(struct request_queue *q) | ||
285 | { | ||
286 | if (blk_queue_plugged(q)) { | ||
287 | spin_lock_irq(q->queue_lock); | ||
288 | __generic_unplug_device(q); | ||
289 | spin_unlock_irq(q->queue_lock); | ||
290 | } | ||
291 | } | ||
292 | EXPORT_SYMBOL(generic_unplug_device); | ||
293 | |||
294 | static void blk_backing_dev_unplug(struct backing_dev_info *bdi, | ||
295 | struct page *page) | ||
296 | { | ||
297 | struct request_queue *q = bdi->unplug_io_data; | ||
298 | |||
299 | blk_unplug(q); | ||
300 | } | ||
301 | |||
302 | void blk_unplug_work(struct work_struct *work) | ||
303 | { | ||
304 | struct request_queue *q = | ||
305 | container_of(work, struct request_queue, unplug_work); | ||
306 | |||
307 | trace_block_unplug_io(q); | ||
308 | q->unplug_fn(q); | ||
309 | } | ||
310 | |||
311 | void blk_unplug_timeout(unsigned long data) | ||
312 | { | ||
313 | struct request_queue *q = (struct request_queue *)data; | ||
314 | |||
315 | trace_block_unplug_timer(q); | ||
316 | kblockd_schedule_work(q, &q->unplug_work); | ||
317 | } | ||
318 | |||
319 | void blk_unplug(struct request_queue *q) | ||
320 | { | 235 | { |
321 | /* | 236 | schedule_delayed_work(&q->delay_work, msecs_to_jiffies(msecs)); |
322 | * devices don't necessarily have an ->unplug_fn defined | ||
323 | */ | ||
324 | if (q->unplug_fn) { | ||
325 | trace_block_unplug_io(q); | ||
326 | q->unplug_fn(q); | ||
327 | } | ||
328 | } | 237 | } |
329 | EXPORT_SYMBOL(blk_unplug); | 238 | EXPORT_SYMBOL(blk_delay_queue); |
330 | 239 | ||
331 | /** | 240 | /** |
332 | * blk_start_queue - restart a previously stopped queue | 241 | * blk_start_queue - restart a previously stopped queue |
@@ -362,7 +271,7 @@ EXPORT_SYMBOL(blk_start_queue); | |||
362 | **/ | 271 | **/ |
363 | void blk_stop_queue(struct request_queue *q) | 272 | void blk_stop_queue(struct request_queue *q) |
364 | { | 273 | { |
365 | blk_remove_plug(q); | 274 | cancel_delayed_work(&q->delay_work); |
366 | queue_flag_set(QUEUE_FLAG_STOPPED, q); | 275 | queue_flag_set(QUEUE_FLAG_STOPPED, q); |
367 | } | 276 | } |
368 | EXPORT_SYMBOL(blk_stop_queue); | 277 | EXPORT_SYMBOL(blk_stop_queue); |
@@ -387,9 +296,9 @@ EXPORT_SYMBOL(blk_stop_queue); | |||
387 | */ | 296 | */ |
388 | void blk_sync_queue(struct request_queue *q) | 297 | void blk_sync_queue(struct request_queue *q) |
389 | { | 298 | { |
390 | del_timer_sync(&q->unplug_timer); | ||
391 | del_timer_sync(&q->timeout); | 299 | del_timer_sync(&q->timeout); |
392 | cancel_work_sync(&q->unplug_work); | 300 | cancel_delayed_work_sync(&q->delay_work); |
301 | queue_sync_plugs(q); | ||
393 | } | 302 | } |
394 | EXPORT_SYMBOL(blk_sync_queue); | 303 | EXPORT_SYMBOL(blk_sync_queue); |
395 | 304 | ||
@@ -405,14 +314,9 @@ EXPORT_SYMBOL(blk_sync_queue); | |||
405 | */ | 314 | */ |
406 | void __blk_run_queue(struct request_queue *q, bool force_kblockd) | 315 | void __blk_run_queue(struct request_queue *q, bool force_kblockd) |
407 | { | 316 | { |
408 | blk_remove_plug(q); | ||
409 | |||
410 | if (unlikely(blk_queue_stopped(q))) | 317 | if (unlikely(blk_queue_stopped(q))) |
411 | return; | 318 | return; |
412 | 319 | ||
413 | if (elv_queue_empty(q)) | ||
414 | return; | ||
415 | |||
416 | /* | 320 | /* |
417 | * Only recurse once to avoid overrunning the stack, let the unplug | 321 | * Only recurse once to avoid overrunning the stack, let the unplug |
418 | * handling reinvoke the handler shortly if we already got there. | 322 | * handling reinvoke the handler shortly if we already got there. |
@@ -420,10 +324,8 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd) | |||
420 | if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { | 324 | if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { |
421 | q->request_fn(q); | 325 | q->request_fn(q); |
422 | queue_flag_clear(QUEUE_FLAG_REENTER, q); | 326 | queue_flag_clear(QUEUE_FLAG_REENTER, q); |
423 | } else { | 327 | } else |
424 | queue_flag_set(QUEUE_FLAG_PLUGGED, q); | 328 | queue_delayed_work(kblockd_workqueue, &q->delay_work, 0); |
425 | kblockd_schedule_work(q, &q->unplug_work); | ||
426 | } | ||
427 | } | 329 | } |
428 | EXPORT_SYMBOL(__blk_run_queue); | 330 | EXPORT_SYMBOL(__blk_run_queue); |
429 | 331 | ||
@@ -517,8 +419,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) | |||
517 | if (!q) | 419 | if (!q) |
518 | return NULL; | 420 | return NULL; |
519 | 421 | ||
520 | q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug; | ||
521 | q->backing_dev_info.unplug_io_data = q; | ||
522 | q->backing_dev_info.ra_pages = | 422 | q->backing_dev_info.ra_pages = |
523 | (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; | 423 | (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; |
524 | q->backing_dev_info.state = 0; | 424 | q->backing_dev_info.state = 0; |
@@ -538,13 +438,12 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) | |||
538 | 438 | ||
539 | setup_timer(&q->backing_dev_info.laptop_mode_wb_timer, | 439 | setup_timer(&q->backing_dev_info.laptop_mode_wb_timer, |
540 | laptop_mode_timer_fn, (unsigned long) q); | 440 | laptop_mode_timer_fn, (unsigned long) q); |
541 | init_timer(&q->unplug_timer); | ||
542 | setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); | 441 | setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); |
543 | INIT_LIST_HEAD(&q->timeout_list); | 442 | INIT_LIST_HEAD(&q->timeout_list); |
544 | INIT_LIST_HEAD(&q->flush_queue[0]); | 443 | INIT_LIST_HEAD(&q->flush_queue[0]); |
545 | INIT_LIST_HEAD(&q->flush_queue[1]); | 444 | INIT_LIST_HEAD(&q->flush_queue[1]); |
546 | INIT_LIST_HEAD(&q->flush_data_in_flight); | 445 | INIT_LIST_HEAD(&q->flush_data_in_flight); |
547 | INIT_WORK(&q->unplug_work, blk_unplug_work); | 446 | INIT_DELAYED_WORK(&q->delay_work, blk_delay_work); |
548 | 447 | ||
549 | kobject_init(&q->kobj, &blk_queue_ktype); | 448 | kobject_init(&q->kobj, &blk_queue_ktype); |
550 | 449 | ||
@@ -639,7 +538,6 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn, | |||
639 | q->request_fn = rfn; | 538 | q->request_fn = rfn; |
640 | q->prep_rq_fn = NULL; | 539 | q->prep_rq_fn = NULL; |
641 | q->unprep_rq_fn = NULL; | 540 | q->unprep_rq_fn = NULL; |
642 | q->unplug_fn = generic_unplug_device; | ||
643 | q->queue_flags = QUEUE_FLAG_DEFAULT; | 541 | q->queue_flags = QUEUE_FLAG_DEFAULT; |
644 | 542 | ||
645 | /* Override internal queue lock with supplied lock pointer */ | 543 | /* Override internal queue lock with supplied lock pointer */ |
@@ -677,6 +575,8 @@ int blk_get_queue(struct request_queue *q) | |||
677 | 575 | ||
678 | static inline void blk_free_request(struct request_queue *q, struct request *rq) | 576 | static inline void blk_free_request(struct request_queue *q, struct request *rq) |
679 | { | 577 | { |
578 | BUG_ON(rq->cmd_flags & REQ_ON_PLUG); | ||
579 | |||
680 | if (rq->cmd_flags & REQ_ELVPRIV) | 580 | if (rq->cmd_flags & REQ_ELVPRIV) |
681 | elv_put_request(q, rq); | 581 | elv_put_request(q, rq); |
682 | mempool_free(rq, q->rq.rq_pool); | 582 | mempool_free(rq, q->rq.rq_pool); |
@@ -898,8 +798,8 @@ out: | |||
898 | } | 798 | } |
899 | 799 | ||
900 | /* | 800 | /* |
901 | * No available requests for this queue, unplug the device and wait for some | 801 | * No available requests for this queue, wait for some requests to become |
902 | * requests to become available. | 802 | * available. |
903 | * | 803 | * |
904 | * Called with q->queue_lock held, and returns with it unlocked. | 804 | * Called with q->queue_lock held, and returns with it unlocked. |
905 | */ | 805 | */ |
@@ -920,7 +820,6 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags, | |||
920 | 820 | ||
921 | trace_block_sleeprq(q, bio, rw_flags & 1); | 821 | trace_block_sleeprq(q, bio, rw_flags & 1); |
922 | 822 | ||
923 | __generic_unplug_device(q); | ||
924 | spin_unlock_irq(q->queue_lock); | 823 | spin_unlock_irq(q->queue_lock); |
925 | io_schedule(); | 824 | io_schedule(); |
926 | 825 | ||
@@ -1042,6 +941,13 @@ void blk_requeue_request(struct request_queue *q, struct request *rq) | |||
1042 | } | 941 | } |
1043 | EXPORT_SYMBOL(blk_requeue_request); | 942 | EXPORT_SYMBOL(blk_requeue_request); |
1044 | 943 | ||
944 | static void add_acct_request(struct request_queue *q, struct request *rq, | ||
945 | int where) | ||
946 | { | ||
947 | drive_stat_acct(rq, 1); | ||
948 | __elv_add_request(q, rq, where); | ||
949 | } | ||
950 | |||
1045 | /** | 951 | /** |
1046 | * blk_insert_request - insert a special request into a request queue | 952 | * blk_insert_request - insert a special request into a request queue |
1047 | * @q: request queue where request should be inserted | 953 | * @q: request queue where request should be inserted |
@@ -1084,8 +990,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq, | |||
1084 | if (blk_rq_tagged(rq)) | 990 | if (blk_rq_tagged(rq)) |
1085 | blk_queue_end_tag(q, rq); | 991 | blk_queue_end_tag(q, rq); |
1086 | 992 | ||
1087 | drive_stat_acct(rq, 1); | 993 | add_acct_request(q, rq, where); |
1088 | __elv_add_request(q, rq, where, 0); | ||
1089 | __blk_run_queue(q, false); | 994 | __blk_run_queue(q, false); |
1090 | spin_unlock_irqrestore(q->queue_lock, flags); | 995 | spin_unlock_irqrestore(q->queue_lock, flags); |
1091 | } | 996 | } |
@@ -1206,6 +1111,113 @@ void blk_add_request_payload(struct request *rq, struct page *page, | |||
1206 | } | 1111 | } |
1207 | EXPORT_SYMBOL_GPL(blk_add_request_payload); | 1112 | EXPORT_SYMBOL_GPL(blk_add_request_payload); |
1208 | 1113 | ||
1114 | static bool bio_attempt_back_merge(struct request_queue *q, struct request *req, | ||
1115 | struct bio *bio) | ||
1116 | { | ||
1117 | const int ff = bio->bi_rw & REQ_FAILFAST_MASK; | ||
1118 | |||
1119 | /* | ||
1120 | * Debug stuff, kill later | ||
1121 | */ | ||
1122 | if (!rq_mergeable(req)) { | ||
1123 | blk_dump_rq_flags(req, "back"); | ||
1124 | return false; | ||
1125 | } | ||
1126 | |||
1127 | if (!ll_back_merge_fn(q, req, bio)) | ||
1128 | return false; | ||
1129 | |||
1130 | trace_block_bio_backmerge(q, bio); | ||
1131 | |||
1132 | if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) | ||
1133 | blk_rq_set_mixed_merge(req); | ||
1134 | |||
1135 | req->biotail->bi_next = bio; | ||
1136 | req->biotail = bio; | ||
1137 | req->__data_len += bio->bi_size; | ||
1138 | req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); | ||
1139 | |||
1140 | drive_stat_acct(req, 0); | ||
1141 | return true; | ||
1142 | } | ||
1143 | |||
1144 | static bool bio_attempt_front_merge(struct request_queue *q, | ||
1145 | struct request *req, struct bio *bio) | ||
1146 | { | ||
1147 | const int ff = bio->bi_rw & REQ_FAILFAST_MASK; | ||
1148 | sector_t sector; | ||
1149 | |||
1150 | /* | ||
1151 | * Debug stuff, kill later | ||
1152 | */ | ||
1153 | if (!rq_mergeable(req)) { | ||
1154 | blk_dump_rq_flags(req, "front"); | ||
1155 | return false; | ||
1156 | } | ||
1157 | |||
1158 | if (!ll_front_merge_fn(q, req, bio)) | ||
1159 | return false; | ||
1160 | |||
1161 | trace_block_bio_frontmerge(q, bio); | ||
1162 | |||
1163 | if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) | ||
1164 | blk_rq_set_mixed_merge(req); | ||
1165 | |||
1166 | sector = bio->bi_sector; | ||
1167 | |||
1168 | bio->bi_next = req->bio; | ||
1169 | req->bio = bio; | ||
1170 | |||
1171 | /* | ||
1172 | * may not be valid. if the low level driver said | ||
1173 | * it didn't need a bounce buffer then it better | ||
1174 | * not touch req->buffer either... | ||
1175 | */ | ||
1176 | req->buffer = bio_data(bio); | ||
1177 | req->__sector = bio->bi_sector; | ||
1178 | req->__data_len += bio->bi_size; | ||
1179 | req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); | ||
1180 | |||
1181 | drive_stat_acct(req, 0); | ||
1182 | return true; | ||
1183 | } | ||
1184 | |||
1185 | /* | ||
1186 | * Attempts to merge with the plugged list in the current process. Returns | ||
1187 | * true if merge was succesful, otherwise false. | ||
1188 | */ | ||
1189 | static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q, | ||
1190 | struct bio *bio) | ||
1191 | { | ||
1192 | struct blk_plug *plug; | ||
1193 | struct request *rq; | ||
1194 | bool ret = false; | ||
1195 | |||
1196 | plug = tsk->plug; | ||
1197 | if (!plug) | ||
1198 | goto out; | ||
1199 | |||
1200 | list_for_each_entry_reverse(rq, &plug->list, queuelist) { | ||
1201 | int el_ret; | ||
1202 | |||
1203 | if (rq->q != q) | ||
1204 | continue; | ||
1205 | |||
1206 | el_ret = elv_try_merge(rq, bio); | ||
1207 | if (el_ret == ELEVATOR_BACK_MERGE) { | ||
1208 | ret = bio_attempt_back_merge(q, rq, bio); | ||
1209 | if (ret) | ||
1210 | break; | ||
1211 | } else if (el_ret == ELEVATOR_FRONT_MERGE) { | ||
1212 | ret = bio_attempt_front_merge(q, rq, bio); | ||
1213 | if (ret) | ||
1214 | break; | ||
1215 | } | ||
1216 | } | ||
1217 | out: | ||
1218 | return ret; | ||
1219 | } | ||
1220 | |||
1209 | void init_request_from_bio(struct request *req, struct bio *bio) | 1221 | void init_request_from_bio(struct request *req, struct bio *bio) |
1210 | { | 1222 | { |
1211 | req->cpu = bio->bi_comp_cpu; | 1223 | req->cpu = bio->bi_comp_cpu; |
@@ -1221,26 +1233,12 @@ void init_request_from_bio(struct request *req, struct bio *bio) | |||
1221 | blk_rq_bio_prep(req->q, req, bio); | 1233 | blk_rq_bio_prep(req->q, req, bio); |
1222 | } | 1234 | } |
1223 | 1235 | ||
1224 | /* | ||
1225 | * Only disabling plugging for non-rotational devices if it does tagging | ||
1226 | * as well, otherwise we do need the proper merging | ||
1227 | */ | ||
1228 | static inline bool queue_should_plug(struct request_queue *q) | ||
1229 | { | ||
1230 | return !(blk_queue_nonrot(q) && blk_queue_tagged(q)); | ||
1231 | } | ||
1232 | |||
1233 | static int __make_request(struct request_queue *q, struct bio *bio) | 1236 | static int __make_request(struct request_queue *q, struct bio *bio) |
1234 | { | 1237 | { |
1235 | struct request *req; | ||
1236 | int el_ret; | ||
1237 | unsigned int bytes = bio->bi_size; | ||
1238 | const unsigned short prio = bio_prio(bio); | ||
1239 | const bool sync = !!(bio->bi_rw & REQ_SYNC); | 1238 | const bool sync = !!(bio->bi_rw & REQ_SYNC); |
1240 | const bool unplug = !!(bio->bi_rw & REQ_UNPLUG); | 1239 | struct blk_plug *plug; |
1241 | const unsigned long ff = bio->bi_rw & REQ_FAILFAST_MASK; | 1240 | int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT; |
1242 | int where = ELEVATOR_INSERT_SORT; | 1241 | struct request *req; |
1243 | int rw_flags; | ||
1244 | 1242 | ||
1245 | /* | 1243 | /* |
1246 | * low level driver can indicate that it wants pages above a | 1244 | * low level driver can indicate that it wants pages above a |
@@ -1249,78 +1247,36 @@ static int __make_request(struct request_queue *q, struct bio *bio) | |||
1249 | */ | 1247 | */ |
1250 | blk_queue_bounce(q, &bio); | 1248 | blk_queue_bounce(q, &bio); |
1251 | 1249 | ||
1252 | spin_lock_irq(q->queue_lock); | ||
1253 | |||
1254 | if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) { | 1250 | if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) { |
1251 | spin_lock_irq(q->queue_lock); | ||
1255 | where = ELEVATOR_INSERT_FLUSH; | 1252 | where = ELEVATOR_INSERT_FLUSH; |
1256 | goto get_rq; | 1253 | goto get_rq; |
1257 | } | 1254 | } |
1258 | 1255 | ||
1259 | if (elv_queue_empty(q)) | 1256 | /* |
1260 | goto get_rq; | 1257 | * Check if we can merge with the plugged list before grabbing |
1261 | 1258 | * any locks. | |
1262 | el_ret = elv_merge(q, &req, bio); | 1259 | */ |
1263 | switch (el_ret) { | 1260 | if (attempt_plug_merge(current, q, bio)) |
1264 | case ELEVATOR_BACK_MERGE: | ||
1265 | BUG_ON(!rq_mergeable(req)); | ||
1266 | |||
1267 | if (!ll_back_merge_fn(q, req, bio)) | ||
1268 | break; | ||
1269 | |||
1270 | trace_block_bio_backmerge(q, bio); | ||
1271 | |||
1272 | if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) | ||
1273 | blk_rq_set_mixed_merge(req); | ||
1274 | |||
1275 | req->biotail->bi_next = bio; | ||
1276 | req->biotail = bio; | ||
1277 | req->__data_len += bytes; | ||
1278 | req->ioprio = ioprio_best(req->ioprio, prio); | ||
1279 | if (!blk_rq_cpu_valid(req)) | ||
1280 | req->cpu = bio->bi_comp_cpu; | ||
1281 | drive_stat_acct(req, 0); | ||
1282 | elv_bio_merged(q, req, bio); | ||
1283 | if (!attempt_back_merge(q, req)) | ||
1284 | elv_merged_request(q, req, el_ret); | ||
1285 | goto out; | 1261 | goto out; |
1286 | 1262 | ||
1287 | case ELEVATOR_FRONT_MERGE: | 1263 | spin_lock_irq(q->queue_lock); |
1288 | BUG_ON(!rq_mergeable(req)); | ||
1289 | |||
1290 | if (!ll_front_merge_fn(q, req, bio)) | ||
1291 | break; | ||
1292 | |||
1293 | trace_block_bio_frontmerge(q, bio); | ||
1294 | 1264 | ||
1295 | if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) { | 1265 | el_ret = elv_merge(q, &req, bio); |
1296 | blk_rq_set_mixed_merge(req); | 1266 | if (el_ret == ELEVATOR_BACK_MERGE) { |
1297 | req->cmd_flags &= ~REQ_FAILFAST_MASK; | 1267 | BUG_ON(req->cmd_flags & REQ_ON_PLUG); |
1298 | req->cmd_flags |= ff; | 1268 | if (bio_attempt_back_merge(q, req, bio)) { |
1269 | if (!attempt_back_merge(q, req)) | ||
1270 | elv_merged_request(q, req, el_ret); | ||
1271 | goto out_unlock; | ||
1272 | } | ||
1273 | } else if (el_ret == ELEVATOR_FRONT_MERGE) { | ||
1274 | BUG_ON(req->cmd_flags & REQ_ON_PLUG); | ||
1275 | if (bio_attempt_front_merge(q, req, bio)) { | ||
1276 | if (!attempt_front_merge(q, req)) | ||
1277 | elv_merged_request(q, req, el_ret); | ||
1278 | goto out_unlock; | ||
1299 | } | 1279 | } |
1300 | |||
1301 | bio->bi_next = req->bio; | ||
1302 | req->bio = bio; | ||
1303 | |||
1304 | /* | ||
1305 | * may not be valid. if the low level driver said | ||
1306 | * it didn't need a bounce buffer then it better | ||
1307 | * not touch req->buffer either... | ||
1308 | */ | ||
1309 | req->buffer = bio_data(bio); | ||
1310 | req->__sector = bio->bi_sector; | ||
1311 | req->__data_len += bytes; | ||
1312 | req->ioprio = ioprio_best(req->ioprio, prio); | ||
1313 | if (!blk_rq_cpu_valid(req)) | ||
1314 | req->cpu = bio->bi_comp_cpu; | ||
1315 | drive_stat_acct(req, 0); | ||
1316 | elv_bio_merged(q, req, bio); | ||
1317 | if (!attempt_front_merge(q, req)) | ||
1318 | elv_merged_request(q, req, el_ret); | ||
1319 | goto out; | ||
1320 | |||
1321 | /* ELV_NO_MERGE: elevator says don't/can't merge. */ | ||
1322 | default: | ||
1323 | ; | ||
1324 | } | 1280 | } |
1325 | 1281 | ||
1326 | get_rq: | 1282 | get_rq: |
@@ -1347,20 +1303,35 @@ get_rq: | |||
1347 | */ | 1303 | */ |
1348 | init_request_from_bio(req, bio); | 1304 | init_request_from_bio(req, bio); |
1349 | 1305 | ||
1350 | spin_lock_irq(q->queue_lock); | ||
1351 | if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) || | 1306 | if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) || |
1352 | bio_flagged(bio, BIO_CPU_AFFINE)) | 1307 | bio_flagged(bio, BIO_CPU_AFFINE)) { |
1353 | req->cpu = blk_cpu_to_group(smp_processor_id()); | 1308 | req->cpu = blk_cpu_to_group(get_cpu()); |
1354 | if (queue_should_plug(q) && elv_queue_empty(q)) | 1309 | put_cpu(); |
1355 | blk_plug_device(q); | 1310 | } |
1356 | 1311 | ||
1357 | /* insert the request into the elevator */ | 1312 | plug = current->plug; |
1358 | drive_stat_acct(req, 1); | 1313 | if (plug) { |
1359 | __elv_add_request(q, req, where, 0); | 1314 | if (!plug->should_sort && !list_empty(&plug->list)) { |
1315 | struct request *__rq; | ||
1316 | |||
1317 | __rq = list_entry_rq(plug->list.prev); | ||
1318 | if (__rq->q != q) | ||
1319 | plug->should_sort = 1; | ||
1320 | } | ||
1321 | /* | ||
1322 | * Debug flag, kill later | ||
1323 | */ | ||
1324 | req->cmd_flags |= REQ_ON_PLUG; | ||
1325 | list_add_tail(&req->queuelist, &plug->list); | ||
1326 | drive_stat_acct(req, 1); | ||
1327 | } else { | ||
1328 | spin_lock_irq(q->queue_lock); | ||
1329 | add_acct_request(q, req, where); | ||
1330 | __blk_run_queue(q, false); | ||
1331 | out_unlock: | ||
1332 | spin_unlock_irq(q->queue_lock); | ||
1333 | } | ||
1360 | out: | 1334 | out: |
1361 | if (unplug || !queue_should_plug(q)) | ||
1362 | __generic_unplug_device(q); | ||
1363 | spin_unlock_irq(q->queue_lock); | ||
1364 | return 0; | 1335 | return 0; |
1365 | } | 1336 | } |
1366 | 1337 | ||
@@ -1763,9 +1734,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq) | |||
1763 | */ | 1734 | */ |
1764 | BUG_ON(blk_queued_rq(rq)); | 1735 | BUG_ON(blk_queued_rq(rq)); |
1765 | 1736 | ||
1766 | drive_stat_acct(rq, 1); | 1737 | add_acct_request(q, rq, ELEVATOR_INSERT_BACK); |
1767 | __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0); | ||
1768 | |||
1769 | spin_unlock_irqrestore(q->queue_lock, flags); | 1738 | spin_unlock_irqrestore(q->queue_lock, flags); |
1770 | 1739 | ||
1771 | return 0; | 1740 | return 0; |
@@ -2643,6 +2612,113 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) | |||
2643 | } | 2612 | } |
2644 | EXPORT_SYMBOL(kblockd_schedule_work); | 2613 | EXPORT_SYMBOL(kblockd_schedule_work); |
2645 | 2614 | ||
2615 | int kblockd_schedule_delayed_work(struct request_queue *q, | ||
2616 | struct delayed_work *dwork, unsigned long delay) | ||
2617 | { | ||
2618 | return queue_delayed_work(kblockd_workqueue, dwork, delay); | ||
2619 | } | ||
2620 | EXPORT_SYMBOL(kblockd_schedule_delayed_work); | ||
2621 | |||
2622 | #define PLUG_MAGIC 0x91827364 | ||
2623 | |||
2624 | void blk_start_plug(struct blk_plug *plug) | ||
2625 | { | ||
2626 | struct task_struct *tsk = current; | ||
2627 | |||
2628 | plug->magic = PLUG_MAGIC; | ||
2629 | INIT_LIST_HEAD(&plug->list); | ||
2630 | plug->should_sort = 0; | ||
2631 | |||
2632 | /* | ||
2633 | * If this is a nested plug, don't actually assign it. It will be | ||
2634 | * flushed on its own. | ||
2635 | */ | ||
2636 | if (!tsk->plug) { | ||
2637 | /* | ||
2638 | * Store ordering should not be needed here, since a potential | ||
2639 | * preempt will imply a full memory barrier | ||
2640 | */ | ||
2641 | tsk->plug = plug; | ||
2642 | } | ||
2643 | } | ||
2644 | EXPORT_SYMBOL(blk_start_plug); | ||
2645 | |||
2646 | static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b) | ||
2647 | { | ||
2648 | struct request *rqa = container_of(a, struct request, queuelist); | ||
2649 | struct request *rqb = container_of(b, struct request, queuelist); | ||
2650 | |||
2651 | return !(rqa->q == rqb->q); | ||
2652 | } | ||
2653 | |||
2654 | static void flush_plug_list(struct blk_plug *plug) | ||
2655 | { | ||
2656 | struct request_queue *q; | ||
2657 | unsigned long flags; | ||
2658 | struct request *rq; | ||
2659 | |||
2660 | BUG_ON(plug->magic != PLUG_MAGIC); | ||
2661 | |||
2662 | if (list_empty(&plug->list)) | ||
2663 | return; | ||
2664 | |||
2665 | if (plug->should_sort) | ||
2666 | list_sort(NULL, &plug->list, plug_rq_cmp); | ||
2667 | |||
2668 | q = NULL; | ||
2669 | local_irq_save(flags); | ||
2670 | while (!list_empty(&plug->list)) { | ||
2671 | rq = list_entry_rq(plug->list.next); | ||
2672 | list_del_init(&rq->queuelist); | ||
2673 | BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG)); | ||
2674 | BUG_ON(!rq->q); | ||
2675 | if (rq->q != q) { | ||
2676 | if (q) { | ||
2677 | __blk_run_queue(q, false); | ||
2678 | spin_unlock(q->queue_lock); | ||
2679 | } | ||
2680 | q = rq->q; | ||
2681 | spin_lock(q->queue_lock); | ||
2682 | } | ||
2683 | rq->cmd_flags &= ~REQ_ON_PLUG; | ||
2684 | |||
2685 | /* | ||
2686 | * rq is already accounted, so use raw insert | ||
2687 | */ | ||
2688 | __elv_add_request(q, rq, ELEVATOR_INSERT_SORT); | ||
2689 | } | ||
2690 | |||
2691 | if (q) { | ||
2692 | __blk_run_queue(q, false); | ||
2693 | spin_unlock(q->queue_lock); | ||
2694 | } | ||
2695 | |||
2696 | BUG_ON(!list_empty(&plug->list)); | ||
2697 | local_irq_restore(flags); | ||
2698 | } | ||
2699 | |||
2700 | static void __blk_finish_plug(struct task_struct *tsk, struct blk_plug *plug) | ||
2701 | { | ||
2702 | flush_plug_list(plug); | ||
2703 | |||
2704 | if (plug == tsk->plug) | ||
2705 | tsk->plug = NULL; | ||
2706 | } | ||
2707 | |||
2708 | void blk_finish_plug(struct blk_plug *plug) | ||
2709 | { | ||
2710 | if (plug) | ||
2711 | __blk_finish_plug(current, plug); | ||
2712 | } | ||
2713 | EXPORT_SYMBOL(blk_finish_plug); | ||
2714 | |||
2715 | void __blk_flush_plug(struct task_struct *tsk, struct blk_plug *plug) | ||
2716 | { | ||
2717 | __blk_finish_plug(tsk, plug); | ||
2718 | tsk->plug = plug; | ||
2719 | } | ||
2720 | EXPORT_SYMBOL(__blk_flush_plug); | ||
2721 | |||
2646 | int __init blk_dev_init(void) | 2722 | int __init blk_dev_init(void) |
2647 | { | 2723 | { |
2648 | BUILD_BUG_ON(__REQ_NR_BITS > 8 * | 2724 | BUILD_BUG_ON(__REQ_NR_BITS > 8 * |
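
blk_delay_queue(), added above, is the replacement for plug-timer based restarts when a driver briefly runs out of resources ("Sometimes queueing needs to be postponed for a little while, to allow resources to come back"). A hedged sketch of how a request_fn might use it; example_request_fn(), example_grab_resource() and example_dispatch() are invented for illustration, and the 3 ms delay is arbitrary.

/*
 * Sketch only: back off when a hypothetical resource pool is exhausted,
 * using the blk_delay_queue() helper introduced in this merge.  Called
 * with q->queue_lock held, as usual for a request_fn.
 */
static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		if (!example_grab_resource(rq)) {	/* hypothetical helper */
			/* Put the request back and retry queueing shortly. */
			blk_requeue_request(q, rq);
			blk_delay_queue(q, 3);		/* msecs; runs q->delay_work */
			break;
		}
		example_dispatch(rq);			/* hypothetical dispatch */
	}
}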
diff --git a/block/blk-exec.c b/block/blk-exec.c
index cf1456a02acd..7482b7fa863b 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -54,8 +54,8 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, | |||
54 | rq->end_io = done; | 54 | rq->end_io = done; |
55 | WARN_ON(irqs_disabled()); | 55 | WARN_ON(irqs_disabled()); |
56 | spin_lock_irq(q->queue_lock); | 56 | spin_lock_irq(q->queue_lock); |
57 | __elv_add_request(q, rq, where, 1); | 57 | __elv_add_request(q, rq, where); |
58 | __generic_unplug_device(q); | 58 | __blk_run_queue(q, false); |
59 | /* the queue is stopped so it won't be plugged+unplugged */ | 59 | /* the queue is stopped so it won't be plugged+unplugged */ |
60 | if (rq->cmd_type == REQ_TYPE_PM_RESUME) | 60 | if (rq->cmd_type == REQ_TYPE_PM_RESUME) |
61 | q->request_fn(q); | 61 | q->request_fn(q); |
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 0bd8c9c5d6e5..93d5fd8e51eb 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -194,7 +194,6 @@ static void flush_end_io(struct request *flush_rq, int error) | |||
194 | { | 194 | { |
195 | struct request_queue *q = flush_rq->q; | 195 | struct request_queue *q = flush_rq->q; |
196 | struct list_head *running = &q->flush_queue[q->flush_running_idx]; | 196 | struct list_head *running = &q->flush_queue[q->flush_running_idx]; |
197 | bool was_empty = elv_queue_empty(q); | ||
198 | bool queued = false; | 197 | bool queued = false; |
199 | struct request *rq, *n; | 198 | struct request *rq, *n; |
200 | 199 | ||
@@ -218,7 +217,7 @@ static void flush_end_io(struct request *flush_rq, int error) | |||
218 | * from request completion path and calling directly into | 217 | * from request completion path and calling directly into |
219 | * request_fn may confuse the driver. Always use kblockd. | 218 | * request_fn may confuse the driver. Always use kblockd. |
220 | */ | 219 | */ |
221 | if (queued && was_empty) | 220 | if (queued) |
222 | __blk_run_queue(q, true); | 221 | __blk_run_queue(q, true); |
223 | } | 222 | } |
224 | 223 | ||
@@ -269,13 +268,12 @@ static bool blk_kick_flush(struct request_queue *q) | |||
269 | static void flush_data_end_io(struct request *rq, int error) | 268 | static void flush_data_end_io(struct request *rq, int error) |
270 | { | 269 | { |
271 | struct request_queue *q = rq->q; | 270 | struct request_queue *q = rq->q; |
272 | bool was_empty = elv_queue_empty(q); | ||
273 | 271 | ||
274 | /* | 272 | /* |
275 | * After populating an empty queue, kick it to avoid stall. Read | 273 | * After populating an empty queue, kick it to avoid stall. Read |
276 | * the comment in flush_end_io(). | 274 | * the comment in flush_end_io(). |
277 | */ | 275 | */ |
278 | if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error) && was_empty) | 276 | if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error)) |
279 | __blk_run_queue(q, true); | 277 | __blk_run_queue(q, true); |
280 | } | 278 | } |
281 | 279 | ||
diff --git a/block/blk-settings.c b/block/blk-settings.c
index df649fa59ded..1fa769293597 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -164,14 +164,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn) | |||
164 | blk_queue_congestion_threshold(q); | 164 | blk_queue_congestion_threshold(q); |
165 | q->nr_batching = BLK_BATCH_REQ; | 165 | q->nr_batching = BLK_BATCH_REQ; |
166 | 166 | ||
167 | q->unplug_thresh = 4; /* hmm */ | ||
168 | q->unplug_delay = msecs_to_jiffies(3); /* 3 milliseconds */ | ||
169 | if (q->unplug_delay == 0) | ||
170 | q->unplug_delay = 1; | ||
171 | |||
172 | q->unplug_timer.function = blk_unplug_timeout; | ||
173 | q->unplug_timer.data = (unsigned long)q; | ||
174 | |||
175 | blk_set_default_limits(&q->limits); | 167 | blk_set_default_limits(&q->limits); |
176 | blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS); | 168 | blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS); |
177 | 169 | ||
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 32dd3e4b041d..37abbfc68590 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -768,6 +768,7 @@ static int throtl_dispatch(struct request_queue *q) | |||
768 | unsigned int nr_disp = 0; | 768 | unsigned int nr_disp = 0; |
769 | struct bio_list bio_list_on_stack; | 769 | struct bio_list bio_list_on_stack; |
770 | struct bio *bio; | 770 | struct bio *bio; |
771 | struct blk_plug plug; | ||
771 | 772 | ||
772 | spin_lock_irq(q->queue_lock); | 773 | spin_lock_irq(q->queue_lock); |
773 | 774 | ||
@@ -796,9 +797,10 @@ out: | |||
796 | * immediate dispatch | 797 | * immediate dispatch |
797 | */ | 798 | */ |
798 | if (nr_disp) { | 799 | if (nr_disp) { |
800 | blk_start_plug(&plug); | ||
799 | while((bio = bio_list_pop(&bio_list_on_stack))) | 801 | while((bio = bio_list_pop(&bio_list_on_stack))) |
800 | generic_make_request(bio); | 802 | generic_make_request(bio); |
801 | blk_unplug(q); | 803 | blk_finish_plug(&plug); |
802 | } | 804 | } |
803 | return nr_disp; | 805 | return nr_disp; |
804 | } | 806 | } |
diff --git a/block/blk.h b/block/blk.h
index 284b500852bd..49d21af81d07 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -18,8 +18,6 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq, | |||
18 | void blk_dequeue_request(struct request *rq); | 18 | void blk_dequeue_request(struct request *rq); |
19 | void __blk_queue_free_tags(struct request_queue *q); | 19 | void __blk_queue_free_tags(struct request_queue *q); |
20 | 20 | ||
21 | void blk_unplug_work(struct work_struct *work); | ||
22 | void blk_unplug_timeout(unsigned long data); | ||
23 | void blk_rq_timed_out_timer(unsigned long data); | 21 | void blk_rq_timed_out_timer(unsigned long data); |
24 | void blk_delete_timer(struct request *); | 22 | void blk_delete_timer(struct request *); |
25 | void blk_add_timer(struct request *); | 23 | void blk_add_timer(struct request *); |
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9697053f80bc..c826ef81c679 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -500,13 +500,6 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) | |||
500 | } | 500 | } |
501 | } | 501 | } |
502 | 502 | ||
503 | static int cfq_queue_empty(struct request_queue *q) | ||
504 | { | ||
505 | struct cfq_data *cfqd = q->elevator->elevator_data; | ||
506 | |||
507 | return !cfqd->rq_queued; | ||
508 | } | ||
509 | |||
510 | /* | 503 | /* |
511 | * Scale schedule slice based on io priority. Use the sync time slice only | 504 | * Scale schedule slice based on io priority. Use the sync time slice only |
512 | * if a queue is marked sync and has sync io queued. A sync queue with async | 505 | * if a queue is marked sync and has sync io queued. A sync queue with async |
@@ -4080,7 +4073,6 @@ static struct elevator_type iosched_cfq = { | |||
4080 | .elevator_add_req_fn = cfq_insert_request, | 4073 | .elevator_add_req_fn = cfq_insert_request, |
4081 | .elevator_activate_req_fn = cfq_activate_request, | 4074 | .elevator_activate_req_fn = cfq_activate_request, |
4082 | .elevator_deactivate_req_fn = cfq_deactivate_request, | 4075 | .elevator_deactivate_req_fn = cfq_deactivate_request, |
4083 | .elevator_queue_empty_fn = cfq_queue_empty, | ||
4084 | .elevator_completed_req_fn = cfq_completed_request, | 4076 | .elevator_completed_req_fn = cfq_completed_request, |
4085 | .elevator_former_req_fn = elv_rb_former_request, | 4077 | .elevator_former_req_fn = elv_rb_former_request, |
4086 | .elevator_latter_req_fn = elv_rb_latter_request, | 4078 | .elevator_latter_req_fn = elv_rb_latter_request, |
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index b547cbca7b23..5139c0ea1864 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -326,14 +326,6 @@ dispatch_request: | |||
326 | return 1; | 326 | return 1; |
327 | } | 327 | } |
328 | 328 | ||
329 | static int deadline_queue_empty(struct request_queue *q) | ||
330 | { | ||
331 | struct deadline_data *dd = q->elevator->elevator_data; | ||
332 | |||
333 | return list_empty(&dd->fifo_list[WRITE]) | ||
334 | && list_empty(&dd->fifo_list[READ]); | ||
335 | } | ||
336 | |||
337 | static void deadline_exit_queue(struct elevator_queue *e) | 329 | static void deadline_exit_queue(struct elevator_queue *e) |
338 | { | 330 | { |
339 | struct deadline_data *dd = e->elevator_data; | 331 | struct deadline_data *dd = e->elevator_data; |
@@ -445,7 +437,6 @@ static struct elevator_type iosched_deadline = { | |||
445 | .elevator_merge_req_fn = deadline_merged_requests, | 437 | .elevator_merge_req_fn = deadline_merged_requests, |
446 | .elevator_dispatch_fn = deadline_dispatch_requests, | 438 | .elevator_dispatch_fn = deadline_dispatch_requests, |
447 | .elevator_add_req_fn = deadline_add_request, | 439 | .elevator_add_req_fn = deadline_add_request, |
448 | .elevator_queue_empty_fn = deadline_queue_empty, | ||
449 | .elevator_former_req_fn = elv_rb_former_request, | 440 | .elevator_former_req_fn = elv_rb_former_request, |
450 | .elevator_latter_req_fn = elv_rb_latter_request, | 441 | .elevator_latter_req_fn = elv_rb_latter_request, |
451 | .elevator_init_fn = deadline_init_queue, | 442 | .elevator_init_fn = deadline_init_queue, |
diff --git a/block/elevator.c b/block/elevator.c
index fabf3675c913..542ce826b401 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -113,7 +113,7 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio) | |||
113 | } | 113 | } |
114 | EXPORT_SYMBOL(elv_rq_merge_ok); | 114 | EXPORT_SYMBOL(elv_rq_merge_ok); |
115 | 115 | ||
116 | static inline int elv_try_merge(struct request *__rq, struct bio *bio) | 116 | int elv_try_merge(struct request *__rq, struct bio *bio) |
117 | { | 117 | { |
118 | int ret = ELEVATOR_NO_MERGE; | 118 | int ret = ELEVATOR_NO_MERGE; |
119 | 119 | ||
@@ -421,6 +421,8 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq) | |||
421 | struct list_head *entry; | 421 | struct list_head *entry; |
422 | int stop_flags; | 422 | int stop_flags; |
423 | 423 | ||
424 | BUG_ON(rq->cmd_flags & REQ_ON_PLUG); | ||
425 | |||
424 | if (q->last_merge == rq) | 426 | if (q->last_merge == rq) |
425 | q->last_merge = NULL; | 427 | q->last_merge = NULL; |
426 | 428 | ||
@@ -617,21 +619,12 @@ void elv_quiesce_end(struct request_queue *q) | |||
617 | 619 | ||
618 | void elv_insert(struct request_queue *q, struct request *rq, int where) | 620 | void elv_insert(struct request_queue *q, struct request *rq, int where) |
619 | { | 621 | { |
620 | int unplug_it = 1; | ||
621 | |||
622 | trace_block_rq_insert(q, rq); | 622 | trace_block_rq_insert(q, rq); |
623 | 623 | ||
624 | rq->q = q; | 624 | rq->q = q; |
625 | 625 | ||
626 | switch (where) { | 626 | switch (where) { |
627 | case ELEVATOR_INSERT_REQUEUE: | 627 | case ELEVATOR_INSERT_REQUEUE: |
628 | /* | ||
629 | * Most requeues happen because of a busy condition, | ||
630 | * don't force unplug of the queue for that case. | ||
631 | * Clear unplug_it and fall through. | ||
632 | */ | ||
633 | unplug_it = 0; | ||
634 | |||
635 | case ELEVATOR_INSERT_FRONT: | 628 | case ELEVATOR_INSERT_FRONT: |
636 | rq->cmd_flags |= REQ_SOFTBARRIER; | 629 | rq->cmd_flags |= REQ_SOFTBARRIER; |
637 | list_add(&rq->queuelist, &q->queue_head); | 630 | list_add(&rq->queuelist, &q->queue_head); |
@@ -677,25 +670,17 @@ void elv_insert(struct request_queue *q, struct request *rq, int where) | |||
677 | rq->cmd_flags |= REQ_SOFTBARRIER; | 670 | rq->cmd_flags |= REQ_SOFTBARRIER; |
678 | blk_insert_flush(rq); | 671 | blk_insert_flush(rq); |
679 | break; | 672 | break; |
680 | |||
681 | default: | 673 | default: |
682 | printk(KERN_ERR "%s: bad insertion point %d\n", | 674 | printk(KERN_ERR "%s: bad insertion point %d\n", |
683 | __func__, where); | 675 | __func__, where); |
684 | BUG(); | 676 | BUG(); |
685 | } | 677 | } |
686 | |||
687 | if (unplug_it && blk_queue_plugged(q)) { | ||
688 | int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC] | ||
689 | - queue_in_flight(q); | ||
690 | |||
691 | if (nrq >= q->unplug_thresh) | ||
692 | __generic_unplug_device(q); | ||
693 | } | ||
694 | } | 678 | } |
695 | 679 | ||
696 | void __elv_add_request(struct request_queue *q, struct request *rq, int where, | 680 | void __elv_add_request(struct request_queue *q, struct request *rq, int where) |
697 | int plug) | ||
698 | { | 681 | { |
682 | BUG_ON(rq->cmd_flags & REQ_ON_PLUG); | ||
683 | |||
699 | if (rq->cmd_flags & REQ_SOFTBARRIER) { | 684 | if (rq->cmd_flags & REQ_SOFTBARRIER) { |
700 | /* barriers are scheduling boundary, update end_sector */ | 685 | /* barriers are scheduling boundary, update end_sector */ |
701 | if (rq->cmd_type == REQ_TYPE_FS || | 686 | if (rq->cmd_type == REQ_TYPE_FS || |
@@ -707,38 +692,20 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where, | |||
707 | where == ELEVATOR_INSERT_SORT) | 692 | where == ELEVATOR_INSERT_SORT) |
708 | where = ELEVATOR_INSERT_BACK; | 693 | where = ELEVATOR_INSERT_BACK; |
709 | 694 | ||
710 | if (plug) | ||
711 | blk_plug_device(q); | ||
712 | |||
713 | elv_insert(q, rq, where); | 695 | elv_insert(q, rq, where); |
714 | } | 696 | } |
715 | EXPORT_SYMBOL(__elv_add_request); | 697 | EXPORT_SYMBOL(__elv_add_request); |
716 | 698 | ||
717 | void elv_add_request(struct request_queue *q, struct request *rq, int where, | 699 | void elv_add_request(struct request_queue *q, struct request *rq, int where) |
718 | int plug) | ||
719 | { | 700 | { |
720 | unsigned long flags; | 701 | unsigned long flags; |
721 | 702 | ||
722 | spin_lock_irqsave(q->queue_lock, flags); | 703 | spin_lock_irqsave(q->queue_lock, flags); |
723 | __elv_add_request(q, rq, where, plug); | 704 | __elv_add_request(q, rq, where); |
724 | spin_unlock_irqrestore(q->queue_lock, flags); | 705 | spin_unlock_irqrestore(q->queue_lock, flags); |
725 | } | 706 | } |
726 | EXPORT_SYMBOL(elv_add_request); | 707 | EXPORT_SYMBOL(elv_add_request); |
727 | 708 | ||
728 | int elv_queue_empty(struct request_queue *q) | ||
729 | { | ||
730 | struct elevator_queue *e = q->elevator; | ||
731 | |||
732 | if (!list_empty(&q->queue_head)) | ||
733 | return 0; | ||
734 | |||
735 | if (e->ops->elevator_queue_empty_fn) | ||
736 | return e->ops->elevator_queue_empty_fn(q); | ||
737 | |||
738 | return 1; | ||
739 | } | ||
740 | EXPORT_SYMBOL(elv_queue_empty); | ||
741 | |||
742 | struct request *elv_latter_request(struct request_queue *q, struct request *rq) | 709 | struct request *elv_latter_request(struct request_queue *q, struct request *rq) |
743 | { | 710 | { |
744 | struct elevator_queue *e = q->elevator; | 711 | struct elevator_queue *e = q->elevator; |
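
For code built against the old elevator API, the changes above amount to dropping the plug flag from elv_add_request()/__elv_add_request(), losing elv_queue_empty(), and running the queue explicitly where an unplug used to be relied on. A minimal sketch of the post-merge calling convention; example_add_and_run() is an invented name, and the pattern mirrors blk_insert_request() and the blk-exec.c hunk earlier.

#include <linux/blkdev.h>
#include <linux/elevator.h>

/*
 * Sketch only: queue a prepared request at the back of the dispatch
 * list and kick the queue, with the post-merge three-argument
 * __elv_add_request() and a direct __blk_run_queue() call.
 */
static void example_add_and_run(struct request_queue *q, struct request *rq)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, ELEVATOR_INSERT_BACK);
	__blk_run_queue(q, false);
	spin_unlock_irqrestore(q->queue_lock, flags);
}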
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 232c4b38cd37..06389e9ef96d 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -39,13 +39,6 @@ static void noop_add_request(struct request_queue *q, struct request *rq) | |||
39 | list_add_tail(&rq->queuelist, &nd->queue); | 39 | list_add_tail(&rq->queuelist, &nd->queue); |
40 | } | 40 | } |
41 | 41 | ||
42 | static int noop_queue_empty(struct request_queue *q) | ||
43 | { | ||
44 | struct noop_data *nd = q->elevator->elevator_data; | ||
45 | |||
46 | return list_empty(&nd->queue); | ||
47 | } | ||
48 | |||
49 | static struct request * | 42 | static struct request * |
50 | noop_former_request(struct request_queue *q, struct request *rq) | 43 | noop_former_request(struct request_queue *q, struct request *rq) |
51 | { | 44 | { |
@@ -90,7 +83,6 @@ static struct elevator_type elevator_noop = { | |||
90 | .elevator_merge_req_fn = noop_merged_requests, | 83 | .elevator_merge_req_fn = noop_merged_requests, |
91 | .elevator_dispatch_fn = noop_dispatch, | 84 | .elevator_dispatch_fn = noop_dispatch, |
92 | .elevator_add_req_fn = noop_add_request, | 85 | .elevator_add_req_fn = noop_add_request, |
93 | .elevator_queue_empty_fn = noop_queue_empty, | ||
94 | .elevator_former_req_fn = noop_former_request, | 86 | .elevator_former_req_fn = noop_former_request, |
95 | .elevator_latter_req_fn = noop_latter_request, | 87 | .elevator_latter_req_fn = noop_latter_request, |
96 | .elevator_init_fn = noop_init_queue, | 88 | .elevator_init_fn = noop_init_queue, |