author    Jens Axboe <jaxboe@fusionio.com>  2011-03-10 02:52:07 -0500
committer Jens Axboe <jaxboe@fusionio.com>  2011-03-10 02:52:07 -0500
commit    7eaceaccab5f40bbfda044629a6298616aeaed50 (patch)
tree      33954d12f63e25a47eb6d86ef3d3d0a5e62bf752 /block
parent    73c101011926c5832e6e141682180c4debe2cf45 (diff)
block: remove per-queue plugging
Code has been converted over to the new explicit on-stack plugging, and delay users have been converted to use the new API for that. So let's kill off the old plugging along with aops->sync_page().

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
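For context, the "explicit on-stack plugging" referred to above is the per-task plug API introduced by the parent commit (73c10101): a submitter opens a plug on its own stack, queues a batch of I/O, and then flushes it, instead of relying on the per-queue unplug timer removed here. A minimal sketch of submitter-side usage follows; the helper name submit_batch and the bio setup are illustrative, not part of this commit.

	#include <linux/blkdev.h>
	#include <linux/bio.h>

	/* Illustrative helper: batch several bios behind one on-stack plug. */
	static void submit_batch(struct bio **bios, int nr)
	{
		struct blk_plug plug;
		int i;

		blk_start_plug(&plug);			/* attach plug to the current task */
		for (i = 0; i < nr; i++)
			submit_bio(READ, bios[i]);	/* requests gather on current->plug */
		blk_finish_plug(&plug);			/* dispatch the batch to the driver */
	}

blk_flush_plug(current), used by the new queue_sync_plugs() helper in the diff below, forces the same flush for whatever the current task has plugged.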
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c          | 173
-rw-r--r--  block/blk-exec.c          |   4
-rw-r--r--  block/blk-flush.c         |   3
-rw-r--r--  block/blk-settings.c      |   8
-rw-r--r--  block/blk-throttle.c      |   1
-rw-r--r--  block/blk.h               |   2
-rw-r--r--  block/cfq-iosched.c       |   8
-rw-r--r--  block/deadline-iosched.c  |   9
-rw-r--r--  block/elevator.c          |  43
-rw-r--r--  block/noop-iosched.c      |   8
10 files changed, 26 insertions(+), 233 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 6efb55cc5af0..82a45898ba76 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -198,6 +198,19 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 }
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
+/*
+ * Make sure that plugs that were pending when this function was entered,
+ * are now complete and requests pushed to the queue.
+*/
+static inline void queue_sync_plugs(struct request_queue *q)
+{
+	/*
+	 * If the current process is plugged and has barriers submitted,
+	 * we will livelock if we don't unplug first.
+	 */
+	blk_flush_plug(current);
+}
+
 static void blk_delay_work(struct work_struct *work)
 {
 	struct request_queue *q;
@@ -224,137 +237,6 @@ void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 }
 EXPORT_SYMBOL(blk_delay_queue);
 
-/*
- * "plug" the device if there are no outstanding requests: this will
- * force the transfer to start only after we have put all the requests
- * on the list.
- *
- * This is called with interrupts off and no requests on the queue and
- * with the queue lock held.
- */
-void blk_plug_device(struct request_queue *q)
-{
-	WARN_ON(!irqs_disabled());
-
-	/*
-	 * don't plug a stopped queue, it must be paired with blk_start_queue()
-	 * which will restart the queueing
-	 */
-	if (blk_queue_stopped(q))
-		return;
-
-	if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
-		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
-		trace_block_plug(q);
-	}
-}
-EXPORT_SYMBOL(blk_plug_device);
-
-/**
- * blk_plug_device_unlocked - plug a device without queue lock held
- * @q: The &struct request_queue to plug
- *
- * Description:
- * Like @blk_plug_device(), but grabs the queue lock and disables
- * interrupts.
- **/
-void blk_plug_device_unlocked(struct request_queue *q)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	blk_plug_device(q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-}
-EXPORT_SYMBOL(blk_plug_device_unlocked);
-
-/*
- * remove the queue from the plugged list, if present. called with
- * queue lock held and interrupts disabled.
- */
-int blk_remove_plug(struct request_queue *q)
-{
-	WARN_ON(!irqs_disabled());
-
-	if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
-		return 0;
-
-	del_timer(&q->unplug_timer);
-	return 1;
-}
-EXPORT_SYMBOL(blk_remove_plug);
-
-/*
- * remove the plug and let it rip..
- */
-void __generic_unplug_device(struct request_queue *q)
-{
-	if (unlikely(blk_queue_stopped(q)))
-		return;
-	if (!blk_remove_plug(q) && !blk_queue_nonrot(q))
-		return;
-
-	q->request_fn(q);
-}
-
-/**
- * generic_unplug_device - fire a request queue
- * @q: The &struct request_queue in question
- *
- * Description:
- * Linux uses plugging to build bigger requests queues before letting
- * the device have at them. If a queue is plugged, the I/O scheduler
- * is still adding and merging requests on the queue. Once the queue
- * gets unplugged, the request_fn defined for the queue is invoked and
- * transfers started.
- **/
-void generic_unplug_device(struct request_queue *q)
-{
-	if (blk_queue_plugged(q)) {
-		spin_lock_irq(q->queue_lock);
-		__generic_unplug_device(q);
-		spin_unlock_irq(q->queue_lock);
-	}
-}
-EXPORT_SYMBOL(generic_unplug_device);
-
-static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
-				   struct page *page)
-{
-	struct request_queue *q = bdi->unplug_io_data;
-
-	blk_unplug(q);
-}
-
-void blk_unplug_work(struct work_struct *work)
-{
-	struct request_queue *q =
-		container_of(work, struct request_queue, unplug_work);
-
-	trace_block_unplug_io(q);
-	q->unplug_fn(q);
-}
-
-void blk_unplug_timeout(unsigned long data)
-{
-	struct request_queue *q = (struct request_queue *)data;
-
-	trace_block_unplug_timer(q);
-	kblockd_schedule_work(q, &q->unplug_work);
-}
-
-void blk_unplug(struct request_queue *q)
-{
-	/*
-	 * devices don't necessarily have an ->unplug_fn defined
-	 */
-	if (q->unplug_fn) {
-		trace_block_unplug_io(q);
-		q->unplug_fn(q);
-	}
-}
-EXPORT_SYMBOL(blk_unplug);
-
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q: The &struct request_queue in question
@@ -389,7 +271,6 @@ EXPORT_SYMBOL(blk_start_queue);
  **/
 void blk_stop_queue(struct request_queue *q)
 {
-	blk_remove_plug(q);
 	cancel_delayed_work(&q->delay_work);
 	queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
@@ -411,11 +292,10 @@ EXPORT_SYMBOL(blk_stop_queue);
  */
 void blk_sync_queue(struct request_queue *q)
 {
-	del_timer_sync(&q->unplug_timer);
 	del_timer_sync(&q->timeout);
-	cancel_work_sync(&q->unplug_work);
 	throtl_shutdown_timer_wq(q);
 	cancel_delayed_work_sync(&q->delay_work);
+	queue_sync_plugs(q);
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
@@ -430,14 +310,9 @@ EXPORT_SYMBOL(blk_sync_queue);
  */
 void __blk_run_queue(struct request_queue *q)
 {
-	blk_remove_plug(q);
-
 	if (unlikely(blk_queue_stopped(q)))
 		return;
 
-	if (elv_queue_empty(q))
-		return;
-
 	/*
 	 * Only recurse once to avoid overrunning the stack, let the unplug
 	 * handling reinvoke the handler shortly if we already got there.
@@ -445,10 +320,8 @@ void __blk_run_queue(struct request_queue *q)
 	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 		q->request_fn(q);
 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
-	} else {
-		queue_flag_set(QUEUE_FLAG_PLUGGED, q);
-		kblockd_schedule_work(q, &q->unplug_work);
-	}
+	} else
+		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
@@ -535,8 +408,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	if (!q)
 		return NULL;
 
-	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
-	q->backing_dev_info.unplug_io_data = q;
 	q->backing_dev_info.ra_pages =
 			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 	q->backing_dev_info.state = 0;
@@ -556,13 +427,11 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
 	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, (unsigned long) q);
-	init_timer(&q->unplug_timer);
 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
 	INIT_LIST_HEAD(&q->timeout_list);
 	INIT_LIST_HEAD(&q->flush_queue[0]);
 	INIT_LIST_HEAD(&q->flush_queue[1]);
 	INIT_LIST_HEAD(&q->flush_data_in_flight);
-	INIT_WORK(&q->unplug_work, blk_unplug_work);
 	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
 
 	kobject_init(&q->kobj, &blk_queue_ktype);
@@ -652,7 +521,6 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
 	q->request_fn = rfn;
 	q->prep_rq_fn = NULL;
 	q->unprep_rq_fn = NULL;
-	q->unplug_fn = generic_unplug_device;
 	q->queue_flags = QUEUE_FLAG_DEFAULT;
 	q->queue_lock = lock;
 
@@ -910,8 +778,8 @@ out:
 }
 
 /*
- * No available requests for this queue, unplug the device and wait for some
- * requests to become available.
+ * No available requests for this queue, wait for some requests to become
+ * available.
  *
  * Called with q->queue_lock held, and returns with it unlocked.
  */
@@ -932,7 +800,6 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 
 		trace_block_sleeprq(q, bio, rw_flags & 1);
 
-		__generic_unplug_device(q);
 		spin_unlock_irq(q->queue_lock);
 		io_schedule();
 
@@ -1058,7 +925,7 @@ static void add_acct_request(struct request_queue *q, struct request *rq,
 			     int where)
 {
 	drive_stat_acct(rq, 1);
-	__elv_add_request(q, rq, where, 0);
+	__elv_add_request(q, rq, where);
 }
 
 /**
@@ -2798,7 +2665,7 @@ static void flush_plug_list(struct blk_plug *plug)
 		/*
 		 * rq is already accounted, so use raw insert
 		 */
-		__elv_add_request(q, rq, ELEVATOR_INSERT_SORT, 0);
+		__elv_add_request(q, rq, ELEVATOR_INSERT_SORT);
 	}
 
 	if (q) {
diff --git a/block/blk-exec.c b/block/blk-exec.c
index cf1456a02acd..81e31819a597 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -54,8 +54,8 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	rq->end_io = done;
 	WARN_ON(irqs_disabled());
 	spin_lock_irq(q->queue_lock);
-	__elv_add_request(q, rq, where, 1);
-	__generic_unplug_device(q);
+	__elv_add_request(q, rq, where);
+	__blk_run_queue(q);
 	/* the queue is stopped so it won't be plugged+unplugged */
 	if (rq->cmd_type == REQ_TYPE_PM_RESUME)
 		q->request_fn(q);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 1e2aa8a8908c..671fa9da7560 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -194,7 +194,6 @@ static void flush_end_io(struct request *flush_rq, int error)
 {
 	struct request_queue *q = flush_rq->q;
 	struct list_head *running = &q->flush_queue[q->flush_running_idx];
-	bool was_empty = elv_queue_empty(q);
 	bool queued = false;
 	struct request *rq, *n;
 
@@ -213,7 +212,7 @@ static void flush_end_io(struct request *flush_rq, int error)
 	}
 
 	/* after populating an empty queue, kick it to avoid stall */
-	if (queued && was_empty)
+	if (queued)
 		__blk_run_queue(q);
 }
 
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 36c8c1f2af18..c8d68921dddb 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -164,14 +164,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	blk_queue_congestion_threshold(q);
 	q->nr_batching = BLK_BATCH_REQ;
 
-	q->unplug_thresh = 4;		/* hmm */
-	q->unplug_delay = msecs_to_jiffies(3);	/* 3 milliseconds */
-	if (q->unplug_delay == 0)
-		q->unplug_delay = 1;
-
-	q->unplug_timer.function = blk_unplug_timeout;
-	q->unplug_timer.data = (unsigned long)q;
-
 	blk_set_default_limits(&q->limits);
 	blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
 
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a89043a3caa4..b8dcdc2663a1 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -800,7 +800,6 @@ out:
 	if (nr_disp) {
 		while((bio = bio_list_pop(&bio_list_on_stack)))
 			generic_make_request(bio);
-		blk_unplug(q);
 	}
 	return nr_disp;
 }
diff --git a/block/blk.h b/block/blk.h
index 284b500852bd..49d21af81d07 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -18,8 +18,6 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 void blk_dequeue_request(struct request *rq);
 void __blk_queue_free_tags(struct request_queue *q);
 
-void blk_unplug_work(struct work_struct *work);
-void blk_unplug_timeout(unsigned long data);
 void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 3202c7e87fb3..ef631539dd2a 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -499,13 +499,6 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 	}
 }
 
-static int cfq_queue_empty(struct request_queue *q)
-{
-	struct cfq_data *cfqd = q->elevator->elevator_data;
-
-	return !cfqd->rq_queued;
-}
-
 /*
  * Scale schedule slice based on io priority. Use the sync time slice only
  * if a queue is marked sync and has sync io queued. A sync queue with async
@@ -4061,7 +4054,6 @@ static struct elevator_type iosched_cfq = {
 		.elevator_add_req_fn = cfq_insert_request,
 		.elevator_activate_req_fn = cfq_activate_request,
 		.elevator_deactivate_req_fn = cfq_deactivate_request,
-		.elevator_queue_empty_fn = cfq_queue_empty,
 		.elevator_completed_req_fn = cfq_completed_request,
 		.elevator_former_req_fn = elv_rb_former_request,
 		.elevator_latter_req_fn = elv_rb_latter_request,
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index b547cbca7b23..5139c0ea1864 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -326,14 +326,6 @@ dispatch_request:
 	return 1;
 }
 
-static int deadline_queue_empty(struct request_queue *q)
-{
-	struct deadline_data *dd = q->elevator->elevator_data;
-
-	return list_empty(&dd->fifo_list[WRITE])
-		&& list_empty(&dd->fifo_list[READ]);
-}
-
 static void deadline_exit_queue(struct elevator_queue *e)
 {
 	struct deadline_data *dd = e->elevator_data;
@@ -445,7 +437,6 @@ static struct elevator_type iosched_deadline = {
 		.elevator_merge_req_fn = deadline_merged_requests,
 		.elevator_dispatch_fn = deadline_dispatch_requests,
 		.elevator_add_req_fn = deadline_add_request,
-		.elevator_queue_empty_fn = deadline_queue_empty,
 		.elevator_former_req_fn = elv_rb_former_request,
 		.elevator_latter_req_fn = elv_rb_latter_request,
 		.elevator_init_fn = deadline_init_queue,
diff --git a/block/elevator.c b/block/elevator.c
index 25713927c0d3..3ea208256e78 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -619,21 +619,12 @@ void elv_quiesce_end(struct request_queue *q)
 
 void elv_insert(struct request_queue *q, struct request *rq, int where)
 {
-	int unplug_it = 1;
-
 	trace_block_rq_insert(q, rq);
 
 	rq->q = q;
 
 	switch (where) {
 	case ELEVATOR_INSERT_REQUEUE:
-		/*
-		 * Most requeues happen because of a busy condition,
-		 * don't force unplug of the queue for that case.
-		 * Clear unplug_it and fall through.
-		 */
-		unplug_it = 0;
-
 	case ELEVATOR_INSERT_FRONT:
 		rq->cmd_flags |= REQ_SOFTBARRIER;
 		list_add(&rq->queuelist, &q->queue_head);
@@ -679,24 +670,14 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		rq->cmd_flags |= REQ_SOFTBARRIER;
 		blk_insert_flush(rq);
 		break;
-
 	default:
 		printk(KERN_ERR "%s: bad insertion point %d\n",
 		       __func__, where);
 		BUG();
 	}
-
-	if (unplug_it && blk_queue_plugged(q)) {
-		int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
-			- queue_in_flight(q);
-
-		if (nrq >= q->unplug_thresh)
-			__generic_unplug_device(q);
-	}
 }
 
-void __elv_add_request(struct request_queue *q, struct request *rq, int where,
-		       int plug)
+void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 {
 	BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
 
@@ -711,38 +692,20 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
 	    where == ELEVATOR_INSERT_SORT)
 		where = ELEVATOR_INSERT_BACK;
 
-	if (plug)
-		blk_plug_device(q);
-
 	elv_insert(q, rq, where);
 }
 EXPORT_SYMBOL(__elv_add_request);
 
-void elv_add_request(struct request_queue *q, struct request *rq, int where,
-		     int plug)
+void elv_add_request(struct request_queue *q, struct request *rq, int where)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	__elv_add_request(q, rq, where, plug);
+	__elv_add_request(q, rq, where);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(elv_add_request);
 
-int elv_queue_empty(struct request_queue *q)
-{
-	struct elevator_queue *e = q->elevator;
-
-	if (!list_empty(&q->queue_head))
-		return 0;
-
-	if (e->ops->elevator_queue_empty_fn)
-		return e->ops->elevator_queue_empty_fn(q);
-
-	return 1;
-}
-EXPORT_SYMBOL(elv_queue_empty);
-
 struct request *elv_latter_request(struct request_queue *q, struct request *rq)
 {
 	struct elevator_queue *e = q->elevator;
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 232c4b38cd37..06389e9ef96d 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -39,13 +39,6 @@ static void noop_add_request(struct request_queue *q, struct request *rq)
 	list_add_tail(&rq->queuelist, &nd->queue);
 }
 
-static int noop_queue_empty(struct request_queue *q)
-{
-	struct noop_data *nd = q->elevator->elevator_data;
-
-	return list_empty(&nd->queue);
-}
-
 static struct request *
 noop_former_request(struct request_queue *q, struct request *rq)
 {
@@ -90,7 +83,6 @@ static struct elevator_type elevator_noop = {
 		.elevator_merge_req_fn = noop_merged_requests,
 		.elevator_dispatch_fn = noop_dispatch,
 		.elevator_add_req_fn = noop_add_request,
-		.elevator_queue_empty_fn = noop_queue_empty,
 		.elevator_former_req_fn = noop_former_request,
 		.elevator_latter_req_fn = noop_latter_request,
 		.elevator_init_fn = noop_init_queue,