path: root/block/blk-core.c
author     Jens Axboe <jaxboe@fusionio.com>  2011-03-10 02:52:07 -0500
committer  Jens Axboe <jaxboe@fusionio.com>  2011-03-10 02:52:07 -0500
commit     7eaceaccab5f40bbfda044629a6298616aeaed50 (patch)
tree       33954d12f63e25a47eb6d86ef3d3d0a5e62bf752 /block/blk-core.c
parent     73c101011926c5832e6e141682180c4debe2cf45 (diff)
block: remove per-queue plugging
Code has been converted over to the new explicit on-stack plugging, and delay users have been converted to use the new API for that. So let's kill off the old plugging along with aops->sync_page().

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
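Editor's note, not part of the commit: the "explicit on-stack plugging" referred to above is the API added by the parent change in this series (blk_start_plug()/blk_finish_plug() on a stack-allocated struct blk_plug). A minimal sketch of how a submitter batches I/O with it might look like the following; the helper name and the bio array are illustrative only, while the plug calls and the 2.6.39-era submit_bio(rw, bio) signature are the real interfaces.

/* Hedged sketch: batching bio submission with the new on-stack plug. */
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/fs.h>

static void example_submit_batch(struct bio **bios, int nr)	/* hypothetical helper */
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* requests collect on the task's plug list */
	for (i = 0; i < nr; i++)
		submit_bio(READ, bios[i]);	/* 2.6.39-era signature: (rw, bio) */
	blk_finish_plug(&plug);		/* flush the plug list out to the device queues */
}

Because the plug lives on the submitter's stack and is flushed on blk_finish_plug() (or when the task schedules), the per-queue unplug timer and work item removed below are no longer needed.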
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c  173
1 file changed, 20 insertions, 153 deletions
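Editor's note, not part of the commit: the "delay users" mentioned in the commit message are drivers that used to plug the queue to briefly defer dispatch; after this series they ask the block layer to re-run the queue later via blk_delay_queue(), which the hunks below leave in place. A rough, hypothetical request_fn illustrating that pattern follows; example_hw_busy() and the 3 ms value are made up, the blk_* calls are real.

/* Hypothetical driver request_fn deferring work with blk_delay_queue(). */
#include <linux/blkdev.h>

static bool example_hw_busy(void)		/* stand-in for a real hardware check */
{
	return false;
}

static void example_request_fn(struct request_queue *q)	/* called with queue lock held */
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		if (example_hw_busy()) {
			blk_requeue_request(q, rq);	/* put the request back on the queue */
			blk_delay_queue(q, 3);		/* re-run the queue in ~3 ms */
			break;
		}
		/* ... hand rq to the hardware ... */
	}
}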
diff --git a/block/blk-core.c b/block/blk-core.c
index 6efb55cc5af0..82a45898ba76 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -198,6 +198,19 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 }
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
+/*
+ * Make sure that plugs that were pending when this function was entered,
+ * are now complete and requests pushed to the queue.
+*/
+static inline void queue_sync_plugs(struct request_queue *q)
+{
+	/*
+	 * If the current process is plugged and has barriers submitted,
+	 * we will livelock if we don't unplug first.
+	 */
+	blk_flush_plug(current);
+}
+
 static void blk_delay_work(struct work_struct *work)
 {
 	struct request_queue *q;
@@ -224,137 +237,6 @@ void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 }
 EXPORT_SYMBOL(blk_delay_queue);
 
-/*
- * "plug" the device if there are no outstanding requests: this will
- * force the transfer to start only after we have put all the requests
- * on the list.
- *
- * This is called with interrupts off and no requests on the queue and
- * with the queue lock held.
- */
-void blk_plug_device(struct request_queue *q)
-{
-	WARN_ON(!irqs_disabled());
-
-	/*
-	 * don't plug a stopped queue, it must be paired with blk_start_queue()
-	 * which will restart the queueing
-	 */
-	if (blk_queue_stopped(q))
-		return;
-
-	if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
-		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
-		trace_block_plug(q);
-	}
-}
-EXPORT_SYMBOL(blk_plug_device);
-
-/**
- * blk_plug_device_unlocked - plug a device without queue lock held
- * @q: The &struct request_queue to plug
- *
- * Description:
- *   Like @blk_plug_device(), but grabs the queue lock and disables
- *   interrupts.
- **/
-void blk_plug_device_unlocked(struct request_queue *q)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	blk_plug_device(q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-}
-EXPORT_SYMBOL(blk_plug_device_unlocked);
-
-/*
- * remove the queue from the plugged list, if present. called with
- * queue lock held and interrupts disabled.
- */
-int blk_remove_plug(struct request_queue *q)
-{
-	WARN_ON(!irqs_disabled());
-
-	if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
-		return 0;
-
-	del_timer(&q->unplug_timer);
-	return 1;
-}
-EXPORT_SYMBOL(blk_remove_plug);
-
-/*
- * remove the plug and let it rip..
- */
-void __generic_unplug_device(struct request_queue *q)
-{
-	if (unlikely(blk_queue_stopped(q)))
-		return;
-	if (!blk_remove_plug(q) && !blk_queue_nonrot(q))
-		return;
-
-	q->request_fn(q);
-}
-
-/**
- * generic_unplug_device - fire a request queue
- * @q: The &struct request_queue in question
- *
- * Description:
- *   Linux uses plugging to build bigger requests queues before letting
- *   the device have at them. If a queue is plugged, the I/O scheduler
- *   is still adding and merging requests on the queue. Once the queue
- *   gets unplugged, the request_fn defined for the queue is invoked and
- *   transfers started.
- **/
-void generic_unplug_device(struct request_queue *q)
-{
-	if (blk_queue_plugged(q)) {
-		spin_lock_irq(q->queue_lock);
-		__generic_unplug_device(q);
-		spin_unlock_irq(q->queue_lock);
-	}
-}
-EXPORT_SYMBOL(generic_unplug_device);
-
-static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
-				   struct page *page)
-{
-	struct request_queue *q = bdi->unplug_io_data;
-
-	blk_unplug(q);
-}
-
-void blk_unplug_work(struct work_struct *work)
-{
-	struct request_queue *q =
-		container_of(work, struct request_queue, unplug_work);
-
-	trace_block_unplug_io(q);
-	q->unplug_fn(q);
-}
-
-void blk_unplug_timeout(unsigned long data)
-{
-	struct request_queue *q = (struct request_queue *)data;
-
-	trace_block_unplug_timer(q);
-	kblockd_schedule_work(q, &q->unplug_work);
-}
-
-void blk_unplug(struct request_queue *q)
-{
-	/*
-	 * devices don't necessarily have an ->unplug_fn defined
-	 */
-	if (q->unplug_fn) {
-		trace_block_unplug_io(q);
-		q->unplug_fn(q);
-	}
-}
-EXPORT_SYMBOL(blk_unplug);
-
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q: The &struct request_queue in question
@@ -389,7 +271,6 @@ EXPORT_SYMBOL(blk_start_queue);
  **/
 void blk_stop_queue(struct request_queue *q)
 {
-	blk_remove_plug(q);
 	cancel_delayed_work(&q->delay_work);
 	queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
@@ -411,11 +292,10 @@ EXPORT_SYMBOL(blk_stop_queue);
  */
 void blk_sync_queue(struct request_queue *q)
 {
-	del_timer_sync(&q->unplug_timer);
 	del_timer_sync(&q->timeout);
-	cancel_work_sync(&q->unplug_work);
 	throtl_shutdown_timer_wq(q);
 	cancel_delayed_work_sync(&q->delay_work);
+	queue_sync_plugs(q);
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
@@ -430,14 +310,9 @@ EXPORT_SYMBOL(blk_sync_queue);
  */
 void __blk_run_queue(struct request_queue *q)
 {
-	blk_remove_plug(q);
-
 	if (unlikely(blk_queue_stopped(q)))
 		return;
 
-	if (elv_queue_empty(q))
-		return;
-
 	/*
 	 * Only recurse once to avoid overrunning the stack, let the unplug
 	 * handling reinvoke the handler shortly if we already got there.
@@ -445,10 +320,8 @@ void __blk_run_queue(struct request_queue *q)
 	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 		q->request_fn(q);
 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
-	} else {
-		queue_flag_set(QUEUE_FLAG_PLUGGED, q);
-		kblockd_schedule_work(q, &q->unplug_work);
-	}
+	} else
+		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
@@ -535,8 +408,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	if (!q)
 		return NULL;
 
-	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
-	q->backing_dev_info.unplug_io_data = q;
 	q->backing_dev_info.ra_pages =
 			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 	q->backing_dev_info.state = 0;
@@ -556,13 +427,11 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
 	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, (unsigned long) q);
-	init_timer(&q->unplug_timer);
 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
 	INIT_LIST_HEAD(&q->timeout_list);
 	INIT_LIST_HEAD(&q->flush_queue[0]);
 	INIT_LIST_HEAD(&q->flush_queue[1]);
 	INIT_LIST_HEAD(&q->flush_data_in_flight);
-	INIT_WORK(&q->unplug_work, blk_unplug_work);
 	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
 
 	kobject_init(&q->kobj, &blk_queue_ktype);
@@ -652,7 +521,6 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
 	q->request_fn		= rfn;
 	q->prep_rq_fn		= NULL;
 	q->unprep_rq_fn		= NULL;
-	q->unplug_fn		= generic_unplug_device;
 	q->queue_flags		= QUEUE_FLAG_DEFAULT;
 	q->queue_lock		= lock;
 
@@ -910,8 +778,8 @@ out:
 }
 
 /*
- * No available requests for this queue, unplug the device and wait for some
- * requests to become available.
+ * No available requests for this queue, wait for some requests to become
+ * available.
  *
  * Called with q->queue_lock held, and returns with it unlocked.
  */
@@ -932,7 +800,6 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 
 	trace_block_sleeprq(q, bio, rw_flags & 1);
 
-	__generic_unplug_device(q);
 	spin_unlock_irq(q->queue_lock);
 	io_schedule();
 
@@ -1058,7 +925,7 @@ static void add_acct_request(struct request_queue *q, struct request *rq,
 			    int where)
 {
 	drive_stat_acct(rq, 1);
-	__elv_add_request(q, rq, where, 0);
+	__elv_add_request(q, rq, where);
 }
 
 /**
@@ -2798,7 +2665,7 @@ static void flush_plug_list(struct blk_plug *plug)
 		/*
 		 * rq is already accounted, so use raw insert
 		 */
-		__elv_add_request(q, rq, ELEVATOR_INSERT_SORT, 0);
+		__elv_add_request(q, rq, ELEVATOR_INSERT_SORT);
 	}
 
 	if (q) {