author	Linus Torvalds <torvalds@linux-foundation.org>	2011-04-20 12:48:52 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-04-20 12:48:52 -0400
commit	73aa86825f45cf8efccf20128779416db7c278b2 (patch)
tree	fbffdf48afeda5ad45c23e6d7c132ae4a040f080
parent	2f666bcf757cb72549f360ef6da02f03620a48b6 (diff)
parent	60735b6362f29b52b5635a2dfa9ab5ad39948345 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
block: Remove the extra check in queue_requests_store
block, blk-sysfs: Fix an err return path in blk_register_queue()
block: remove stale kerneldoc member from __blk_run_queue()
block: get rid of QUEUE_FLAG_REENTER
cfq-iosched: read_lock() does not always imply rcu_read_lock()
block: kill blk_flush_plug_list() export
-rw-r--r--	block/blk-core.c	13
-rw-r--r--	block/blk-sysfs.c	8
-rw-r--r--	block/blk.h	1
-rw-r--r--	block/cfq-iosched.c	20
-rw-r--r--	drivers/scsi/scsi_lib.c	17
-rw-r--r--	drivers/scsi/scsi_transport_fc.c	19
-rw-r--r--	include/linux/blkdev.h	26
7 files changed, 31 insertions, 73 deletions
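
Note: the common thread in the diffs below is the removal of QUEUE_FLAG_REENTER. Instead of each caller guarding __blk_run_queue() with a test-and-set/clear dance on that flag, callers that might already be on the request_fn path now use the newly exported blk_run_queue_async(), which defers the queue run to kblockd (it queues q->delay_work with zero delay). A minimal caller-side sketch, mirroring the scsi_run_queue() and fc_bsg_goose_queue() hunks below; the kick_queue() helper is hypothetical and for illustration only:

#include <linux/blkdev.h>

/* Hypothetical helper, for illustration only. */
static void kick_queue(struct request_queue *q)
{
	/*
	 * Before this merge, __blk_run_queue() (and its callers) avoided
	 * recursing into q->request_fn() with the pattern removed below:
	 *
	 *	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
	 *		q->request_fn(q);
	 *		queue_flag_clear(QUEUE_FLAG_REENTER, q);
	 *	} else
	 *		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
	 *
	 * After it, a caller that cannot run the queue directly simply
	 * punts the run to kblockd; no re-entrancy flag is needed because
	 * request_fn is not invoked from this context.
	 */
	blk_run_queue_async(q);
}
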
diff --git a/block/blk-core.c b/block/blk-core.c
index 5fa3dd2705c6..a2e58eeb3549 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -292,7 +292,6 @@ EXPORT_SYMBOL(blk_sync_queue);
 /**
  * __blk_run_queue - run a single device queue
  * @q:	The queue to run
- * @force_kblockd: Don't run @q->request_fn directly. Use kblockd.
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
@@ -303,15 +302,7 @@ void __blk_run_queue(struct request_queue *q)
 	if (unlikely(blk_queue_stopped(q)))
 		return;
 
-	/*
-	 * Only recurse once to avoid overrunning the stack, let the unplug
-	 * handling reinvoke the handler shortly if we already got there.
-	 */
-	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
-		q->request_fn(q);
-		queue_flag_clear(QUEUE_FLAG_REENTER, q);
-	} else
-		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+	q->request_fn(q);
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
@@ -328,6 +319,7 @@ void blk_run_queue_async(struct request_queue *q)
 	if (likely(!blk_queue_stopped(q)))
 		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
+EXPORT_SYMBOL(blk_run_queue_async);
 
 /**
  * blk_run_queue - run a single device queue
@@ -2787,7 +2779,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 
 	local_irq_restore(flags);
 }
-EXPORT_SYMBOL(blk_flush_plug_list);
 
 void blk_finish_plug(struct blk_plug *plug)
 {
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 6d735122bc59..bd236313f35d 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -66,14 +66,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 
 	if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
 		blk_set_queue_full(q, BLK_RW_SYNC);
-	} else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
+	} else {
 		blk_clear_queue_full(q, BLK_RW_SYNC);
 		wake_up(&rl->wait[BLK_RW_SYNC]);
 	}
 
 	if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
 		blk_set_queue_full(q, BLK_RW_ASYNC);
-	} else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
+	} else {
 		blk_clear_queue_full(q, BLK_RW_ASYNC);
 		wake_up(&rl->wait[BLK_RW_ASYNC]);
 	}
@@ -508,8 +508,10 @@ int blk_register_queue(struct gendisk *disk)
 		return ret;
 
 	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
-	if (ret < 0)
+	if (ret < 0) {
+		blk_trace_remove_sysfs(dev);
 		return ret;
+	}
 
 	kobject_uevent(&q->kobj, KOBJ_ADD);
 
diff --git a/block/blk.h b/block/blk.h
index c9df8fc3c999..61263463e38e 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -22,7 +22,6 @@ void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
 void __generic_unplug_device(struct request_queue *);
-void blk_run_queue_async(struct request_queue *q);
 
 /*
  * Internal atomic flags for request handling
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 46b0a1d1d925..5b52011e3a40 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2582,28 +2582,20 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 }
 
 /*
- * Must always be called with the rcu_read_lock() held
+ * Call func for each cic attached to this ioc.
  */
 static void
-__call_for_each_cic(struct io_context *ioc,
+call_for_each_cic(struct io_context *ioc,
 		  void (*func)(struct io_context *, struct cfq_io_context *))
 {
 	struct cfq_io_context *cic;
 	struct hlist_node *n;
 
+	rcu_read_lock();
+
 	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
 		func(ioc, cic);
-}
 
-/*
- * Call func for each cic attached to this ioc.
- */
-static void
-call_for_each_cic(struct io_context *ioc,
-		  void (*func)(struct io_context *, struct cfq_io_context *))
-{
-	rcu_read_lock();
-	__call_for_each_cic(ioc, func);
 	rcu_read_unlock();
 }
 
@@ -2664,7 +2656,7 @@ static void cfq_free_io_context(struct io_context *ioc)
 	 * should be ok to iterate over the known list, we will see all cic's
	 * since no new ones are added.
 	 */
-	__call_for_each_cic(ioc, cic_free_func);
+	call_for_each_cic(ioc, cic_free_func);
 }
 
 static void cfq_put_cooperator(struct cfq_queue *cfqq)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ab55c2fa7ce2..e9901b8f8443 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -411,8 +411,6 @@ static void scsi_run_queue(struct request_queue *q)
 	list_splice_init(&shost->starved_list, &starved_list);
 
 	while (!list_empty(&starved_list)) {
-		int flagset;
-
 		/*
 		 * As long as shost is accepting commands and we have
 		 * starved queues, call blk_run_queue. scsi_request_fn
@@ -435,20 +433,7 @@ static void scsi_run_queue(struct request_queue *q)
 			continue;
 		}
 
-		spin_unlock(shost->host_lock);
-
-		spin_lock(sdev->request_queue->queue_lock);
-		flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
-				!test_bit(QUEUE_FLAG_REENTER,
-					&sdev->request_queue->queue_flags);
-		if (flagset)
-			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
-		__blk_run_queue(sdev->request_queue);
-		if (flagset)
-			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
-		spin_unlock(sdev->request_queue->queue_lock);
-
-		spin_lock(shost->host_lock);
+		blk_run_queue_async(sdev->request_queue);
 	}
 	/* put any unprocessed entries back */
 	list_splice(&starved_list, &shost->starved_list);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 28c33506e4ad..815069d13f9b 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3816,28 +3816,17 @@ fail_host_msg:
 static void
 fc_bsg_goose_queue(struct fc_rport *rport)
 {
-	int flagset;
-	unsigned long flags;
-
 	if (!rport->rqst_q)
 		return;
 
+	/*
+	 * This get/put dance makes no sense
+	 */
 	get_device(&rport->dev);
-
-	spin_lock_irqsave(rport->rqst_q->queue_lock, flags);
-	flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) &&
-		  !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
-	if (flagset)
-		queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
-	__blk_run_queue(rport->rqst_q);
-	if (flagset)
-		queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
-	spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
-
+	blk_run_queue_async(rport->rqst_q);
 	put_device(&rport->dev);
 }
 
-
 /**
  * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
  * @q:	rport request queue
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index cbbfd98ad4a3..2ad95fa1d130 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -388,20 +388,19 @@ struct request_queue
 #define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
 #define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
-#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
-#define QUEUE_FLAG_ELVSWITCH	7	/* don't use elevator, just do FIFO */
-#define QUEUE_FLAG_BIDI		8	/* queue supports bidi requests */
-#define QUEUE_FLAG_NOMERGES	9	/* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP	10	/* force complete on same CPU */
-#define QUEUE_FLAG_FAIL_IO	11	/* fake timeout */
-#define QUEUE_FLAG_STACKABLE	12	/* supports request stacking */
-#define QUEUE_FLAG_NONROT	13	/* non-rotational device (SSD) */
+#define QUEUE_FLAG_ELVSWITCH	6	/* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP	9	/* force complete on same CPU */
+#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
+#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
+#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT	15	/* do IO stats */
-#define QUEUE_FLAG_DISCARD	16	/* supports DISCARD */
-#define QUEUE_FLAG_NOXMERGES	17	/* No extended merges */
-#define QUEUE_FLAG_ADD_RANDOM	18	/* Contributes to random pool */
-#define QUEUE_FLAG_SECDISCARD	19	/* supports SECDISCARD */
+#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
+#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
+#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
+#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
+#define QUEUE_FLAG_SECDISCARD	17	/* supports SECDISCARD */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -699,6 +698,7 @@ extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
 extern void __blk_run_queue(struct request_queue *q);
 extern void blk_run_queue(struct request_queue *);
+extern void blk_run_queue_async(struct request_queue *q);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
 			   struct rq_map_data *, void __user *, unsigned long,
 			   gfp_t);