 block/blk-core.c                 | 11 ++---------
 block/blk.h                      |  1 -
 drivers/scsi/scsi_lib.c          | 17 +----------------
 drivers/scsi/scsi_transport_fc.c | 19 ++++---------------
 include/linux/blkdev.h           | 26 +++++++++++++-------------
 5 files changed, 20 insertions(+), 54 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 580eee5743e5..40725b9091f1 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -303,15 +303,7 @@ void __blk_run_queue(struct request_queue *q)
 	if (unlikely(blk_queue_stopped(q)))
 		return;
 
-	/*
-	 * Only recurse once to avoid overrunning the stack, let the unplug
-	 * handling reinvoke the handler shortly if we already got there.
-	 */
-	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
-		q->request_fn(q);
-		queue_flag_clear(QUEUE_FLAG_REENTER, q);
-	} else
-		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+	q->request_fn(q);
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
@@ -328,6 +320,7 @@ void blk_run_queue_async(struct request_queue *q)
 	if (likely(!blk_queue_stopped(q)))
 		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
+EXPORT_SYMBOL(blk_run_queue_async);
 
 /**
  * blk_run_queue - run a single device queue
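A hedged sketch of how callers pick between the two entry points after this change (struct my_dev and my_dev_kick() are made-up names for illustration; the blk_* and spin_lock_* calls are the real interfaces touched above). __blk_run_queue() now runs ->request_fn() unconditionally and must be called with the queue lock held, so it can recurse if invoked from inside the dispatch path; blk_run_queue_async() only schedules kblockd via queue_delayed_work(), which is why exporting it lets the SCSI callers below drop the REENTER dance entirely.

struct my_dev {				/* made-up example device */
	struct request_queue *queue;
};

static void my_dev_kick(struct my_dev *dev, bool on_dispatch_stack)
{
	if (on_dispatch_stack) {
		/* Possibly inside ->request_fn() already: defer to
		 * kblockd rather than recursing into the queue. */
		blk_run_queue_async(dev->queue);
	} else {
		unsigned long flags;

		/* Direct dispatch; __blk_run_queue() expects the
		 * queue lock to be held by the caller. */
		spin_lock_irqsave(dev->queue->queue_lock, flags);
		__blk_run_queue(dev->queue);
		spin_unlock_irqrestore(dev->queue->queue_lock, flags);
	}
}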
diff --git a/block/blk.h b/block/blk.h
index c9df8fc3c999..61263463e38e 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -22,7 +22,6 @@ void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
 void __generic_unplug_device(struct request_queue *);
-void blk_run_queue_async(struct request_queue *q);
 
 /*
  * Internal atomic flags for request handling
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ab55c2fa7ce2..e9901b8f8443 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -411,8 +411,6 @@ static void scsi_run_queue(struct request_queue *q)
 	list_splice_init(&shost->starved_list, &starved_list);
 
 	while (!list_empty(&starved_list)) {
-		int flagset;
-
 		/*
 		 * As long as shost is accepting commands and we have
 		 * starved queues, call blk_run_queue. scsi_request_fn
@@ -435,20 +433,7 @@ static void scsi_run_queue(struct request_queue *q)
 			continue;
 		}
 
-		spin_unlock(shost->host_lock);
-
-		spin_lock(sdev->request_queue->queue_lock);
-		flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
-			  !test_bit(QUEUE_FLAG_REENTER,
-				    &sdev->request_queue->queue_flags);
-		if (flagset)
-			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
-		__blk_run_queue(sdev->request_queue);
-		if (flagset)
-			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
-		spin_unlock(sdev->request_queue->queue_lock);
-
-		spin_lock(shost->host_lock);
+		blk_run_queue_async(sdev->request_queue);
 	}
 	/* put any unprocessed entries back */
 	list_splice(&starved_list, &shost->starved_list);
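The hunk above replaces fifteen lines of lock juggling with one call; a hedged sketch of why (kick_starved_queue() is a made-up wrapper, everything inside it is the real API). Since blk_run_queue_async() only queues delayed work, it never re-enters scsi_request_fn() on the current stack, so there is no need to drop shost->host_lock, take the per-device queue_lock, or guard against recursion with QUEUE_FLAG_REENTER.

/* Made-up helper isolating the replacement call. */
static void kick_starved_queue(struct scsi_device *sdev)
{
	/*
	 * Deferred kick: safe to call with shost->host_lock held,
	 * because the actual dispatch happens later on kblockd.
	 */
	blk_run_queue_async(sdev->request_queue);
}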
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 28c33506e4ad..815069d13f9b 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3816,28 +3816,17 @@ fail_host_msg:
 static void
 fc_bsg_goose_queue(struct fc_rport *rport)
 {
-	int flagset;
-	unsigned long flags;
-
 	if (!rport->rqst_q)
 		return;
 
+	/*
+	 * This get/put dance makes no sense
+	 */
 	get_device(&rport->dev);
-
-	spin_lock_irqsave(rport->rqst_q->queue_lock, flags);
-	flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) &&
-		  !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
-	if (flagset)
-		queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
-	__blk_run_queue(rport->rqst_q);
-	if (flagset)
-		queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
-	spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
-
+	blk_run_queue_async(rport->rqst_q);
 	put_device(&rport->dev);
 }
 
-
 /**
  * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
  * @q: rport request queue
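One thing the new comment flags: blk_run_queue_async() returns as soon as the work is queued, so the device reference is released before kblockd ever runs the queue. A hedged sketch of the resulting flow (goose_queue_sketch() is a made-up mirror of the post-patch function; the calls inside it are real):

static void goose_queue_sketch(struct fc_rport *rport)
{
	if (!rport->rqst_q)
		return;

	get_device(&rport->dev);	    /* reference taken...       */
	blk_run_queue_async(rport->rqst_q); /* ...work merely queued... */
	put_device(&rport->dev);	    /* ...reference dropped at
					       once, guarding nothing   */
}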
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index cbbfd98ad4a3..2ad95fa1d130 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -388,20 +388,19 @@ struct request_queue
 #define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
 #define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
-#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
-#define QUEUE_FLAG_ELVSWITCH	7	/* don't use elevator, just do FIFO */
-#define QUEUE_FLAG_BIDI		8	/* queue supports bidi requests */
-#define QUEUE_FLAG_NOMERGES	9	/* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP	10	/* force complete on same CPU */
-#define QUEUE_FLAG_FAIL_IO	11	/* fake timeout */
-#define QUEUE_FLAG_STACKABLE	12	/* supports request stacking */
-#define QUEUE_FLAG_NONROT	13	/* non-rotational device (SSD) */
+#define QUEUE_FLAG_ELVSWITCH	6	/* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP	9	/* force complete on same CPU */
+#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
+#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
+#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT	15	/* do IO stats */
-#define QUEUE_FLAG_DISCARD	16	/* supports DISCARD */
-#define QUEUE_FLAG_NOXMERGES	17	/* No extended merges */
-#define QUEUE_FLAG_ADD_RANDOM	18	/* Contributes to random pool */
-#define QUEUE_FLAG_SECDISCARD	19	/* supports SECDISCARD */
+#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
+#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
+#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
+#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
+#define QUEUE_FLAG_SECDISCARD	17	/* supports SECDISCARD */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -699,6 +698,7 @@ extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
 extern void __blk_run_queue(struct request_queue *q);
 extern void blk_run_queue(struct request_queue *);
+extern void blk_run_queue_async(struct request_queue *q);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
 			   struct rq_map_data *, void __user *, unsigned long,
 			   gfp_t);
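Renumbering the surviving flags is safe because QUEUE_FLAG_* values are in-kernel bit indices into q->queue_flags, never persisted or exposed to userspace; every consumer reaches them through test_bit() or the queue_flag_set()/queue_flag_clear() helpers. A minimal sketch (queue_is_ssd() is a made-up name; the in-tree equivalent is the blk_queue_nonrot() macro):

/* Made-up predicate; test_bit() and the flag name are the real API. */
static inline bool queue_is_ssd(struct request_queue *q)
{
	/* NONROT moved from bit 13 to bit 12 above, but callers only
	 * ever use the symbolic name, so nothing outside this header
	 * notices the renumbering. */
	return test_bit(QUEUE_FLAG_NONROT, &q->queue_flags);
}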