 drivers/block/ll_rw_blk.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 265de858d4de..1e847151c3a3 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -1439,7 +1439,7 @@ EXPORT_SYMBOL(blk_remove_plug);
  */
 void __generic_unplug_device(request_queue_t *q)
 {
-	if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
+	if (unlikely(test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags)))
 		return;
 
 	if (!blk_remove_plug(q))
@@ -1763,7 +1763,7 @@ EXPORT_SYMBOL(blk_init_queue_node);
 
 int blk_get_queue(request_queue_t *q)
 {
-	if (!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
 		atomic_inc(&q->refcnt);
 		return 0;
 	}
@@ -2584,7 +2584,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 	spin_lock_prefetch(q->queue_lock);
 
 	barrier = bio_barrier(bio);
-	if (barrier && (q->ordered == QUEUE_ORDERED_NONE)) {
+	if (unlikely(barrier) && (q->ordered == QUEUE_ORDERED_NONE)) {
 		err = -EOPNOTSUPP;
 		goto end_io;
 	}
@@ -2685,7 +2685,7 @@ get_rq:
 	/*
 	 * REQ_BARRIER implies no merging, but lets make it explicit
 	 */
-	if (barrier)
+	if (unlikely(barrier))
 		req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
 
 	req->errors = 0;
@@ -2809,7 +2809,7 @@ static inline void block_wait_queue_running(request_queue_t *q)
 {
 	DEFINE_WAIT(wait);
 
-	while (test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)) {
+	while (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))) {
 		struct request_list *rl = &q->rq;
 
 		prepare_to_wait_exclusive(&rl->drain, &wait,
@@ -2918,7 +2918,7 @@ end_io:
 		goto end_io;
 	}
 
-	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))
+	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
 		goto end_io;
 
 	block_wait_queue_running(q);
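
Note on the annotations used above: likely() and unlikely() are the kernel's wrappers around GCC's __builtin_expect() (see include/linux/compiler.h). They tell the compiler which truth value a condition is expected to take so the hot path can be laid out as the fall-through case; the conditions themselves are unchanged, so the patch is purely a branch-prediction/code-layout hint. The following is a minimal userspace sketch of the same idiom; the queue_stopped flag and check_queue() helper are hypothetical stand-ins for illustration, not code from ll_rw_blk.c.

	#include <stdio.h>

	/* Same idiom as include/linux/compiler.h: !!(x) normalises the
	 * expression to 0/1 and __builtin_expect() records the expected
	 * value, letting the compiler favour that side of the branch. */
	#define likely(x)   __builtin_expect(!!(x), 1)
	#define unlikely(x) __builtin_expect(!!(x), 0)

	/* Hypothetical stand-in for a queue flag test such as
	 * test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags). */
	static int queue_stopped;

	static int check_queue(void)
	{
		if (unlikely(queue_stopped))	/* rare early-exit path */
			return -1;

		return 0;			/* common path, laid out fall-through */
	}

	int main(void)
	{
		printf("check_queue() = %d\n", check_queue());
		return 0;
	}

As in the patch, unlikely() goes on the rare early-return and error checks (stopped, dead, draining queues, barrier requests), while likely() marks the expected success case, as in blk_get_queue().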
