Diffstat (limited to 'block/blk-core.c')

 block/blk-core.c | 68 ++++++++++++++++++++++++++++----------------------------
 1 file changed, 34 insertions(+), 34 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index b754a4a2f9bd..1905aaba49fb 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -54,15 +54,16 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
 
 static void drive_stat_acct(struct request *rq, int new_io)
 {
+	struct hd_struct *part;
 	int rw = rq_data_dir(rq);
 
 	if (!blk_fs_request(rq) || !rq->rq_disk)
 		return;
 
-	if (!new_io) {
-		__all_stat_inc(rq->rq_disk, merges[rw], rq->sector);
-	} else {
-		struct hd_struct *part = get_part(rq->rq_disk, rq->sector);
+	part = get_part(rq->rq_disk, rq->sector);
+	if (!new_io)
+		__all_stat_inc(rq->rq_disk, part, merges[rw], rq->sector);
+	else {
 		disk_round_stats(rq->rq_disk);
 		rq->rq_disk->in_flight++;
 		if (part) {
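
This hunk sets the pattern for the rest of the patch: get_part() is hoisted out of the else branch so the merge path can account per-partition statistics too, and the __all_stat_* helpers take the hd_struct explicitly instead of re-resolving the sector on every call (the sector argument is still passed to match the existing call shape). A minimal sketch of what the new calling convention implies, assuming genhd.h-style part_stat_inc()/disk_stat_inc() primitives; illustrative, not the verbatim kernel macro:

	/*
	 * Illustrative only: bump the whole-disk counter and, when the
	 * request mapped to a partition, the per-partition counter too.
	 */
	#define __all_stat_inc(gendiskp, part, field, sector)	\
	({	if (part)					\
			part_stat_inc(part, field);		\
		disk_stat_inc(gendiskp, field);			\
	})
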
@@ -253,9 +254,11 @@ EXPORT_SYMBOL(__generic_unplug_device);
  **/
 void generic_unplug_device(struct request_queue *q)
 {
-	spin_lock_irq(q->queue_lock);
-	__generic_unplug_device(q);
-	spin_unlock_irq(q->queue_lock);
+	if (blk_queue_plugged(q)) {
+		spin_lock_irq(q->queue_lock);
+		__generic_unplug_device(q);
+		spin_unlock_irq(q->queue_lock);
+	}
 }
 EXPORT_SYMBOL(generic_unplug_device);
 
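
generic_unplug_device() now peeks at the plugged flag before taking the queue lock, so the common already-unplugged case avoids an irq-disabling lock round trip entirely. The unlocked read may be stale, but that is safe as long as the slow path re-checks under the lock, which __generic_unplug_device() does via blk_remove_plug(). A user-space sketch of the same check-then-lock shape, with illustrative names rather than kernel API:

	#include <pthread.h>
	#include <stdbool.h>

	struct queue {
		pthread_mutex_t lock;
		bool plugged;
	};

	static void __unplug(struct queue *q)	/* caller holds q->lock */
	{
		if (!q->plugged)	/* authoritative re-check under the lock */
			return;
		q->plugged = false;
		/* ... run the request function ... */
	}

	void unplug(struct queue *q)
	{
		if (q->plugged) {	/* racy peek: a stale read is harmless */
			pthread_mutex_lock(&q->lock);
			__unplug(q);
			pthread_mutex_unlock(&q->lock);
		}
	}
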
@@ -479,6 +482,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	kobject_init(&q->kobj, &blk_queue_ktype);
 
 	mutex_init(&q->sysfs_lock);
+	spin_lock_init(&q->__queue_lock);
 
 	return q;
 }
@@ -541,10 +545,8 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	 * if caller didn't supply a lock, they get per-queue locking with
 	 * our embedded lock
 	 */
-	if (!lock) {
-		spin_lock_init(&q->__queue_lock);
+	if (!lock)
 		lock = &q->__queue_lock;
-	}
 
 	q->request_fn		= rfn;
 	q->prep_rq_fn		= NULL;
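
Together, these two hunks move spin_lock_init() from blk_init_queue_node() to blk_alloc_queue_node(), so the embedded __queue_lock is valid from the moment the queue is allocated rather than only once a caller hits the !lock branch; blk_init_queue_node() then merely points queue_lock at it. A condensed sketch of the resulting split, with illustrative names standing in for the full allocation path:

	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct queue {
		spinlock_t __queue_lock;
		spinlock_t *queue_lock;	/* caller's lock, or the embedded one */
	};

	struct queue *queue_alloc(gfp_t gfp)
	{
		struct queue *q = kzalloc(sizeof(*q), gfp);
		if (!q)
			return NULL;
		spin_lock_init(&q->__queue_lock);	/* usable from birth */
		return q;
	}

	void queue_init(struct queue *q, spinlock_t *lock)
	{
		if (!lock)			/* no caller-supplied lock */
			lock = &q->__queue_lock;
		q->queue_lock = lock;
	}
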
@@ -804,35 +806,32 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 	rq = get_request(q, rw_flags, bio, GFP_NOIO);
 	while (!rq) {
 		DEFINE_WAIT(wait);
+		struct io_context *ioc;
 		struct request_list *rl = &q->rq;
 
 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
 				TASK_UNINTERRUPTIBLE);
 
-		rq = get_request(q, rw_flags, bio, GFP_NOIO);
-
-		if (!rq) {
-			struct io_context *ioc;
-
-			blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+		blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
 
-			__generic_unplug_device(q);
-			spin_unlock_irq(q->queue_lock);
-			io_schedule();
+		__generic_unplug_device(q);
+		spin_unlock_irq(q->queue_lock);
+		io_schedule();
 
-			/*
-			 * After sleeping, we become a "batching" process and
-			 * will be able to allocate at least one request, and
-			 * up to a big batch of them for a small period time.
-			 * See ioc_batching, ioc_set_batching
-			 */
-			ioc = current_io_context(GFP_NOIO, q->node);
-			ioc_set_batching(q, ioc);
+		/*
+		 * After sleeping, we become a "batching" process and
+		 * will be able to allocate at least one request, and
+		 * up to a big batch of them for a small period time.
+		 * See ioc_batching, ioc_set_batching
+		 */
+		ioc = current_io_context(GFP_NOIO, q->node);
+		ioc_set_batching(q, ioc);
 
-			spin_lock_irq(q->queue_lock);
-		}
+		spin_lock_irq(q->queue_lock);
 		finish_wait(&rl->wait[rw], &wait);
-	}
+
+		rq = get_request(q, rw_flags, bio, GFP_NOIO);
+	};
 
 	return rq;
 }
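
The rewrite flattens get_request_wait(): the old body retried get_request() right after prepare_to_wait_exclusive() and only slept when that retry failed, nesting the entire sleep path inside if (!rq). The new body unconditionally unplugs, sleeps, and marks the task as batching, then retries once at the bottom of the loop, leaving a single retry site. (The trailing `};` closing the while body is a stray empty statement; it compiles, but the semicolon is redundant.) A user-space analogue of the reshaped loop, with illustrative names:

	#include <pthread.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  freed = PTHREAD_COND_INITIALIZER;
	static int free_slots = 4;

	static int try_take(void)	/* caller holds `lock` */
	{
		if (free_slots > 0) {
			free_slots--;
			return 1;
		}
		return 0;
	}

	void take_slot(void)		/* mirrors the reshaped get_request_wait() */
	{
		pthread_mutex_lock(&lock);
		int got = try_take();
		while (!got) {
			/* sleep; the lock is dropped and retaken, like the
			 * spin_unlock_irq()/io_schedule()/spin_lock_irq() trio */
			pthread_cond_wait(&freed, &lock);
			got = try_take();	/* single retry, at loop bottom */
		}
		pthread_mutex_unlock(&lock);
	}
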
@@ -1536,10 +1535,11 @@ static int __end_that_request_first(struct request *req, int error,
 	}
 
 	if (blk_fs_request(req) && req->rq_disk) {
+		struct hd_struct *part = get_part(req->rq_disk, req->sector);
 		const int rw = rq_data_dir(req);
 
-		all_stat_add(req->rq_disk, sectors[rw],
-				nr_bytes >> 9, req->sector);
+		all_stat_add(req->rq_disk, part, sectors[rw],
+				nr_bytes >> 9, req->sector);
 	}
 
 	total_bytes = bio_nbytes = 0;
@@ -1725,8 +1725,8 @@ static void end_that_request_last(struct request *req, int error)
 		const int rw = rq_data_dir(req);
 		struct hd_struct *part = get_part(disk, req->sector);
 
-		__all_stat_inc(disk, ios[rw], req->sector);
-		__all_stat_add(disk, ticks[rw], duration, req->sector);
+		__all_stat_inc(disk, part, ios[rw], req->sector);
+		__all_stat_add(disk, part, ticks[rw], duration, req->sector);
 		disk_round_stats(disk);
 		disk->in_flight--;
 		if (part) {
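
Completion-side accounting follows the same convention: __all_stat_inc() and __all_stat_add() receive the already-looked-up hd_struct, so ios and ticks here, and sectors in the __end_that_request_first() hunk above, are bumped for both the gendisk and the partition in one call. A companion sketch to the one after the first hunk, again assuming genhd.h-style primitives and not quoting the kernel's exact macro:

	/*
	 * Illustrative only: add `addnd` to the whole-disk counter and,
	 * when the request mapped to a partition, to its counter too.
	 */
	#define __all_stat_add(gendiskp, part, field, addnd, sector)	\
	({	if (part)						\
			part_stat_add(part, field, addnd);		\
		disk_stat_add(gendiskp, field, addnd);			\
	})
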