Diffstat (limited to 'block/ll_rw_blk.c')

 block/ll_rw_blk.c | 39 +++++++++++++++++++++++--------------
 1 file changed, 25 insertions(+), 14 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 062067fa7ead..5b26af8597f3 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -785,6 +785,8 @@ void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
 	t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
 	t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
 	t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
+	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
+		clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
 }
 
 EXPORT_SYMBOL(blk_queue_stack_limits);
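
Note: blk_queue_stack_limits() stacks limits by taking the more restrictive
of the two queues' values (min for sizes, max for the hardware sector size);
the added lines extend that rule to a capability bit, which the top queue may
keep only if the bottom queue also has it. A sketch of the general pattern,
with FEATURE as an illustrative placeholder flag:

	t->some_limit = min(t->some_limit, b->some_limit);	/* limits: stricter wins */
	if (!test_bit(FEATURE, &b->queue_flags))	/* capabilities: kept only */
		clear_bit(FEATURE, &t->queue_flags);	/* if both queues have them */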
@@ -906,17 +908,15 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
 				__FUNCTION__, depth);
 	}
 
-	tag_index = kmalloc(depth * sizeof(struct request *), GFP_ATOMIC);
+	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
 	if (!tag_index)
 		goto fail;
 
 	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
-	tag_map = kmalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
+	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
 	if (!tag_map)
 		goto fail;
 
-	memset(tag_index, 0, depth * sizeof(struct request *));
-	memset(tag_map, 0, nr_ulongs * sizeof(unsigned long));
 	tags->real_max_depth = depth;
 	tags->max_depth = depth;
 	tags->tag_index = tag_index;
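
Note: kzalloc() combines the allocation and the zeroing into one call, which
is why the two trailing memset() lines can go away. A rough sketch of the
equivalence (the helper name here is invented; kzalloc itself is the real
kernel API):

	static inline void *alloc_zeroed(size_t size, gfp_t flags)
	{
		void *p = kmalloc(size, flags);

		if (p)
			memset(p, 0, size);
		return p;
	}

With kzalloc() each buffer is zeroed as soon as it exists, so there is no
window in which an allocated but uninitialized tag_index or tag_map is live.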
@@ -2479,10 +2479,12 @@ void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
 	rq->rq_disk = bd_disk;
 	rq->flags |= REQ_NOMERGE;
 	rq->end_io = done;
-	elv_add_request(q, rq, where, 1);
-	generic_unplug_device(q);
+	WARN_ON(irqs_disabled());
+	spin_lock_irq(q->queue_lock);
+	__elv_add_request(q, rq, where, 1);
+	__generic_unplug_device(q);
+	spin_unlock_irq(q->queue_lock);
 }
 
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
 /**
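Note: plain elv_add_request() and generic_unplug_device() take q->queue_lock
internally; the rewritten body takes the lock once and uses the __-prefixed
variants, which expect it to be held. Because spin_unlock_irq() re-enables
interrupts unconditionally, the function must not be entered with interrupts
disabled, which is what the WARN_ON documents. A sketch of that contract:

	WARN_ON(irqs_disabled());	/* caller bug if irqs are already off */
	spin_lock_irq(q->queue_lock);	/* disables irqs */
	/* ... queue the request, kick the queue ... */
	spin_unlock_irq(q->queue_lock);	/* irqs are on again here, no matter
					 * what state the caller was in */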
@@ -3512,7 +3514,7 @@ int __init blk_dev_init(void)
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
 			sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
 
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 
 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
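
Note: for_each_possible_cpu() is the explicit spelling of what the old
for_each_cpu() already did: iterate over every CPU that could ever come
online, not just those currently present. Roughly, in this kernel generation:

	#define for_each_possible_cpu(cpu) \
		for_each_cpu_mask((cpu), cpu_possible_map)

Initializing blk_cpu_done for all possible CPUs means a CPU brought online
later by hotplug never sees an uninitialized list head.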
@@ -3537,11 +3539,17 @@ void put_io_context(struct io_context *ioc)
 	BUG_ON(atomic_read(&ioc->refcount) == 0);
 
 	if (atomic_dec_and_test(&ioc->refcount)) {
+		struct cfq_io_context *cic;
+
 		rcu_read_lock();
 		if (ioc->aic && ioc->aic->dtor)
 			ioc->aic->dtor(ioc->aic);
-		if (ioc->cic && ioc->cic->dtor)
-			ioc->cic->dtor(ioc->cic);
+		if (ioc->cic_root.rb_node != NULL) {
+			struct rb_node *n = rb_first(&ioc->cic_root);
+
+			cic = rb_entry(n, struct cfq_io_context, rb_node);
+			cic->dtor(ioc);
+		}
 		rcu_read_unlock();
 
 		kmem_cache_free(iocontext_cachep, ioc);
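
Note: struct io_context previously held a single cfq_io_context pointer
(->cic); it now holds an rb-tree root (->cic_root), allowing one io_context
to carry a separate cfq_io_context per queue it has issued I/O against.
rb_entry() is container_of() for rb-tree nodes, so the pattern above maps
the leftmost node back to its enclosing structure:

	struct rb_node *n = rb_first(&ioc->cic_root);	/* leftmost node, or NULL */
	cic = rb_entry(n, struct cfq_io_context, rb_node);

Only the first entry's ->dtor is called; presumably (a reading of the patch,
not stated in it) that destructor walks and frees the remainder of the tree.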
@@ -3554,6 +3562,7 @@ void exit_io_context(void)
 {
 	unsigned long flags;
 	struct io_context *ioc;
+	struct cfq_io_context *cic;
 
 	local_irq_save(flags);
 	task_lock(current);
@@ -3565,9 +3574,11 @@ void exit_io_context(void)
 
 	if (ioc->aic && ioc->aic->exit)
 		ioc->aic->exit(ioc->aic);
-	if (ioc->cic && ioc->cic->exit)
-		ioc->cic->exit(ioc->cic);
+	if (ioc->cic_root.rb_node != NULL) {
+		cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node);
+		cic->exit(ioc);
+	}
 
 	put_io_context(ioc);
 }
@@ -3596,7 +3607,7 @@ struct io_context *current_io_context(gfp_t gfp_flags)
 		ret->last_waited = jiffies; /* doesn't matter... */
 		ret->nr_batch_requests = 0; /* because this is 0 */
 		ret->aic = NULL;
-		ret->cic = NULL;
+		ret->cic_root.rb_node = NULL;
 		tsk->io_context = ret;
 	}
 
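
Note: setting cic_root.rb_node to NULL open-codes an empty-tree
initialization; the rbtree header provides a macro for the same thing:

	ret->cic_root = RB_ROOT;	/* RB_ROOT expands to an empty rb_root */

Either form leaves cic_root.rb_node NULL, which is the emptiness check the
put_io_context() and exit_io_context() paths above rely on, and for which
rb_first() returns NULL.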