author     Mike Snitzer <snitzer@redhat.com>        2010-06-03 13:34:52 -0400
committer  Jens Axboe <jaxboe@fusionio.com>         2010-06-04 07:47:06 -0400
commit     c86d1b8ae622e1ea5d20e98bd72fbd7d9dd69016 (patch)
tree       92353647c38f99ee4b8bf6a29b9ae632c7be14e7 /block
parent     ff9da691c0498ff81fdd014e7a0731dab2337dac (diff)
block: avoid unconditionally freeing previously allocated request_queue
On blk_init_allocated_queue_node failure, only free the request_queue if
it wasn't previously allocated outside the block layer
(e.g. when blk_init_queue_node was the blk_init_allocated_queue_node caller).
This addresses an interface bug introduced by commit 01effb0 ("block:
allow initialization of previously allocated request_queue").
Otherwise the request_queue may be freed out from underneath a caller
that is managing the request_queue directly (e.g. a caller that uses
blk_alloc_queue + blk_init_allocated_queue_node).
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
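
To illustrate the interface bug, here is a minimal sketch of the externally
managed pattern the old error path broke: a driver pairs blk_alloc_queue with
blk_init_allocated_queue_node and expects to keep ownership of the queue. The
my_* names are hypothetical; only the block-layer calls come from the patch.

#include <linux/blkdev.h>

static struct request_queue *my_queue;	/* hypothetical driver state */

static int my_driver_init(request_fn_proc *rfn, spinlock_t *lock)
{
	/* The driver, not the block layer, owns this allocation. */
	my_queue = blk_alloc_queue(GFP_KERNEL);
	if (!my_queue)
		return -ENOMEM;

	/*
	 * Before this fix, a failure inside blk_init_allocated_queue_node
	 * freed the queue itself, leaving my_queue dangling. After the
	 * fix it only returns NULL and cleanup stays with the caller.
	 */
	if (!blk_init_allocated_queue_node(my_queue, rfn, lock, -1)) {
		blk_cleanup_queue(my_queue);
		my_queue = NULL;
		return -ENOMEM;
	}

	return 0;
}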
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	17 +++++++++++------
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 3bc5579d6f54..826d07078902 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -570,9 +570,17 @@ EXPORT_SYMBOL(blk_init_queue);
 struct request_queue *
 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 {
-	struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+	struct request_queue *uninit_q, *q;
 
-	return blk_init_allocated_queue_node(q, rfn, lock, node_id);
+	uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+	if (!uninit_q)
+		return NULL;
+
+	q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
+	if (!q)
+		blk_cleanup_queue(uninit_q);
+
+	return q;
 }
 EXPORT_SYMBOL(blk_init_queue_node);
 
@@ -592,10 +600,8 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
 		return NULL;
 
 	q->node = node_id;
-	if (blk_init_free_list(q)) {
-		kmem_cache_free(blk_requestq_cachep, q);
+	if (blk_init_free_list(q))
 		return NULL;
-	}
 
 	q->request_fn = rfn;
 	q->prep_rq_fn = NULL;
@@ -618,7 +624,6 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
 		return q;
 	}
 
-	blk_put_queue(q);
 	return NULL;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue_node);
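
With this patch applied, the ownership rule is symmetric: blk_init_queue_node
cleans up the queue it allocated itself on failure, while a queue handed in
from outside is left for its owner to release. A minimal sketch of the
in-block-layer path under that rule (my_request_fn, my_lock, and my_probe are
hypothetical driver symbols):

#include <linux/blkdev.h>

static DEFINE_SPINLOCK(my_lock);			/* hypothetical */

static void my_request_fn(struct request_queue *q)	/* hypothetical */
{
	/* dequeue and service requests here */
}

static int my_probe(void)
{
	struct request_queue *q;

	/* Allocation and initialization both happen in the block layer. */
	q = blk_init_queue_node(my_request_fn, &my_lock, -1);
	if (!q)
		return -ENOMEM;	/* the failed queue was already released
				 * internally via blk_cleanup_queue() */

	/* q is fully initialized; the driver owns it from here on. */
	blk_cleanup_queue(q);	/* normal teardown when the device goes away */
	return 0;
}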