author		Jens Axboe <jens.axboe@oracle.com>	2007-10-25 04:14:47 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2007-10-29 06:33:06 -0400
commit		6eca9004dfcb274a502438a591df5b197690afb1
tree		fd281ef7c016fbae03e2a27e42a53efd37c3ec63
parent		3a424f2d56613acfb9e583ec9c85a2be3e3af028
[BLOCK] Fix bad sharing of tag busy list on queues with shared tag maps
For the locking to work, only the tag map and tag bit map may be shared
(incidentally, I was just explaining this to Nick yesterday, but I
apparently didn't review the code well enough myself). But we also share
the busy list! The busy_list must be queue private, or we need a lock
covering the whole blk_queue_tag as well.
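Concretely, this is where things have to end up. A minimal sketch: only
the members named in this patch and in the text above are real, the rest
of both structs is elided, and the comments are illustrative:

	struct blk_queue_tag {
		struct request **tag_index;	/* tag -> request map, shareable */
		unsigned long *tag_map;		/* tag bitmap, shareable */
		int busy;			/* tags currently in use */
		atomic_t refcnt;		/* queues sharing this map */
		/* no busy_list here: each sharing queue would touch it
		 * under its own queue lock, so no one lock covers it */
	};

	struct request_queue {
		struct blk_queue_tag *queue_tags;	/* possibly shared */
		struct list_head tag_busy_list;		/* queue private */
		...
	};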
So we have to move the busy_list to the queue. This'll work fine, and
it'll actually also fix a problem with blk_queue_invalidate_tags(), which
would invalidate tags across all queues sharing the tag map. That is a
bit confusing: the low level driver should call it for each queue
separately, since otherwise you cannot kill tags on just a single queue,
e.g. for a hard drive that stops responding. Since the function has no
callers currently, it's not an issue.
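With the list per queue, a low level driver could later do something like
the following from its error handling and hit only the device that went
away. This is a hypothetical caller (nothing in the tree uses the
function yet, and mydrv_device_timeout is made up), assuming the usual
rule that the queue lock is held around tag operations:

	/* hypothetical: requeue the tagged requests of one misbehaving
	 * device without disturbing other queues sharing the tag map */
	static void mydrv_device_timeout(struct request_queue *q)
	{
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_queue_invalidate_tags(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}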
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
 block/ll_rw_blk.c | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index a8a181072bf8..56f2646612e6 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -791,7 +791,6 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
 	retval = atomic_dec_and_test(&bqt->refcnt);
 	if (retval) {
 		BUG_ON(bqt->busy);
-		BUG_ON(!list_empty(&bqt->busy_list));
 
 		kfree(bqt->tag_index);
 		bqt->tag_index = NULL;
@@ -903,7 +902,6 @@ static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
 	if (init_tag_map(q, tags, depth))
 		goto fail;
 
-	INIT_LIST_HEAD(&tags->busy_list);
 	tags->busy = 0;
 	atomic_set(&tags->refcnt, 1);
 	return tags;
@@ -954,6 +952,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 	 */
 	q->queue_tags = tags;
 	q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
+	INIT_LIST_HEAD(&q->tag_busy_list);
 	return 0;
 fail:
 	kfree(tags);
@@ -1122,7 +1121,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	rq->tag = tag;
 	bqt->tag_index[tag] = rq;
 	blkdev_dequeue_request(rq);
-	list_add(&rq->queuelist, &bqt->busy_list);
+	list_add(&rq->queuelist, &q->tag_busy_list);
 	bqt->busy++;
 	return 0;
 }
@@ -1143,11 +1142,10 @@ EXPORT_SYMBOL(blk_queue_start_tag);
  **/
 void blk_queue_invalidate_tags(struct request_queue *q)
 {
-	struct blk_queue_tag *bqt = q->queue_tags;
 	struct list_head *tmp, *n;
 	struct request *rq;
 
-	list_for_each_safe(tmp, n, &bqt->busy_list) {
+	list_for_each_safe(tmp, n, &q->tag_busy_list) {
 		rq = list_entry_rq(tmp);
 
 		if (rq->tag == -1) {