author     Jens Axboe <jens.axboe@oracle.com>    2007-10-25 04:14:47 -0400
committer  Jens Axboe <jens.axboe@oracle.com>    2007-10-29 06:33:06 -0400
commit     6eca9004dfcb274a502438a591df5b197690afb1
tree       fd281ef7c016fbae03e2a27e42a53efd37c3ec63
parent     3a424f2d56613acfb9e583ec9c85a2be3e3af028
[BLOCK] Fix bad sharing of tag busy list on queues with shared tag maps
For the locking to work, only the tag map and the tag bit map may be shared
(incidentally, I was just explaining this to Nick yesterday, but I apparently
didn't review the code well enough myself). But we also share the busy list!
The busy_list must be queue private, or we need a blk_queue_tag covering lock
as well. So we have to move the busy_list to the queue.

This'll work fine, and it'll actually also fix a problem with
blk_queue_invalidate_tags(), which would invalidate tags across all shared
queues. This is a bit confusing: the low level driver should call it for each
queue separately, since otherwise you cannot kill tags on just a single queue
for e.g. a hard drive that stops responding. Since the function has no
callers currently, it's not an issue.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
-rw-r--r--    block/ll_rw_blk.c          8
-rw-r--r--    include/linux/blkdev.h     2
2 files changed, 4 insertions(+), 6 deletions(-)
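The gist of the change, as a standalone illustration: two request queues may
share one struct blk_queue_tag (the tag map and bit map), but after this patch
each queue owns its own busy list, so invalidating tags on one queue cannot
touch requests queued on another. The C sketch below is a simplified
user-space model of that ownership split, not kernel code; the struct and
function names are hypothetical stand-ins invented for the example.

/* Simplified user-space model (hypothetical names, not the kernel API). */
#include <stdio.h>

struct shared_tag_map {                 /* stands in for struct blk_queue_tag */
        int busy;                       /* depth shared by every queue using the map */
};

struct queue {                          /* stands in for struct request_queue */
        struct shared_tag_map *tags;    /* shared: tag map / bit map live here */
        int busy_on_this_queue;         /* private: plays the role of q->tag_busy_list */
};

static void start_tag(struct queue *q)
{
        q->tags->busy++;                /* accounting stays in the shared map */
        q->busy_on_this_queue++;        /* but the busy request is tracked per queue */
}

static void invalidate_tags(struct queue *q)
{
        /* Touches only this queue's busy requests, mirroring the fixed
         * blk_queue_invalidate_tags(), which now walks q->tag_busy_list. */
        q->tags->busy -= q->busy_on_this_queue;
        q->busy_on_this_queue = 0;
}

int main(void)
{
        struct shared_tag_map map = { 0 };
        struct queue a = { &map, 0 }, b = { &map, 0 };

        start_tag(&a);
        start_tag(&b);
        invalidate_tags(&a);            /* b's tagged request is untouched */

        printf("shared busy=%d, a=%d, b=%d\n",
               map.busy, a.busy_on_this_queue, b.busy_on_this_queue);
        return 0;
}

Run, the sketch reports one request still busy on queue b after queue a's
tags are invalidated, which is the isolation the per-queue tag_busy_list buys.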
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index a8a181072bf8..56f2646612e6 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -791,7 +791,6 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
        retval = atomic_dec_and_test(&bqt->refcnt);
        if (retval) {
                BUG_ON(bqt->busy);
-               BUG_ON(!list_empty(&bqt->busy_list));
 
                kfree(bqt->tag_index);
                bqt->tag_index = NULL;
@@ -903,7 +902,6 @@ static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
        if (init_tag_map(q, tags, depth))
                goto fail;
 
-       INIT_LIST_HEAD(&tags->busy_list);
        tags->busy = 0;
        atomic_set(&tags->refcnt, 1);
        return tags;
@@ -954,6 +952,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
         */
        q->queue_tags = tags;
        q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
+       INIT_LIST_HEAD(&q->tag_busy_list);
        return 0;
 fail:
        kfree(tags);
@@ -1122,7 +1121,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
        rq->tag = tag;
        bqt->tag_index[tag] = rq;
        blkdev_dequeue_request(rq);
-       list_add(&rq->queuelist, &bqt->busy_list);
+       list_add(&rq->queuelist, &q->tag_busy_list);
        bqt->busy++;
        return 0;
 }
@@ -1143,11 +1142,10 @@ EXPORT_SYMBOL(blk_queue_start_tag);
  **/
 void blk_queue_invalidate_tags(struct request_queue *q)
 {
-       struct blk_queue_tag *bqt = q->queue_tags;
        struct list_head *tmp, *n;
        struct request *rq;
 
-       list_for_each_safe(tmp, n, &bqt->busy_list) {
+       list_for_each_safe(tmp, n, &q->tag_busy_list) {
                rq = list_entry_rq(tmp);
 
                if (rq->tag == -1) {
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index bbf906a0b419..8396db24d019 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -341,7 +341,6 @@ enum blk_queue_state {
 struct blk_queue_tag {
        struct request **tag_index;     /* map of busy tags */
        unsigned long *tag_map;         /* bit map of free/busy tags */
-       struct list_head busy_list;     /* fifo list of busy tags */
        int busy;                       /* current depth */
        int max_depth;                  /* what we will send to device */
        int real_max_depth;             /* what the array can hold */
@@ -435,6 +434,7 @@ struct request_queue
        unsigned int            dma_alignment;
 
        struct blk_queue_tag    *queue_tags;
+       struct list_head        tag_busy_list;
 
        unsigned int            nr_sorted;
        unsigned int            in_flight;