about summary refs log tree commit diff stats
path: root/block/ll_rw_blk.c
diff options
context:
space:
mode:
authorJens Axboe <axboe@kernel.dk>2006-09-21 14:37:22 -0400
committerJens Axboe <axboe@nelson.home.kernel.dk>2006-09-30 14:52:34 -0400
commit059af497c23492cb1ddcbba11c09dad385960bc0 (patch)
tree656bbfa7104e065f0da8eac0c14b62ba1c088214 /block/ll_rw_blk.c
parent0fe23479577124bd2687e6783e39fa0fa4c28005 (diff)
[PATCH] blk_queue_start_tag() shared map race fix
If we share the tag map between two or more queues, then we cannot use __set_bit() to set the bit. In fact we need to make sure we atomically acquire this tag, so loop using test_and_set_bit() to protect against that.

Noticed by Mike Christie <michaelc@cs.wisc.edu>

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--  block/ll_rw_blk.c | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index f757ed413214..83425fb3c8db 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1171,11 +1171,16 @@ int blk_queue_start_tag(request_queue_t *q, struct request *rq)
 		BUG();
 	}
 
-	tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
-	if (tag >= bqt->max_depth)
-		return 1;
+	/*
+	 * Protect against shared tag maps, as we may not have exclusive
+	 * access to the tag map.
+	 */
+	do {
+		tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
+		if (tag >= bqt->max_depth)
+			return 1;
 
-	__set_bit(tag, bqt->tag_map);
+	} while (test_and_set_bit(tag, bqt->tag_map));
 
 	rq->cmd_flags |= REQ_QUEUED;
 	rq->tag = tag;