diff options
-rw-r--r-- | block/ll_rw_blk.c | 10 |
1 files changed, 10 insertions, 0 deletions
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index cd20367061d7..ed39313c4085 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c | |||
@@ -1085,6 +1085,12 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq) | |||
1085 | 1085 | ||
1086 | bqt->tag_index[tag] = NULL; | 1086 | bqt->tag_index[tag] = NULL; |
1087 | 1087 | ||
1088 | /* | ||
1089 | * We use test_and_clear_bit's memory ordering properties here. | ||
1090 | * The tag_map bit acts as a lock for tag_index[bit], so we need | ||
1091 | * a barrier before clearing the bit (precisely: release semantics). | ||
1092 | * Could use clear_bit_unlock when it is merged. | ||
1093 | */ | ||
1088 | if (unlikely(!test_and_clear_bit(tag, bqt->tag_map))) { | 1094 | if (unlikely(!test_and_clear_bit(tag, bqt->tag_map))) { |
1089 | printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n", | 1095 | printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n", |
1090 | __FUNCTION__, tag); | 1096 | __FUNCTION__, tag); |
@@ -1137,6 +1143,10 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq) | |||
1137 | return 1; | 1143 | return 1; |
1138 | 1144 | ||
1139 | } while (test_and_set_bit(tag, bqt->tag_map)); | 1145 | } while (test_and_set_bit(tag, bqt->tag_map)); |
1146 | /* | ||
1147 | * We rely on test_and_set_bit providing lock memory ordering semantics | ||
1148 | * (could use test_and_set_bit_lock when it is merged). | ||
1149 | */ | ||
1140 | 1150 | ||
1141 | rq->cmd_flags |= REQ_QUEUED; | 1151 | rq->cmd_flags |= REQ_QUEUED; |
1142 | rq->tag = tag; | 1152 | rq->tag = tag; |