author    Jun'ichi Nomura <j-nomura@ce.jp.nec.com>  2005-09-09 19:23:42 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>     2005-09-09 19:39:09 -0400
commit    844e8d904a7c1446e3f040683b4a0645c3eb168f (patch)
tree      5e277539dab95d6325cec564a91e0e70acdce021 /drivers/md
parent    e5dcdd80a60627371f40797426273048630dc8ca (diff)
[PATCH] dm: fix rh_dec()/rh_inc() race in dm-raid1.c
Fix another bug in dm-raid1.c where a dirty region may stay on, or be moved
to, the clean list and be freed while still in use.

It happens as follows:

   CPU0                                     CPU1
   ------------------------------------------------------------------------
   rh_dec()
     if (atomic_dec_and_test(pending))
        <the region is still marked dirty>
                                            rh_inc()
                                              if the region is clean
                                                 mark the region dirty
                                                 and remove from clean list
        mark the region clean
        and move to clean list
                                              atomic_inc(pending)

At this stage, the region is in the clean list and will be mistakenly
reclaimed by rh_update_states() later.

Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
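To make the locking pattern of the fix easier to follow in isolation, here is a
minimal user-space sketch, not the kernel code: it mimics rh_inc()/rh_dec()
with C11 atomics and a pthread mutex standing in for rh->region_lock, and a
plain on_clean_list flag standing in for the clean list. The struct layout,
the on_clean_list field and main() are illustrative assumptions; only the
ordering mirrors the patch: rh_inc() raises pending and changes the state
under the lock, and rh_dec() re-checks pending under the lock before it marks
the region clean.

/* Minimal sketch of the re-check-under-lock pattern; NOT the kernel code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum region_state { RH_CLEAN, RH_DIRTY };

struct region {
	enum region_state state;
	atomic_int pending;           /* in-flight writes to this region */
	pthread_mutex_t region_lock;  /* stands in for rh->region_lock */
	bool on_clean_list;           /* stands in for the clean list */
};

static void rh_inc(struct region *reg)
{
	atomic_fetch_add(&reg->pending, 1);

	pthread_mutex_lock(&reg->region_lock);
	if (reg->state == RH_CLEAN) {
		/* dirty the region and take it off the clean list */
		reg->state = RH_DIRTY;
		reg->on_clean_list = false;
	}
	pthread_mutex_unlock(&reg->region_lock);
}

static void rh_dec(struct region *reg)
{
	/* atomic_fetch_sub() returns the old value: 1 means we hit zero */
	if (atomic_fetch_sub(&reg->pending, 1) == 1) {
		pthread_mutex_lock(&reg->region_lock);
		/*
		 * Re-check under the lock: a racing rh_inc() may already
		 * have raised pending again, in which case the region must
		 * stay dirty and off the clean list.
		 */
		if (atomic_load(&reg->pending)) {
			pthread_mutex_unlock(&reg->region_lock);
			return;
		}
		reg->state = RH_CLEAN;
		reg->on_clean_list = true;   /* safe: no writes pending */
		pthread_mutex_unlock(&reg->region_lock);
	}
}

int main(void)
{
	struct region reg = { .state = RH_CLEAN, .on_clean_list = true };

	atomic_init(&reg.pending, 0);
	pthread_mutex_init(&reg.region_lock, NULL);

	rh_inc(&reg);   /* a write starts: region goes dirty */
	rh_dec(&reg);   /* the write completes: region may go clean again */
	printf("state=%s on_clean_list=%d\n",
	       reg.state == RH_CLEAN ? "RH_CLEAN" : "RH_DIRTY",
	       reg.on_clean_list);
	return 0;
}

The re-check in rh_dec() is what closes the window described above: even if
pending momentarily drops to zero, a racing rh_inc() that has already bumped
it keeps the region off the clean list.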
Diffstat (limited to 'drivers/md')
-rw-r--r--   drivers/md/dm-raid1.c | 12
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index b08df8b9b2ca..863282513753 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -375,16 +375,18 @@ static void rh_inc(struct region_hash *rh, region_t region)
 
 	read_lock(&rh->hash_lock);
 	reg = __rh_find(rh, region);
+
+	atomic_inc(&reg->pending);
+
+	spin_lock_irq(&rh->region_lock);
 	if (reg->state == RH_CLEAN) {
 		rh->log->type->mark_region(rh->log, reg->key);
 
-		spin_lock_irq(&rh->region_lock);
 		reg->state = RH_DIRTY;
 		list_del_init(&reg->list);	/* take off the clean list */
-		spin_unlock_irq(&rh->region_lock);
 	}
+	spin_unlock_irq(&rh->region_lock);
 
-	atomic_inc(&reg->pending);
 	read_unlock(&rh->hash_lock);
 }
 
@@ -408,6 +410,10 @@ static void rh_dec(struct region_hash *rh, region_t region)
 
 	if (atomic_dec_and_test(&reg->pending)) {
 		spin_lock_irqsave(&rh->region_lock, flags);
+		if (atomic_read(&reg->pending)) { /* check race */
+			spin_unlock_irqrestore(&rh->region_lock, flags);
+			return;
+		}
 		if (reg->state == RH_RECOVERING) {
 			list_add_tail(&reg->list, &rh->quiesced_regions);
 		} else {