path: root/drivers/md/bitmap.c
author		Neil Brown <neilb@notabene.brown>	2008-06-27 18:31:22 -0400
committer	Neil Brown <neilb@notabene.brown>	2008-06-27 18:31:22 -0400
commit		a0da84f35b25875870270d16b6eccda4884d61a7 (patch)
tree		3c092bcef7a8c8704054b02197156e1c803306b2	/drivers/md/bitmap.c
parent		0e13fe23a00ad88c737d91d94a050707c6139ce4 (diff)
Improve setting of "events_cleared" for write-intent bitmaps.
When an array is degraded, bits in the write-intent bitmap are not cleared, so that if the missing device is re-added, it can be synced by updating only those parts of the device that have changed since it was removed.

To enable this, an 'events_cleared' value is stored.  It is the event counter for the array the last time that any bits were cleared.

Sometimes - if a device disappears from an array while it is 'clean' - the events_cleared value gets updated incorrectly (there are subtle ordering issues between updating events in the main metadata and the bitmap metadata), resulting in the missing device appearing to require a full resync when it is re-added.

With this patch, we update events_cleared precisely when we are about to clear a bit in the bitmap.  We record events_cleared when we clear the bit internally, and copy that to the superblock, which is written out before the bit on storage.  This makes it more "obviously correct".

We also need to update events_cleared when the event count is going backwards (as happens on a dirty->clean transition of a non-degraded array).

Thanks to Mike Snitzer for identifying this problem and testing early "fixes".

Cc: "Mike Snitzer" <snitzer@gmail.com>
Signed-off-by: Neil Brown <neilb@suse.de>
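The ordering described above can be summarised in a short userspace sketch.  This is not the patch itself (the real changes are in the diff below), and the struct and function names are made up for illustration; it only models the rule "record events_cleared when a write completes, and flush it to the superblock before clearing the bit".

/*
 * Minimal, self-contained userspace sketch of the ordering described above.
 * The types and names here (bitmap_model, model_endwrite, model_daemon_work,
 * sb_events_cleared) are illustrative stand-ins, not the kernel symbols.
 */
#include <stdio.h>

struct mddev_model {
	unsigned long long events;		/* array-wide event counter */
};

struct bitmap_model {
	struct mddev_model *mddev;
	unsigned long long events_cleared;	/* last event at which bits were cleared */
	int need_sync;				/* in-memory value is newer than superblock */
	unsigned long long sb_events_cleared;	/* stands in for the on-disk superblock field */
};

/* On successful write completion, note that any future bit-clear happens
 * at (at least) the current event count, and flag the superblock as stale. */
static void model_endwrite(struct bitmap_model *bitmap, int success)
{
	if (success && bitmap->events_cleared < bitmap->mddev->events) {
		bitmap->events_cleared = bitmap->mddev->events;
		bitmap->need_sync = 1;
	}
}

/* Before actually clearing bits, push events_cleared out to the superblock,
 * so the metadata update reaches storage before the bit disappears. */
static void model_daemon_work(struct bitmap_model *bitmap)
{
	if (bitmap->need_sync) {
		bitmap->need_sync = 0;
		bitmap->sb_events_cleared = bitmap->events_cleared;
		/* the kernel would write the superblock page out here */
	}
	/* ...then clear the bit... */
}

int main(void)
{
	struct mddev_model md = { .events = 42 };
	struct bitmap_model bm = { .mddev = &md, .events_cleared = 0,
				   .need_sync = 0, .sb_events_cleared = 0 };

	model_endwrite(&bm, 1);		/* a write succeeded at event 42 */
	model_daemon_work(&bm);		/* superblock updated before bits are cleared */
	printf("events_cleared recorded on disk: %llu\n", bm.sb_events_cleared);
	return 0;
}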
Diffstat (limited to 'drivers/md/bitmap.c')
-rw-r--r--	drivers/md/bitmap.c	29
1 files changed, 24 insertions, 5 deletions
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index b26927ce889c..dedba16d42f7 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -454,8 +454,11 @@ void bitmap_update_sb(struct bitmap *bitmap)
 	spin_unlock_irqrestore(&bitmap->lock, flags);
 	sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
 	sb->events = cpu_to_le64(bitmap->mddev->events);
-	if (!bitmap->mddev->degraded)
-		sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
+	if (bitmap->mddev->events < bitmap->events_cleared) {
+		/* rocking back to read-only */
+		bitmap->events_cleared = bitmap->mddev->events;
+		sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
+	}
 	kunmap_atomic(sb, KM_USER0);
 	write_page(bitmap, bitmap->sb_page, 1);
 }
@@ -1085,9 +1088,19 @@ void bitmap_daemon_work(struct bitmap *bitmap)
 			} else
 				spin_unlock_irqrestore(&bitmap->lock, flags);
 			lastpage = page;
-/*
-			printk("bitmap clean at page %lu\n", j);
-*/
+
+			/* We are possibly going to clear some bits, so make
+			 * sure that events_cleared is up-to-date.
+			 */
+			if (bitmap->need_sync) {
+				bitmap_super_t *sb;
+				bitmap->need_sync = 0;
+				sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+				sb->events_cleared =
+					cpu_to_le64(bitmap->events_cleared);
+				kunmap_atomic(sb, KM_USER0);
+				write_page(bitmap, bitmap->sb_page, 1);
+			}
 			spin_lock_irqsave(&bitmap->lock, flags);
 			clear_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
 		}
@@ -1257,6 +1270,12 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
 			return;
 		}
 
+		if (success &&
+		    bitmap->events_cleared < bitmap->mddev->events) {
+			bitmap->events_cleared = bitmap->mddev->events;
+			bitmap->need_sync = 1;
+		}
+
 		if (!success && ! (*bmc & NEEDED_MASK))
 			*bmc |= NEEDED_MASK;
 