aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md/raid1.c
diff options
context:
space:
mode:
authorGoldwyn Rodrigues <rgoldwyn@suse.com>2015-08-18 18:14:42 -0400
committerGoldwyn Rodrigues <rgoldwyn@suse.com>2015-10-12 02:32:05 -0400
commitc40f341f1e7fd4eddcfc5881d94cfa8669071ee6 (patch)
treed8d572cb6b88dcd1102596d31b2bd153f79fdaab /drivers/md/raid1.c
parent3c462c880b52aae2cfbbb8db8b401eef118cc128 (diff)
md-cluster: Use a small window for resync
Suspending the entire device for resync could take too long. Resync in small chunks. cluster's resync window (32M) is maintained in r1conf as cluster_sync_low and cluster_sync_high and processed in raid1's sync_request(). If the current resync is outside the cluster resync window: 1. Set the cluster_sync_low to curr_resync_completed. 2. Check if the sync will fit in the new window, if not issue a wait_barrier() and set cluster_sync_low to sector_nr. 3. Set cluster_sync_high to cluster_sync_low + resync_window. 4. Send a message to all nodes so they may add it in their suspension list. bitmap_cond_end_sync is modified to allow to force a sync inorder to get the curr_resync_completed uptodate with the sector passed. Signed-off-by: Goldwyn Rodrigues <rgoldwyn@suse.com> Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers/md/raid1.c')
-rw-r--r--drivers/md/raid1.c26
1 file changed, 25 insertions, 1 deletion
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 049df6c4a8cc..1dd13bb52940 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -90,6 +90,8 @@ static void r1bio_pool_free(void *r1_bio, void *data)
90#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) 90#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
91#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH) 91#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
92#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9) 92#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
93#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
94#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
93#define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS) 95#define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS)
94 96
95static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) 97static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
@@ -2488,6 +2490,13 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
2488 2490
2489 bitmap_close_sync(mddev->bitmap); 2491 bitmap_close_sync(mddev->bitmap);
2490 close_sync(conf); 2492 close_sync(conf);
2493
2494 if (mddev_is_clustered(mddev)) {
2495 conf->cluster_sync_low = 0;
2496 conf->cluster_sync_high = 0;
2497 /* Send zeros to mark end of resync */
2498 md_cluster_ops->resync_info_update(mddev, 0, 0);
2499 }
2491 return 0; 2500 return 0;
2492 } 2501 }
2493 2502
@@ -2508,7 +2517,12 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
2508 return sync_blocks; 2517 return sync_blocks;
2509 } 2518 }
2510 2519
2511 bitmap_cond_end_sync(mddev->bitmap, sector_nr); 2520 /* we are incrementing sector_nr below. To be safe, we check against
2521 * sector_nr + two times RESYNC_SECTORS
2522 */
2523
2524 bitmap_cond_end_sync(mddev->bitmap, sector_nr,
2525 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
2512 r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO); 2526 r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
2513 2527
2514 raise_barrier(conf, sector_nr); 2528 raise_barrier(conf, sector_nr);
@@ -2699,6 +2713,16 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
2699 bio_full: 2713 bio_full:
2700 r1_bio->sectors = nr_sectors; 2714 r1_bio->sectors = nr_sectors;
2701 2715
2716 if (mddev_is_clustered(mddev) &&
2717 conf->cluster_sync_high < sector_nr + nr_sectors) {
2718 conf->cluster_sync_low = mddev->curr_resync_completed;
2719 conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
2720 /* Send resync message */
2721 md_cluster_ops->resync_info_update(mddev,
2722 conf->cluster_sync_low,
2723 conf->cluster_sync_high);
2724 }
2725
2702 /* For a user-requested sync, we read all readable devices and do a 2726 /* For a user-requested sync, we read all readable devices and do a
2703 * compare 2727 * compare
2704 */ 2728 */