author     NeilBrown <neilb@suse.de>   2009-05-06 22:49:35 -0400
committer  NeilBrown <neilb@suse.de>   2009-05-06 22:49:35 -0400
commit     dd71cf6b2773310b01c6fe6c773064c80fd2476b
tree       33a4cc4fe4db45ec4f1a8f477bb294a0b685efa4 /drivers
parent     db305e507d554430a69ede901a6308e6ecb72349
md: tidy up status_resync to handle large arrays.

Two problems in status_resync.

1/ It still used Kilobytes as the basic block unit, while most code
   now uses sectors uniformly.
2/ It doesn't allow for the possibility that max_sectors exceeds the
   range of "unsigned long".

So:
 - change "max_blocks" to "max_sectors", and store sector numbers in
   there and in 'resync'
 - make 'rt' a 'sector_t' so it can temporarily hold the number of
   remaining sectors
 - use sector_div rather than normal division
 - change the magic '100' used to preserve precision to '32':
   + making it a power of 2 makes division easier
   + it doesn't need to be as large as it was when we averaged speed
     over the entire run; now we average speed over the last 30
     seconds or so

Reported-by: "Mario 'BitKoenig' Holbe" <Mario.Holbe@TU-Ilmenau.DE>
Signed-off-by: NeilBrown <neilb@suse.de>
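
[Editorial note: the precision trick is easier to see outside the kernel.
Below is a minimal userspace sketch of the new finish-time estimate, a
model rather than the kernel code: sector_div() is replaced by plain
64-bit division, and the array size, mark delta and elapsed time are
invented for illustration.]

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* Stand-in numbers: a 3TiB array (in 512-byte sectors),
		 * mostly resynced, writing ~100MB/s since the mark. */
		uint64_t max_sectors = 6ULL * 1024 * 1024 * 1024;
		uint64_t resync      = 5ULL * 1024 * 1024 * 1024;
		unsigned long dt = 30;        /* seconds since the mark */
		unsigned long db = 6144000;   /* sectors written since the mark */

		/* Divide before multiplying so a 32-bit sector_t near its
		 * limit cannot overflow.  Scaling the divisor by 32 (and
		 * shifting right by 5 afterwards) keeps precision when the
		 * remaining sectors are close to db; '+1' avoids /0. */
		uint64_t rt = max_sectors - resync;   /* remaining sectors */
		rt /= db / 32 + 1;   /* sector_div(rt, db/32+1) in-kernel */
		rt *= dt;
		rt >>= 5;

		printf(" finish=%lu.%lumin\n",
		       (unsigned long)rt / 60, ((unsigned long)rt % 60) / 6);
		return 0;
	}

With these stand-in numbers it prints " finish=87.3min", which agrees
with ~512GiB remaining at the ~100MB/s implied by db/2/dt.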
Diffstat (limited to 'drivers')
 drivers/md/md.c | 45 ++++++++++++++++++++++++++++-----------------
 1 file changed, 28 insertions(+), 17 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 612343fdde94..5eb01a4d27ba 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5705,37 +5705,38 @@ static void status_unused(struct seq_file *seq)
 
 static void status_resync(struct seq_file *seq, mddev_t * mddev)
 {
-	sector_t max_blocks, resync, res;
-	unsigned long dt, db, rt;
+	sector_t max_sectors, resync, res;
+	unsigned long dt, db;
+	sector_t rt;
 	int scale;
 	unsigned int per_milli;
 
-	resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
+	resync = mddev->curr_resync - atomic_read(&mddev->recovery_active);
 
 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
-		max_blocks = mddev->resync_max_sectors >> 1;
+		max_sectors = mddev->resync_max_sectors;
 	else
-		max_blocks = mddev->dev_sectors / 2;
+		max_sectors = mddev->dev_sectors;
 
 	/*
 	 * Should not happen.
 	 */
-	if (!max_blocks) {
+	if (!max_sectors) {
 		MD_BUG();
 		return;
 	}
 	/* Pick 'scale' such that (resync>>scale)*1000 will fit
-	 * in a sector_t, and (max_blocks>>scale) will fit in a
+	 * in a sector_t, and (max_sectors>>scale) will fit in a
 	 * u32, as those are the requirements for sector_div.
 	 * Thus 'scale' must be at least 10
 	 */
 	scale = 10;
 	if (sizeof(sector_t) > sizeof(unsigned long)) {
-		while ( max_blocks/2 > (1ULL<<(scale+32)))
+		while ( max_sectors/2 > (1ULL<<(scale+32)))
 			scale++;
 	}
 	res = (resync>>scale)*1000;
-	sector_div(res, (u32)((max_blocks>>scale)+1));
+	sector_div(res, (u32)((max_sectors>>scale)+1));
 
 	per_milli = res;
 	{
@@ -5756,25 +5757,35 @@ static void status_resync(struct seq_file *seq, mddev_t * mddev)
 		   (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
 		    "resync" : "recovery"))),
 		   per_milli/10, per_milli % 10,
-		   (unsigned long long) resync,
-		   (unsigned long long) max_blocks);
+		   (unsigned long long) resync/2,
+		   (unsigned long long) max_sectors/2);
 
 	/*
-	 * We do not want to overflow, so the order of operands and
-	 * the * 100 / 100 trick are important. We do a +1 to be
-	 * safe against division by zero. We only estimate anyway.
-	 *
 	 * dt: time from mark until now
 	 * db: blocks written from mark until now
 	 * rt: remaining time
+	 *
+	 * rt is a sector_t, so could be 32bit or 64bit.
+	 * So we divide before multiply in case it is 32bit and close
+	 * to the limit.
+	 * We scale the divisor (db) by 32 to avoid losing precision
+	 * near the end of resync when the number of remaining sectors
+	 * is close to 'db'.
+	 * We then divide rt by 32 after multiplying by db to compensate.
+	 * The '+1' avoids division by zero if db is very small.
 	 */
 	dt = ((jiffies - mddev->resync_mark) / HZ);
 	if (!dt) dt++;
 	db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
 		- mddev->resync_mark_cnt;
-	rt = (dt * ((unsigned long)(max_blocks-resync) / (db/2/100+1)))/100;
 
-	seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
+	rt = max_sectors - resync;    /* number of remaining sectors */
+	sector_div(rt, db/32+1);
+	rt *= dt;
+	rt >>= 5;
+
+	seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
+		   ((unsigned long)rt % 60)/6);
 
 	seq_printf(seq, " speed=%ldK/sec", db/2/dt);
 }
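
[Editorial note: the 'scale' logic at the top of the function can be
modelled the same way. This is again a userspace sketch, not kernel
code: sector_div() becomes a plain division, and the 16TiB array size
is made up.]

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t max_sectors = 32ULL * 1024 * 1024 * 1024; /* 16TiB in sectors */
		uint64_t resync      = max_sectors / 4;            /* 25% done */

		/* Raise 'scale' until max_sectors>>scale fits the u32 divisor
		 * sector_div() needs; starting at 10 keeps (resync>>scale)*1000
		 * within 64 bits, per the comment in the first hunk above. */
		int scale = 10;
		while (max_sectors / 2 > (1ULL << (scale + 32)))
			scale++;

		uint64_t res = (resync >> scale) * 1000;
		res /= (uint32_t)((max_sectors >> scale) + 1); /* sector_div() in-kernel */

		unsigned int per_milli = res;
		printf("progress = %u.%u%%\n", per_milli / 10, per_milli % 10);
		return 0;
	}

Working in units of 1000 rather than 100 is what gives the progress
line its single decimal place (per_milli/10 and per_milli%10).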