about summary refs log tree commit diff stats
path: root/drivers/md/raid5.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--	drivers/md/raid5.c	53
1 file changed, 42 insertions(+), 11 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 20ae32d67e21..089a32604305 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1762,8 +1762,9 @@ static int make_request(request_queue_t *q, struct bio * bi)
 	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
 		DEFINE_WAIT(w);
 		int disks;
 
 	retry:
+		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
 		if (likely(conf->expand_progress == MaxSector))
 			disks = conf->raid_disks;
 		else {
@@ -1771,6 +1772,13 @@ static int make_request(request_queue_t *q, struct bio * bi)
 			disks = conf->raid_disks;
 			if (logical_sector >= conf->expand_progress)
 				disks = conf->previous_raid_disks;
+			else {
+				if (logical_sector >= conf->expand_lo) {
+					spin_unlock_irq(&conf->device_lock);
+					schedule();
+					goto retry;
+				}
+			}
 			spin_unlock_irq(&conf->device_lock);
 		}
 		new_sector = raid5_compute_sector(logical_sector, disks, disks - 1,
@@ -1779,7 +1787,6 @@ static int make_request(request_queue_t *q, struct bio * bi)
 			(unsigned long long)new_sector,
 			(unsigned long long)logical_sector);
 
-		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
 		sh = get_active_stripe(conf, new_sector, disks, pd_idx, (bi->bi_rw&RWA_MASK));
 		if (sh) {
 			if (unlikely(conf->expand_progress != MaxSector)) {
@@ -1877,6 +1884,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 	 */
 	int i;
 	int dd_idx;
+	sector_t writepos, safepos, gap;
 
 	if (sector_nr == 0 &&
 	    conf->expand_progress != 0) {
@@ -1887,15 +1895,36 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 		return sector_nr;
 	}
 
-	/* Cannot proceed until we've updated the superblock... */
-	wait_event(conf->wait_for_overlap,
-		   atomic_read(&conf->reshape_stripes)==0);
-	mddev->reshape_position = conf->expand_progress;
-
-	mddev->sb_dirty = 1;
-	md_wakeup_thread(mddev->thread);
-	wait_event(mddev->sb_wait, mddev->sb_dirty == 0 ||
-		   kthread_should_stop());
+	/* we update the metadata when there is more than 3Meg
+	 * in the block range (that is rather arbitrary, should
+	 * probably be time based) or when the data about to be
+	 * copied would over-write the source of the data at
+	 * the front of the range.
+	 * i.e. one new_stripe forward from expand_progress new_maps
+	 * to after where expand_lo old_maps to
+	 */
+	writepos = conf->expand_progress +
+		conf->chunk_size/512*(conf->raid_disks-1);
+	sector_div(writepos, conf->raid_disks-1);
+	safepos = conf->expand_lo;
+	sector_div(safepos, conf->previous_raid_disks-1);
+	gap = conf->expand_progress - conf->expand_lo;
+
+	if (writepos >= safepos ||
+	    gap > (conf->raid_disks-1)*3000*2 /*3Meg*/) {
+		/* Cannot proceed until we've updated the superblock... */
+		wait_event(conf->wait_for_overlap,
+			   atomic_read(&conf->reshape_stripes)==0);
+		mddev->reshape_position = conf->expand_progress;
+		mddev->sb_dirty = 1;
+		md_wakeup_thread(mddev->thread);
+		wait_event(mddev->sb_wait, mddev->sb_dirty == 0 ||
+			   kthread_should_stop());
+		spin_lock_irq(&conf->device_lock);
+		conf->expand_lo = mddev->reshape_position;
+		spin_unlock_irq(&conf->device_lock);
+		wake_up(&conf->wait_for_overlap);
+	}
 
 	for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) {
 		int j;
@@ -2322,6 +2351,7 @@ static int run(mddev_t *mddev)
 
 	if (conf->expand_progress != MaxSector) {
 		printk("...ok start reshape thread\n");
+		conf->expand_lo = conf->expand_progress;
 		atomic_set(&conf->reshape_stripes, 0);
 		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
 		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
@@ -2610,6 +2640,7 @@ static int raid5_reshape(mddev_t *mddev, int raid_disks)
 	conf->previous_raid_disks = conf->raid_disks;
 	conf->raid_disks = raid_disks;
 	conf->expand_progress = 0;
+	conf->expand_lo = 0;
 	spin_unlock_irq(&conf->device_lock);
 
 	/* Add some new drives, as many as will fit.