diff options
author | NeilBrown <neilb@suse.de> | 2011-05-11 00:54:41 -0400 |
---|---|---|
committer | NeilBrown <neilb@suse.de> | 2011-05-11 00:54:41 -0400 |
commit | ab9d47e990c12c11cc95ed1247a3782234a7e33a (patch) | |
tree | 7c82006d01e903e79d23d3b289a35effed4eacde /drivers/md/raid10.c | |
parent | f17ed07c853d5d772515f565a7fc68f9098d6d69 (diff) |
md/raid10: reformat some loops with less indenting.
When a loop ends with an 'if' with a large body, it is neater
to make the if 'continue' on the inverse condition, and then
the body is indented less.
Apply this pattern 3 times, and wrap some other long lines.
Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers/md/raid10.c')
-rw-r--r-- | drivers/md/raid10.c | 228 |
1 file changed, 120 insertions(+), 108 deletions(-)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index f44a3da5a80d..6e846688962f 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -1736,7 +1736,8 @@ static int init_resync(conf_t *conf) | |||
1736 | * | 1736 | * |
1737 | */ | 1737 | */ |
1738 | 1738 | ||
1739 | static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) | 1739 | static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, |
1740 | int *skipped, int go_faster) | ||
1740 | { | 1741 | { |
1741 | conf_t *conf = mddev->private; | 1742 | conf_t *conf = mddev->private; |
1742 | r10bio_t *r10_bio; | 1743 | r10bio_t *r10_bio; |
@@ -1830,108 +1831,114 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i | |||
1830 | int j, k; | 1831 | int j, k; |
1831 | r10_bio = NULL; | 1832 | r10_bio = NULL; |
1832 | 1833 | ||
1833 | for (i=0 ; i<conf->raid_disks; i++) | 1834 | for (i=0 ; i<conf->raid_disks; i++) { |
1834 | if (conf->mirrors[i].rdev && | 1835 | int still_degraded; |
1835 | !test_bit(In_sync, &conf->mirrors[i].rdev->flags)) { | 1836 | r10bio_t *rb2; |
1836 | int still_degraded = 0; | 1837 | sector_t sect; |
1837 | /* want to reconstruct this device */ | 1838 | int must_sync; |
1838 | r10bio_t *rb2 = r10_bio; | ||
1839 | sector_t sect = raid10_find_virt(conf, sector_nr, i); | ||
1840 | int must_sync; | ||
1841 | /* Unless we are doing a full sync, we only need | ||
1842 | * to recover the block if it is set in the bitmap | ||
1843 | */ | ||
1844 | must_sync = bitmap_start_sync(mddev->bitmap, sect, | ||
1845 | &sync_blocks, 1); | ||
1846 | if (sync_blocks < max_sync) | ||
1847 | max_sync = sync_blocks; | ||
1848 | if (!must_sync && | ||
1849 | !conf->fullsync) { | ||
1850 | /* yep, skip the sync_blocks here, but don't assume | ||
1851 | * that there will never be anything to do here | ||
1852 | */ | ||
1853 | chunks_skipped = -1; | ||
1854 | continue; | ||
1855 | } | ||
1856 | 1839 | ||
1857 | r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); | 1840 | if (conf->mirrors[i].rdev == NULL || |
1858 | raise_barrier(conf, rb2 != NULL); | 1841 | test_bit(In_sync, &conf->mirrors[i].rdev->flags)) |
1859 | atomic_set(&r10_bio->remaining, 0); | 1842 | continue; |
1860 | 1843 | ||
1861 | r10_bio->master_bio = (struct bio*)rb2; | 1844 | still_degraded = 0; |
1862 | if (rb2) | 1845 | /* want to reconstruct this device */ |
1863 | atomic_inc(&rb2->remaining); | 1846 | rb2 = r10_bio; |
1864 | r10_bio->mddev = mddev; | 1847 | sect = raid10_find_virt(conf, sector_nr, i); |
1865 | set_bit(R10BIO_IsRecover, &r10_bio->state); | 1848 | /* Unless we are doing a full sync, we only need |
1866 | r10_bio->sector = sect; | 1849 | * to recover the block if it is set in the bitmap |
1850 | */ | ||
1851 | must_sync = bitmap_start_sync(mddev->bitmap, sect, | ||
1852 | &sync_blocks, 1); | ||
1853 | if (sync_blocks < max_sync) | ||
1854 | max_sync = sync_blocks; | ||
1855 | if (!must_sync && | ||
1856 | !conf->fullsync) { | ||
1857 | /* yep, skip the sync_blocks here, but don't assume | ||
1858 | * that there will never be anything to do here | ||
1859 | */ | ||
1860 | chunks_skipped = -1; | ||
1861 | continue; | ||
1862 | } | ||
1867 | 1863 | ||
1868 | raid10_find_phys(conf, r10_bio); | 1864 | r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); |
1865 | raise_barrier(conf, rb2 != NULL); | ||
1866 | atomic_set(&r10_bio->remaining, 0); | ||
1869 | 1867 | ||
1870 | /* Need to check if the array will still be | 1868 | r10_bio->master_bio = (struct bio*)rb2; |
1871 | * degraded | 1869 | if (rb2) |
1872 | */ | 1870 | atomic_inc(&rb2->remaining); |
1873 | for (j=0; j<conf->raid_disks; j++) | 1871 | r10_bio->mddev = mddev; |
1874 | if (conf->mirrors[j].rdev == NULL || | 1872 | set_bit(R10BIO_IsRecover, &r10_bio->state); |
1875 | test_bit(Faulty, &conf->mirrors[j].rdev->flags)) { | 1873 | r10_bio->sector = sect; |
1876 | still_degraded = 1; | ||
1877 | break; | ||
1878 | } | ||
1879 | |||
1880 | must_sync = bitmap_start_sync(mddev->bitmap, sect, | ||
1881 | &sync_blocks, still_degraded); | ||
1882 | |||
1883 | for (j=0; j<conf->copies;j++) { | ||
1884 | int d = r10_bio->devs[j].devnum; | ||
1885 | if (conf->mirrors[d].rdev && | ||
1886 | test_bit(In_sync, &conf->mirrors[d].rdev->flags)) { | ||
1887 | /* This is where we read from */ | ||
1888 | bio = r10_bio->devs[0].bio; | ||
1889 | bio->bi_next = biolist; | ||
1890 | biolist = bio; | ||
1891 | bio->bi_private = r10_bio; | ||
1892 | bio->bi_end_io = end_sync_read; | ||
1893 | bio->bi_rw = READ; | ||
1894 | bio->bi_sector = r10_bio->devs[j].addr + | ||
1895 | conf->mirrors[d].rdev->data_offset; | ||
1896 | bio->bi_bdev = conf->mirrors[d].rdev->bdev; | ||
1897 | atomic_inc(&conf->mirrors[d].rdev->nr_pending); | ||
1898 | atomic_inc(&r10_bio->remaining); | ||
1899 | /* and we write to 'i' */ | ||
1900 | |||
1901 | for (k=0; k<conf->copies; k++) | ||
1902 | if (r10_bio->devs[k].devnum == i) | ||
1903 | break; | ||
1904 | BUG_ON(k == conf->copies); | ||
1905 | bio = r10_bio->devs[1].bio; | ||
1906 | bio->bi_next = biolist; | ||
1907 | biolist = bio; | ||
1908 | bio->bi_private = r10_bio; | ||
1909 | bio->bi_end_io = end_sync_write; | ||
1910 | bio->bi_rw = WRITE; | ||
1911 | bio->bi_sector = r10_bio->devs[k].addr + | ||
1912 | conf->mirrors[i].rdev->data_offset; | ||
1913 | bio->bi_bdev = conf->mirrors[i].rdev->bdev; | ||
1914 | |||
1915 | r10_bio->devs[0].devnum = d; | ||
1916 | r10_bio->devs[1].devnum = i; | ||
1917 | 1874 | ||
1918 | break; | 1875 | raid10_find_phys(conf, r10_bio); |
1919 | } | 1876 | |
1920 | } | 1877 | /* Need to check if the array will still be |
1921 | if (j == conf->copies) { | 1878 | * degraded |
1922 | /* Cannot recover, so abort the recovery */ | 1879 | */ |
1923 | put_buf(r10_bio); | 1880 | for (j=0; j<conf->raid_disks; j++) |
1924 | if (rb2) | 1881 | if (conf->mirrors[j].rdev == NULL || |
1925 | atomic_dec(&rb2->remaining); | 1882 | test_bit(Faulty, &conf->mirrors[j].rdev->flags)) { |
1926 | r10_bio = rb2; | 1883 | still_degraded = 1; |
1927 | if (!test_and_set_bit(MD_RECOVERY_INTR, | ||
1928 | &mddev->recovery)) | ||
1929 | printk(KERN_INFO "md/raid10:%s: insufficient " | ||
1930 | "working devices for recovery.\n", | ||
1931 | mdname(mddev)); | ||
1932 | break; | 1884 | break; |
1933 | } | 1885 | } |
1886 | |||
1887 | must_sync = bitmap_start_sync(mddev->bitmap, sect, | ||
1888 | &sync_blocks, still_degraded); | ||
1889 | |||
1890 | for (j=0; j<conf->copies;j++) { | ||
1891 | int d = r10_bio->devs[j].devnum; | ||
1892 | if (!conf->mirrors[d].rdev || | ||
1893 | !test_bit(In_sync, &conf->mirrors[d].rdev->flags)) | ||
1894 | continue; | ||
1895 | /* This is where we read from */ | ||
1896 | bio = r10_bio->devs[0].bio; | ||
1897 | bio->bi_next = biolist; | ||
1898 | biolist = bio; | ||
1899 | bio->bi_private = r10_bio; | ||
1900 | bio->bi_end_io = end_sync_read; | ||
1901 | bio->bi_rw = READ; | ||
1902 | bio->bi_sector = r10_bio->devs[j].addr + | ||
1903 | conf->mirrors[d].rdev->data_offset; | ||
1904 | bio->bi_bdev = conf->mirrors[d].rdev->bdev; | ||
1905 | atomic_inc(&conf->mirrors[d].rdev->nr_pending); | ||
1906 | atomic_inc(&r10_bio->remaining); | ||
1907 | /* and we write to 'i' */ | ||
1908 | |||
1909 | for (k=0; k<conf->copies; k++) | ||
1910 | if (r10_bio->devs[k].devnum == i) | ||
1911 | break; | ||
1912 | BUG_ON(k == conf->copies); | ||
1913 | bio = r10_bio->devs[1].bio; | ||
1914 | bio->bi_next = biolist; | ||
1915 | biolist = bio; | ||
1916 | bio->bi_private = r10_bio; | ||
1917 | bio->bi_end_io = end_sync_write; | ||
1918 | bio->bi_rw = WRITE; | ||
1919 | bio->bi_sector = r10_bio->devs[k].addr + | ||
1920 | conf->mirrors[i].rdev->data_offset; | ||
1921 | bio->bi_bdev = conf->mirrors[i].rdev->bdev; | ||
1922 | |||
1923 | r10_bio->devs[0].devnum = d; | ||
1924 | r10_bio->devs[1].devnum = i; | ||
1925 | |||
1926 | break; | ||
1927 | } | ||
1928 | if (j == conf->copies) { | ||
1929 | /* Cannot recover, so abort the recovery */ | ||
1930 | put_buf(r10_bio); | ||
1931 | if (rb2) | ||
1932 | atomic_dec(&rb2->remaining); | ||
1933 | r10_bio = rb2; | ||
1934 | if (!test_and_set_bit(MD_RECOVERY_INTR, | ||
1935 | &mddev->recovery)) | ||
1936 | printk(KERN_INFO "md/raid10:%s: insufficient " | ||
1937 | "working devices for recovery.\n", | ||
1938 | mdname(mddev)); | ||
1939 | break; | ||
1934 | } | 1940 | } |
1941 | } | ||
1935 | if (biolist == NULL) { | 1942 | if (biolist == NULL) { |
1936 | while (r10_bio) { | 1943 | while (r10_bio) { |
1937 | r10bio_t *rb2 = r10_bio; | 1944 | r10bio_t *rb2 = r10_bio; |
@@ -1949,7 +1956,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i | |||
1949 | 1956 | ||
1950 | if (!bitmap_start_sync(mddev->bitmap, sector_nr, | 1957 | if (!bitmap_start_sync(mddev->bitmap, sector_nr, |
1951 | &sync_blocks, mddev->degraded) && | 1958 | &sync_blocks, mddev->degraded) && |
1952 | !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { | 1959 | !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, |
1960 | &mddev->recovery)) { | ||
1953 | /* We can skip this block */ | 1961 | /* We can skip this block */ |
1954 | *skipped = 1; | 1962 | *skipped = 1; |
1955 | return sync_blocks + sectors_skipped; | 1963 | return sync_blocks + sectors_skipped; |
@@ -1994,7 +2002,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i | |||
1994 | for (i=0; i<conf->copies; i++) { | 2002 | for (i=0; i<conf->copies; i++) { |
1995 | int d = r10_bio->devs[i].devnum; | 2003 | int d = r10_bio->devs[i].devnum; |
1996 | if (r10_bio->devs[i].bio->bi_end_io) | 2004 | if (r10_bio->devs[i].bio->bi_end_io) |
1997 | rdev_dec_pending(conf->mirrors[d].rdev, mddev); | 2005 | rdev_dec_pending(conf->mirrors[d].rdev, |
2006 | mddev); | ||
1998 | } | 2007 | } |
1999 | put_buf(r10_bio); | 2008 | put_buf(r10_bio); |
2000 | biolist = NULL; | 2009 | biolist = NULL; |
@@ -2024,19 +2033,22 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i | |||
2024 | if (len == 0) | 2033 | if (len == 0) |
2025 | break; | 2034 | break; |
2026 | for (bio= biolist ; bio ; bio=bio->bi_next) { | 2035 | for (bio= biolist ; bio ; bio=bio->bi_next) { |
2036 | struct bio *bio2; | ||
2027 | page = bio->bi_io_vec[bio->bi_vcnt].bv_page; | 2037 | page = bio->bi_io_vec[bio->bi_vcnt].bv_page; |
2028 | if (bio_add_page(bio, page, len, 0) == 0) { | 2038 | if (bio_add_page(bio, page, len, 0)) |
2029 | /* stop here */ | 2039 | continue; |
2030 | struct bio *bio2; | 2040 | |
2031 | bio->bi_io_vec[bio->bi_vcnt].bv_page = page; | 2041 | /* stop here */ |
2032 | for (bio2 = biolist; bio2 && bio2 != bio; bio2 = bio2->bi_next) { | 2042 | bio->bi_io_vec[bio->bi_vcnt].bv_page = page; |
2033 | /* remove last page from this bio */ | 2043 | for (bio2 = biolist; |
2034 | bio2->bi_vcnt--; | 2044 | bio2 && bio2 != bio; |
2035 | bio2->bi_size -= len; | 2045 | bio2 = bio2->bi_next) { |
2036 | bio2->bi_flags &= ~(1<< BIO_SEG_VALID); | 2046 | /* remove last page from this bio */ |
2037 | } | 2047 | bio2->bi_vcnt--; |
2038 | goto bio_full; | 2048 | bio2->bi_size -= len; |
2049 | bio2->bi_flags &= ~(1<< BIO_SEG_VALID); | ||
2039 | } | 2050 | } |
2051 | goto bio_full; | ||
2040 | } | 2052 | } |
2041 | nr_sectors += len>>9; | 2053 | nr_sectors += len>>9; |
2042 | sector_nr += len>>9; | 2054 | sector_nr += len>>9; |