aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md
diff options
context:
space:
mode:
authorYuri Tikhonov <yur@emcraft.com>2009-08-29 22:13:13 -0400
committerDan Williams <dan.j.williams@intel.com>2009-08-29 22:13:13 -0400
commitb774ef491b4edf6876077014ecbb87f10c69c10f (patch)
treea4132f50e29b43c0b23e478001220b3e90ea4731 /drivers/md
parent6c0069c0ae9659e3a91b68eaed06a5c6c37f45c8 (diff)
md/raid6: remove synchronous infrastructure
These routines have been replaced by their asynchronous counterparts. Signed-off-by: Yuri Tikhonov <yur@emcraft.com> Signed-off-by: Ilya Yanok <yanok@emcraft.com> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/md')
-rw-r--r--drivers/md/raid5.c254
1 files changed, 0 insertions, 254 deletions
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index a833de189ca6..7c22e19aca82 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1927,253 +1927,6 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
1927} 1927}
1928 1928
1929 1929
1930
1931/*
1932 * Copy data between a page in the stripe cache, and one or more bion
1933 * The page could align with the middle of the bio, or there could be
1934 * several bion, each with several bio_vecs, which cover part of the page
1935 * Multiple bion are linked together on bi_next. There may be extras
1936 * at the end of this list. We ignore them.
1937 */
1938static void copy_data(int frombio, struct bio *bio,
1939 struct page *page,
1940 sector_t sector)
1941{
1942 char *pa = page_address(page);
1943 struct bio_vec *bvl;
1944 int i;
1945 int page_offset;
1946
1947 if (bio->bi_sector >= sector)
1948 page_offset = (signed)(bio->bi_sector - sector) * 512;
1949 else
1950 page_offset = (signed)(sector - bio->bi_sector) * -512;
1951 bio_for_each_segment(bvl, bio, i) {
1952 int len = bio_iovec_idx(bio,i)->bv_len;
1953 int clen;
1954 int b_offset = 0;
1955
1956 if (page_offset < 0) {
1957 b_offset = -page_offset;
1958 page_offset += b_offset;
1959 len -= b_offset;
1960 }
1961
1962 if (len > 0 && page_offset + len > STRIPE_SIZE)
1963 clen = STRIPE_SIZE - page_offset;
1964 else clen = len;
1965
1966 if (clen > 0) {
1967 char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
1968 if (frombio)
1969 memcpy(pa+page_offset, ba+b_offset, clen);
1970 else
1971 memcpy(ba+b_offset, pa+page_offset, clen);
1972 __bio_kunmap_atomic(ba, KM_USER0);
1973 }
1974 if (clen < len) /* hit end of page */
1975 break;
1976 page_offset += len;
1977 }
1978}
1979
1980#define check_xor() do { \
1981 if (count == MAX_XOR_BLOCKS) { \
1982 xor_blocks(count, STRIPE_SIZE, dest, ptr);\
1983 count = 0; \
1984 } \
1985 } while(0)
1986
1987static void compute_parity6(struct stripe_head *sh, int method)
1988{
1989 raid5_conf_t *conf = sh->raid_conf;
1990 int i, pd_idx, qd_idx, d0_idx, disks = sh->disks, count;
1991 int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
1992 struct bio *chosen;
1993 /**** FIX THIS: This could be very bad if disks is close to 256 ****/
1994 void *ptrs[syndrome_disks+2];
1995
1996 pd_idx = sh->pd_idx;
1997 qd_idx = sh->qd_idx;
1998 d0_idx = raid6_d0(sh);
1999
2000 pr_debug("compute_parity, stripe %llu, method %d\n",
2001 (unsigned long long)sh->sector, method);
2002
2003 switch(method) {
2004 case READ_MODIFY_WRITE:
2005 BUG(); /* READ_MODIFY_WRITE N/A for RAID-6 */
2006 case RECONSTRUCT_WRITE:
2007 for (i= disks; i-- ;)
2008 if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) {
2009 chosen = sh->dev[i].towrite;
2010 sh->dev[i].towrite = NULL;
2011
2012 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2013 wake_up(&conf->wait_for_overlap);
2014
2015 BUG_ON(sh->dev[i].written);
2016 sh->dev[i].written = chosen;
2017 }
2018 break;
2019 case CHECK_PARITY:
2020 BUG(); /* Not implemented yet */
2021 }
2022
2023 for (i = disks; i--;)
2024 if (sh->dev[i].written) {
2025 sector_t sector = sh->dev[i].sector;
2026 struct bio *wbi = sh->dev[i].written;
2027 while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
2028 copy_data(1, wbi, sh->dev[i].page, sector);
2029 wbi = r5_next_bio(wbi, sector);
2030 }
2031
2032 set_bit(R5_LOCKED, &sh->dev[i].flags);
2033 set_bit(R5_UPTODATE, &sh->dev[i].flags);
2034 }
2035
2036 /* Note that unlike RAID-5, the ordering of the disks matters greatly.*/
2037
2038 for (i = 0; i < disks; i++)
2039 ptrs[i] = (void *)raid6_empty_zero_page;
2040
2041 count = 0;
2042 i = d0_idx;
2043 do {
2044 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
2045
2046 ptrs[slot] = page_address(sh->dev[i].page);
2047 if (slot < syndrome_disks &&
2048 !test_bit(R5_UPTODATE, &sh->dev[i].flags)) {
2049 printk(KERN_ERR "block %d/%d not uptodate "
2050 "on parity calc\n", i, count);
2051 BUG();
2052 }
2053
2054 i = raid6_next_disk(i, disks);
2055 } while (i != d0_idx);
2056 BUG_ON(count != syndrome_disks);
2057
2058 raid6_call.gen_syndrome(syndrome_disks+2, STRIPE_SIZE, ptrs);
2059
2060 switch(method) {
2061 case RECONSTRUCT_WRITE:
2062 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
2063 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
2064 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
2065 set_bit(R5_LOCKED, &sh->dev[qd_idx].flags);
2066 break;
2067 case UPDATE_PARITY:
2068 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
2069 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
2070 break;
2071 }
2072}
2073
2074
2075/* Compute one missing block */
2076static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
2077{
2078 int i, count, disks = sh->disks;
2079 void *ptr[MAX_XOR_BLOCKS], *dest, *p;
2080 int qd_idx = sh->qd_idx;
2081
2082 pr_debug("compute_block_1, stripe %llu, idx %d\n",
2083 (unsigned long long)sh->sector, dd_idx);
2084
2085 if ( dd_idx == qd_idx ) {
2086 /* We're actually computing the Q drive */
2087 compute_parity6(sh, UPDATE_PARITY);
2088 } else {
2089 dest = page_address(sh->dev[dd_idx].page);
2090 if (!nozero) memset(dest, 0, STRIPE_SIZE);
2091 count = 0;
2092 for (i = disks ; i--; ) {
2093 if (i == dd_idx || i == qd_idx)
2094 continue;
2095 p = page_address(sh->dev[i].page);
2096 if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
2097 ptr[count++] = p;
2098 else
2099 printk("compute_block() %d, stripe %llu, %d"
2100 " not present\n", dd_idx,
2101 (unsigned long long)sh->sector, i);
2102
2103 check_xor();
2104 }
2105 if (count)
2106 xor_blocks(count, STRIPE_SIZE, dest, ptr);
2107 if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
2108 else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
2109 }
2110}
2111
2112/* Compute two missing blocks */
2113static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
2114{
2115 int i, count, disks = sh->disks;
2116 int syndrome_disks = sh->ddf_layout ? disks : disks-2;
2117 int d0_idx = raid6_d0(sh);
2118 int faila = -1, failb = -1;
2119 /**** FIX THIS: This could be very bad if disks is close to 256 ****/
2120 void *ptrs[syndrome_disks+2];
2121
2122 for (i = 0; i < disks ; i++)
2123 ptrs[i] = (void *)raid6_empty_zero_page;
2124 count = 0;
2125 i = d0_idx;
2126 do {
2127 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
2128
2129 ptrs[slot] = page_address(sh->dev[i].page);
2130
2131 if (i == dd_idx1)
2132 faila = slot;
2133 if (i == dd_idx2)
2134 failb = slot;
2135 i = raid6_next_disk(i, disks);
2136 } while (i != d0_idx);
2137 BUG_ON(count != syndrome_disks);
2138
2139 BUG_ON(faila == failb);
2140 if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; }
2141
2142 pr_debug("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n",
2143 (unsigned long long)sh->sector, dd_idx1, dd_idx2,
2144 faila, failb);
2145
2146 if (failb == syndrome_disks+1) {
2147 /* Q disk is one of the missing disks */
2148 if (faila == syndrome_disks) {
2149 /* Missing P+Q, just recompute */
2150 compute_parity6(sh, UPDATE_PARITY);
2151 return;
2152 } else {
2153 /* We're missing D+Q; recompute D from P */
2154 compute_block_1(sh, ((dd_idx1 == sh->qd_idx) ?
2155 dd_idx2 : dd_idx1),
2156 0);
2157 compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */
2158 return;
2159 }
2160 }
2161
2162 /* We're missing D+P or D+D; */
2163 if (failb == syndrome_disks) {
2164 /* We're missing D+P. */
2165 raid6_datap_recov(syndrome_disks+2, STRIPE_SIZE, faila, ptrs);
2166 } else {
2167 /* We're missing D+D. */
2168 raid6_2data_recov(syndrome_disks+2, STRIPE_SIZE, faila, failb,
2169 ptrs);
2170 }
2171
2172 /* Both the above update both missing blocks */
2173 set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags);
2174 set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags);
2175}
2176
2177static void 1930static void
2178schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, 1931schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
2179 int rcw, int expand) 1932 int rcw, int expand)
@@ -2331,13 +2084,6 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
2331 2084
2332static void end_reshape(raid5_conf_t *conf); 2085static void end_reshape(raid5_conf_t *conf);
2333 2086
2334static int page_is_zero(struct page *p)
2335{
2336 char *a = page_address(p);
2337 return ((*(u32*)a) == 0 &&
2338 memcmp(a, a+4, STRIPE_SIZE-4)==0);
2339}
2340
2341static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous, 2087static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
2342 struct stripe_head *sh) 2088 struct stripe_head *sh)
2343{ 2089{