Diffstat (limited to 'drivers/md/raid10.c')
-rw-r--r--  drivers/md/raid10.c | 62
1 file changed, 34 insertions(+), 28 deletions(-)
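The patch below has two independent strands: every use of the `mddev_to_conf()` wrapper is open-coded as a direct `mddev->private` dereference, and chunk-size arithmetic is converted from `mddev->chunk_size` (bytes) to `mddev->chunk_sectors` (512-byte sectors). As a minimal sketch of the first strand -- a userspace model, not kernel code, with stand-in types; the real macro lived in raid10.h and defined essentially this cast:

```c
/*
 * Minimal userspace model of the mddev_to_conf() removal. The types
 * here are stand-ins for the kernel's mddev_t/conf_t; only the shape
 * of the change is real: the wrapper was a cast of mddev->private.
 */
#include <stdio.h>

typedef struct conf_s { int raid_disks; } conf_t;
typedef struct mddev_s { void *private; } mddev_t;

/* The removed helper: raid10.h defined it essentially like this. */
#define mddev_to_conf(mddev) ((conf_t *) (mddev)->private)

int main(void)
{
	conf_t conf = { .raid_disks = 4 };
	mddev_t mddev = { .private = &conf };

	conf_t *old_way = mddev_to_conf(&mddev); /* before this patch */
	conf_t *new_way = mddev.private;         /* after this patch  */

	printf("%d %d\n", old_way->raid_disks, new_way->raid_disks);
	return 0;
}
```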
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 750550c1166f..ae12ceafe10c 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -188,7 +188,7 @@ static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
 
 static void free_r10bio(r10bio_t *r10_bio)
 {
-	conf_t *conf = mddev_to_conf(r10_bio->mddev);
+	conf_t *conf = r10_bio->mddev->private;
 
 	/*
 	 * Wake up any possible resync thread that waits for the device
@@ -202,7 +202,7 @@ static void free_r10bio(r10bio_t *r10_bio)
 
 static void put_buf(r10bio_t *r10_bio)
 {
-	conf_t *conf = mddev_to_conf(r10_bio->mddev);
+	conf_t *conf = r10_bio->mddev->private;
 
 	mempool_free(r10_bio, conf->r10buf_pool);
 
@@ -213,7 +213,7 @@ static void reschedule_retry(r10bio_t *r10_bio)
 {
 	unsigned long flags;
 	mddev_t *mddev = r10_bio->mddev;
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 
 	spin_lock_irqsave(&conf->device_lock, flags);
 	list_add(&r10_bio->retry_list, &conf->retry_list);
@@ -245,7 +245,7 @@ static void raid_end_bio_io(r10bio_t *r10_bio)
  */
 static inline void update_head_pos(int slot, r10bio_t *r10_bio)
 {
-	conf_t *conf = mddev_to_conf(r10_bio->mddev);
+	conf_t *conf = r10_bio->mddev->private;
 
 	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
 		r10_bio->devs[slot].addr + (r10_bio->sectors);
@@ -256,7 +256,7 @@ static void raid10_end_read_request(struct bio *bio, int error)
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
 	int slot, dev;
-	conf_t *conf = mddev_to_conf(r10_bio->mddev);
+	conf_t *conf = r10_bio->mddev->private;
 
 
 	slot = r10_bio->read_slot;
@@ -297,7 +297,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
 	int slot, dev;
-	conf_t *conf = mddev_to_conf(r10_bio->mddev);
+	conf_t *conf = r10_bio->mddev->private;
 
 	for (slot = 0; slot < conf->copies; slot++)
 		if (r10_bio->devs[slot].bio == bio)
@@ -461,7 +461,7 @@ static int raid10_mergeable_bvec(struct request_queue *q,
 	mddev_t *mddev = q->queuedata;
 	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
 	int max;
-	unsigned int chunk_sectors = mddev->chunk_size >> 9;
+	unsigned int chunk_sectors = mddev->chunk_sectors;
 	unsigned int bio_sectors = bvm->bi_size >> 9;
 
 	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
@@ -596,7 +596,7 @@ rb_out:
 
 static void unplug_slaves(mddev_t *mddev)
 {
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	int i;
 
 	rcu_read_lock();
@@ -628,7 +628,7 @@ static void raid10_unplug(struct request_queue *q)
 static int raid10_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	int i, ret = 0;
 
 	rcu_read_lock();
@@ -788,7 +788,7 @@ static void unfreeze_array(conf_t *conf)
 static int make_request(struct request_queue *q, struct bio * bio)
 {
 	mddev_t *mddev = q->queuedata;
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	mirror_info_t *mirror;
 	r10bio_t *r10_bio;
 	struct bio *read_bio;
@@ -981,11 +981,11 @@ static int make_request(struct request_queue *q, struct bio * bio)
 
 static void status(struct seq_file *seq, mddev_t *mddev)
 {
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	int i;
 
 	if (conf->near_copies < conf->raid_disks)
-		seq_printf(seq, " %dK chunks", mddev->chunk_size/1024);
+		seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
 	if (conf->near_copies > 1)
 		seq_printf(seq, " %d near-copies", conf->near_copies);
 	if (conf->far_copies > 1) {
@@ -1006,7 +1006,7 @@ static void status(struct seq_file *seq, mddev_t *mddev)
 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
 {
 	char b[BDEVNAME_SIZE];
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 
 	/*
 	 * If it is not operational, then we have already marked it as dead
@@ -1215,7 +1215,7 @@ abort:
 static void end_sync_read(struct bio *bio, int error)
 {
 	r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
-	conf_t *conf = mddev_to_conf(r10_bio->mddev);
+	conf_t *conf = r10_bio->mddev->private;
 	int i,d;
 
 	for (i=0; i<conf->copies; i++)
@@ -1253,7 +1253,7 @@ static void end_sync_write(struct bio *bio, int error)
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
 	mddev_t *mddev = r10_bio->mddev;
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	int i,d;
 
 	for (i = 0; i < conf->copies; i++)
@@ -1300,7 +1300,7 @@ static void end_sync_write(struct bio *bio, int error)
  */
 static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
 {
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	int i, first;
 	struct bio *tbio, *fbio;
 
@@ -1400,7 +1400,7 @@ done:
 
 static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
 {
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	int i, d;
 	struct bio *bio, *wbio;
 
@@ -1549,7 +1549,7 @@ static void raid10d(mddev_t *mddev)
 	r10bio_t *r10_bio;
 	struct bio *bio;
 	unsigned long flags;
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	struct list_head *head = &conf->retry_list;
 	int unplug=0;
 	mdk_rdev_t *rdev;
@@ -1572,7 +1572,7 @@ static void raid10d(mddev_t *mddev)
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 
 		mddev = r10_bio->mddev;
-		conf = mddev_to_conf(mddev);
+		conf = mddev->private;
 		if (test_bit(R10BIO_IsSync, &r10_bio->state)) {
 			sync_request_write(mddev, r10_bio);
 			unplug = 1;
@@ -1680,7 +1680,7 @@ static int init_resync(conf_t *conf)
 
 static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
 {
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	r10bio_t *r10_bio;
 	struct bio *biolist = NULL, *bio;
 	sector_t max_sector, nr_sectors;
@@ -2026,7 +2026,7 @@ static sector_t
 raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
 {
 	sector_t size;
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 
 	if (!raid_disks)
 		raid_disks = mddev->raid_disks;
@@ -2050,9 +2050,10 @@ static int run(mddev_t *mddev)
 	int nc, fc, fo;
 	sector_t stride, size;
 
-	if (mddev->chunk_size < PAGE_SIZE) {
+	if (mddev->chunk_sectors < (PAGE_SIZE >> 9) ||
+	    !is_power_of_2(mddev->chunk_sectors)) {
 		printk(KERN_ERR "md/raid10: chunk size must be "
-		       "at least PAGE_SIZE(%ld).\n", PAGE_SIZE);
+		       "at least PAGE_SIZE(%ld) and be a power of 2.\n", PAGE_SIZE);
 		return -EINVAL;
 	}
 
@@ -2095,8 +2096,8 @@ static int run(mddev_t *mddev)
 	conf->far_copies = fc;
 	conf->copies = nc*fc;
 	conf->far_offset = fo;
-	conf->chunk_mask = (sector_t)(mddev->chunk_size>>9)-1;
-	conf->chunk_shift = ffz(~mddev->chunk_size) - 9;
+	conf->chunk_mask = mddev->chunk_sectors - 1;
+	conf->chunk_shift = ffz(~mddev->chunk_sectors);
 	size = mddev->dev_sectors >> conf->chunk_shift;
 	sector_div(size, fc);
 	size = size * conf->raid_disks;
@@ -2185,6 +2186,10 @@ static int run(mddev_t *mddev)
 		goto out_free_conf;
 	}
 
+	if (mddev->recovery_cp != MaxSector)
+		printk(KERN_NOTICE "raid10: %s is not clean"
+		       " -- starting background reconstruction\n",
+		       mdname(mddev));
 	printk(KERN_INFO
 		"raid10: raid set %s active with %d out of %d devices\n",
 		mdname(mddev), mddev->raid_disks - mddev->degraded,
@@ -2204,7 +2209,8 @@ static int run(mddev_t *mddev)
 	 * maybe...
 	 */
 	{
-		int stripe = conf->raid_disks * (mddev->chunk_size / PAGE_SIZE);
+		int stripe = conf->raid_disks *
+			((mddev->chunk_sectors << 9) / PAGE_SIZE);
 		stripe /= conf->near_copies;
 		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
 			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
@@ -2227,7 +2233,7 @@ out:
 
 static int stop(mddev_t *mddev)
 {
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 
 	raise_barrier(conf, 0);
 	lower_barrier(conf);
@@ -2245,7 +2251,7 @@ static int stop(mddev_t *mddev)
 
 static void raid10_quiesce(mddev_t *mddev, int state)
 {
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 
 	switch(state) {
 	case 1:
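The chunk-size hunks above all rest on one identity: `chunk_sectors == chunk_size >> 9`, since a sector is 512 bytes. Hence `chunk_size/1024` becomes `chunk_sectors / 2` in status(), `ffz(~chunk_size) - 9` becomes `ffz(~chunk_sectors)` for chunk_shift, and `chunk_size / PAGE_SIZE` becomes `(chunk_sectors << 9) / PAGE_SIZE` in the readahead calculation. A standalone check of these equivalences follows -- userspace C, not kernel code, with a 64KiB chunk and 4KiB `page_size` as example values, and `__builtin_ctz` standing in for the kernel's `ffz(~x)` (first set bit, i.e. log2 of a power of two):

```c
/*
 * Standalone sanity check of the unit conversions in this patch.
 * Assumptions: 64KiB chunk (128 x 512-byte sectors), 4KiB pages;
 * __builtin_ctz models ffz(~x) for power-of-two x.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int chunk_size = 64 * 1024;          /* bytes         */
	unsigned int chunk_sectors = chunk_size >> 9; /* 512B sectors  */
	unsigned int page_size = 4096;                /* example value */

	/* status(): "%dK chunks" -- bytes/1024 equals sectors/2 */
	assert(chunk_size / 1024 == chunk_sectors / 2);

	/* run(): ffz(~chunk_size) - 9 equals ffz(~chunk_sectors) */
	assert(__builtin_ctz(chunk_size) - 9 == __builtin_ctz(chunk_sectors));

	/* run(): chunk_size/PAGE_SIZE equals (chunk_sectors << 9)/PAGE_SIZE */
	assert(chunk_size / page_size == (chunk_sectors << 9) / page_size);

	printf("chunk_sectors=%u shift=%d mask=%#x\n", chunk_sectors,
	       __builtin_ctz(chunk_sectors), chunk_sectors - 1);
	return 0;
}
```

The `is_power_of_2(mddev->chunk_sectors)` test added in run() is what makes the mask and shift forms valid: `chunk_sectors - 1` is only a usable bit mask, and `ffz(~chunk_sectors)` only a true log2, when chunk_sectors is a power of two.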