Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-crypt.c   |  31
-rw-r--r--  drivers/md/dm-table.c   |  28
-rw-r--r--  drivers/md/dm.c         |  16
-rw-r--r--  drivers/md/dm.h         |   1
-rw-r--r--  drivers/md/linear.c     |  20
-rw-r--r--  drivers/md/md.c         |  52
-rw-r--r--  drivers/md/multipath.c  |  30
-rw-r--r--  drivers/md/raid0.c      |  31
-rw-r--r--  drivers/md/raid1.c      |  34
-rw-r--r--  drivers/md/raid10.c     |  31
-rw-r--r--  drivers/md/raid5.c      |  31
11 files changed, 53 insertions(+), 252 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 8216a6f75be5..64fee90bb68b 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -441,33 +441,12 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 	return clone;
 }
 
-static void crypt_free_buffer_pages(struct crypt_config *cc,
-				    struct bio *clone, unsigned int bytes)
+static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
 {
-	unsigned int i, start, end;
+	unsigned int i;
 	struct bio_vec *bv;
 
-	/*
-	 * This is ugly, but Jens Axboe thinks that using bi_idx in the
-	 * endio function is too dangerous at the moment, so I calculate the
-	 * correct position using bi_vcnt and bi_size.
-	 * The bv_offset and bv_len fields might already be modified but we
-	 * know that we always allocated whole pages.
-	 * A fix to the bi_idx issue in the kernel is in the works, so
-	 * we will hopefully be able to revert to the cleaner solution soon.
-	 */
-	i = clone->bi_vcnt - 1;
-	bv = bio_iovec_idx(clone, i);
-	end = (i << PAGE_SHIFT) + (bv->bv_offset + bv->bv_len) - clone->bi_size;
-	start = end - bytes;
-
-	start >>= PAGE_SHIFT;
-	if (!clone->bi_size)
-		end = clone->bi_vcnt;
-	else
-		end >>= PAGE_SHIFT;
-
-	for (i = start; i < end; i++) {
+	for (i = 0; i < clone->bi_vcnt; i++) {
 		bv = bio_iovec_idx(clone, i);
 		BUG_ON(!bv->bv_page);
 		mempool_free(bv->bv_page, cc->page_pool);
@@ -519,7 +498,7 @@ static void crypt_endio(struct bio *clone, int error)
 	 * free the processed pages
 	 */
 	if (!read_io) {
-		crypt_free_buffer_pages(cc, clone, clone->bi_size);
+		crypt_free_buffer_pages(cc, clone);
 		goto out;
 	}
 
@@ -608,7 +587,7 @@ static void process_write(struct dm_crypt_io *io)
 	ctx.idx_out = 0;
 
 	if (unlikely(crypt_convert(cc, &ctx) < 0)) {
-		crypt_free_buffer_pages(cc, clone, clone->bi_size);
+		crypt_free_buffer_pages(cc, clone);
 		bio_put(clone);
 		dec_pending(io, -EIO);
 		return;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 2bcde5798b5a..fbe477bb2c68 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -999,33 +999,6 @@ void dm_table_unplug_all(struct dm_table *t)
 	}
 }
 
-int dm_table_flush_all(struct dm_table *t)
-{
-	struct list_head *d, *devices = dm_table_get_devices(t);
-	int ret = 0;
-	unsigned i;
-
-	for (i = 0; i < t->num_targets; i++)
-		if (t->targets[i].type->flush)
-			t->targets[i].type->flush(&t->targets[i]);
-
-	for (d = devices->next; d != devices; d = d->next) {
-		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-		struct request_queue *q = bdev_get_queue(dd->bdev);
-		int err;
-
-		if (!q->issue_flush_fn)
-			err = -EOPNOTSUPP;
-		else
-			err = q->issue_flush_fn(q, dd->bdev->bd_disk, NULL);
-
-		if (!ret)
-			ret = err;
-	}
-
-	return ret;
-}
-
 struct mapped_device *dm_table_get_md(struct dm_table *t)
 {
 	dm_get(t->md);
@@ -1043,4 +1016,3 @@ EXPORT_SYMBOL(dm_table_get_md);
 EXPORT_SYMBOL(dm_table_put);
 EXPORT_SYMBOL(dm_table_get);
 EXPORT_SYMBOL(dm_table_unplug_all);
-EXPORT_SYMBOL(dm_table_flush_all);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 167765c47747..d837d37f6209 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -840,21 +840,6 @@ static int dm_request(struct request_queue *q, struct bio *bio)
 	return 0;
 }
 
-static int dm_flush_all(struct request_queue *q, struct gendisk *disk,
-			sector_t *error_sector)
-{
-	struct mapped_device *md = q->queuedata;
-	struct dm_table *map = dm_get_table(md);
-	int ret = -ENXIO;
-
-	if (map) {
-		ret = dm_table_flush_all(map);
-		dm_table_put(map);
-	}
-
-	return ret;
-}
-
 static void dm_unplug_all(struct request_queue *q)
 {
 	struct mapped_device *md = q->queuedata;
@@ -1003,7 +988,6 @@ static struct mapped_device *alloc_dev(int minor)
 	blk_queue_make_request(md->queue, dm_request);
 	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
 	md->queue->unplug_fn = dm_unplug_all;
-	md->queue->issue_flush_fn = dm_flush_all;
 
 	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
 	if (!md->io_pool)
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 462ee652a890..4b3faa45277e 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -111,7 +111,6 @@ void dm_table_postsuspend_targets(struct dm_table *t);
 int dm_table_resume_targets(struct dm_table *t);
 int dm_table_any_congested(struct dm_table *t, int bdi_bits);
 void dm_table_unplug_all(struct dm_table *t);
-int dm_table_flush_all(struct dm_table *t);
 
 /*-----------------------------------------------------------------
  * A registry of target types.
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 550148770bb2..56a11f6c127b 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -92,25 +92,6 @@ static void linear_unplug(struct request_queue *q)
 	}
 }
 
-static int linear_issue_flush(struct request_queue *q, struct gendisk *disk,
-			      sector_t *error_sector)
-{
-	mddev_t *mddev = q->queuedata;
-	linear_conf_t *conf = mddev_to_conf(mddev);
-	int i, ret = 0;
-
-	for (i=0; i < mddev->raid_disks && ret == 0; i++) {
-		struct block_device *bdev = conf->disks[i].rdev->bdev;
-		struct request_queue *r_queue = bdev_get_queue(bdev);
-
-		if (!r_queue->issue_flush_fn)
-			ret = -EOPNOTSUPP;
-		else
-			ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, error_sector);
-	}
-	return ret;
-}
-
 static int linear_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;
@@ -279,7 +260,6 @@ static int linear_run (mddev_t *mddev)
 
 	blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
 	mddev->queue->unplug_fn = linear_unplug;
-	mddev->queue->issue_flush_fn = linear_issue_flush;
 	mddev->queue->backing_dev_info.congested_fn = linear_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
 	return 0;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index acf1b81b47cb..c059ae6f37e5 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2714,7 +2714,7 @@ action_show(mddev_t *mddev, char *page)
 {
 	char *type = "idle";
 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
-	    test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) {
+	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
 		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
 			type = "reshape";
 		else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
@@ -2833,6 +2833,12 @@ sync_max_store(mddev_t *mddev, const char *buf, size_t len)
 static struct md_sysfs_entry md_sync_max =
 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
 
+static ssize_t
+degraded_show(mddev_t *mddev, char *page)
+{
+	return sprintf(page, "%d\n", mddev->degraded);
+}
+static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
 
 static ssize_t
 sync_speed_show(mddev_t *mddev, char *page)
@@ -2976,6 +2982,7 @@ static struct attribute *md_redundancy_attrs[] = {
 	&md_suspend_lo.attr,
 	&md_suspend_hi.attr,
 	&md_bitmap.attr,
+	&md_degraded.attr,
 	NULL,
 };
 static struct attribute_group md_redundancy_group = {
@@ -3463,7 +3470,6 @@ static int do_md_stop(mddev_t * mddev, int mode)
 	mddev->pers->stop(mddev);
 	mddev->queue->merge_bvec_fn = NULL;
 	mddev->queue->unplug_fn = NULL;
-	mddev->queue->issue_flush_fn = NULL;
 	mddev->queue->backing_dev_info.congested_fn = NULL;
 	if (mddev->pers->sync_request)
 		sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
@@ -5771,26 +5777,47 @@ static int __init md_init(void)
  * Searches all registered partitions for autorun RAID arrays
  * at boot time.
  */
-static dev_t detected_devices[128];
-static int dev_cnt;
+
+static LIST_HEAD(all_detected_devices);
+struct detected_devices_node {
+	struct list_head list;
+	dev_t dev;
+};
 
 void md_autodetect_dev(dev_t dev)
 {
-	if (dev_cnt >= 0 && dev_cnt < 127)
-		detected_devices[dev_cnt++] = dev;
+	struct detected_devices_node *node_detected_dev;
+
+	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
+	if (node_detected_dev) {
+		node_detected_dev->dev = dev;
+		list_add_tail(&node_detected_dev->list, &all_detected_devices);
+	} else {
+		printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
+			", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
+	}
 }
 
 
 static void autostart_arrays(int part)
 {
 	mdk_rdev_t *rdev;
-	int i;
+	struct detected_devices_node *node_detected_dev;
+	dev_t dev;
+	int i_scanned, i_passed;
 
-	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
+	i_scanned = 0;
+	i_passed = 0;
 
-	for (i = 0; i < dev_cnt; i++) {
-		dev_t dev = detected_devices[i];
+	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
 
+	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
+		i_scanned++;
+		node_detected_dev = list_entry(all_detected_devices.next,
+					struct detected_devices_node, list);
+		list_del(&node_detected_dev->list);
+		dev = node_detected_dev->dev;
+		kfree(node_detected_dev);
 		rdev = md_import_device(dev,0, 90);
 		if (IS_ERR(rdev))
 			continue;
@@ -5800,8 +5827,11 @@ static void autostart_arrays(int part)
 			continue;
 		}
 		list_add(&rdev->same_set, &pending_raid_disks);
+		i_passed++;
 	}
-	dev_cnt = 0;
+
+	printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
+		i_scanned, i_passed);
 
 	autorun_devices(part);
 }
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index f2a63f394ad9..b35731cceac6 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -194,35 +194,6 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev)
 	seq_printf (seq, "]");
 }
 
-static int multipath_issue_flush(struct request_queue *q, struct gendisk *disk,
-				 sector_t *error_sector)
-{
-	mddev_t *mddev = q->queuedata;
-	multipath_conf_t *conf = mddev_to_conf(mddev);
-	int i, ret = 0;
-
-	rcu_read_lock();
-	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
-		if (rdev && !test_bit(Faulty, &rdev->flags)) {
-			struct block_device *bdev = rdev->bdev;
-			struct request_queue *r_queue = bdev_get_queue(bdev);
-
-			if (!r_queue->issue_flush_fn)
-				ret = -EOPNOTSUPP;
-			else {
-				atomic_inc(&rdev->nr_pending);
-				rcu_read_unlock();
-				ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
-							      error_sector);
-				rdev_dec_pending(rdev, mddev);
-				rcu_read_lock();
-			}
-		}
-	}
-	rcu_read_unlock();
-	return ret;
-}
 static int multipath_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;
@@ -527,7 +498,6 @@ static int multipath_run (mddev_t *mddev)
 	mddev->array_size = mddev->size;
 
 	mddev->queue->unplug_fn = multipath_unplug;
-	mddev->queue->issue_flush_fn = multipath_issue_flush;
 	mddev->queue->backing_dev_info.congested_fn = multipath_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
 
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index ef0da2d84959..c111105fc2dc 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -40,26 +40,6 @@ static void raid0_unplug(struct request_queue *q)
 	}
 }
 
-static int raid0_issue_flush(struct request_queue *q, struct gendisk *disk,
-			     sector_t *error_sector)
-{
-	mddev_t *mddev = q->queuedata;
-	raid0_conf_t *conf = mddev_to_conf(mddev);
-	mdk_rdev_t **devlist = conf->strip_zone[0].dev;
-	int i, ret = 0;
-
-	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-		struct block_device *bdev = devlist[i]->bdev;
-		struct request_queue *r_queue = bdev_get_queue(bdev);
-
-		if (!r_queue->issue_flush_fn)
-			ret = -EOPNOTSUPP;
-		else
-			ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, error_sector);
-	}
-	return ret;
-}
-
 static int raid0_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;
@@ -250,7 +230,6 @@ static int create_strip_zones (mddev_t *mddev)
 
 	mddev->queue->unplug_fn = raid0_unplug;
 
-	mddev->queue->issue_flush_fn = raid0_issue_flush;
 	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
 
@@ -493,7 +472,7 @@ bad_map:
 	bio_io_error(bio);
 	return 0;
 }
- 
+
 static void raid0_status (struct seq_file *seq, mddev_t *mddev)
 {
 #undef MD_DEBUG
@@ -501,18 +480,18 @@ static void raid0_status (struct seq_file *seq, mddev_t *mddev)
 	int j, k, h;
 	char b[BDEVNAME_SIZE];
 	raid0_conf_t *conf = mddev_to_conf(mddev);
 
 	h = 0;
 	for (j = 0; j < conf->nr_strip_zones; j++) {
 		seq_printf(seq, " z%d", j);
 		if (conf->hash_table[h] == conf->strip_zone+j)
-			seq_printf("(h%d)", h++);
+			seq_printf(seq, "(h%d)", h++);
 		seq_printf(seq, "=[");
 		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
-			seq_printf (seq, "%s/", bdevname(
+			seq_printf(seq, "%s/", bdevname(
 				conf->strip_zone[j].dev[k]->bdev,b));
 
-		seq_printf (seq, "] zo=%d do=%d s=%d\n",
+		seq_printf(seq, "] zo=%d do=%d s=%d\n",
 			conf->strip_zone[j].zone_offset,
 			conf->strip_zone[j].dev_offset,
 			conf->strip_zone[j].size);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 6d03bea6fa58..16775a0df7f6 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -567,36 +567,6 @@ static void raid1_unplug(struct request_queue *q)
 	md_wakeup_thread(mddev->thread);
 }
 
-static int raid1_issue_flush(struct request_queue *q, struct gendisk *disk,
-			   sector_t *error_sector)
-{
-	mddev_t *mddev = q->queuedata;
-	conf_t *conf = mddev_to_conf(mddev);
-	int i, ret = 0;
-
-	rcu_read_lock();
-	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
-		if (rdev && !test_bit(Faulty, &rdev->flags)) {
-			struct block_device *bdev = rdev->bdev;
-			struct request_queue *r_queue = bdev_get_queue(bdev);
-
-			if (!r_queue->issue_flush_fn)
-				ret = -EOPNOTSUPP;
-			else {
-				atomic_inc(&rdev->nr_pending);
-				rcu_read_unlock();
-				ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
-							      error_sector);
-				rdev_dec_pending(rdev, mddev);
-				rcu_read_lock();
-			}
-		}
-	}
-	rcu_read_unlock();
-	return ret;
-}
-
 static int raid1_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;
@@ -1244,7 +1214,8 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
 			j = 0;
 		if (j >= 0)
 			mddev->resync_mismatches += r1_bio->sectors;
-		if (j < 0 || test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
+		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
+			      && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
 			sbio->bi_end_io = NULL;
 			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
 		} else {
@@ -1997,7 +1968,6 @@ static int run(mddev_t *mddev)
 	mddev->array_size = mddev->size;
 
 	mddev->queue->unplug_fn = raid1_unplug;
-	mddev->queue->issue_flush_fn = raid1_issue_flush;
 	mddev->queue->backing_dev_info.congested_fn = raid1_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
 
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 25a96c42bdb0..fc6607acb6e4 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -611,36 +611,6 @@ static void raid10_unplug(struct request_queue *q)
 	md_wakeup_thread(mddev->thread);
 }
 
-static int raid10_issue_flush(struct request_queue *q, struct gendisk *disk,
-			     sector_t *error_sector)
-{
-	mddev_t *mddev = q->queuedata;
-	conf_t *conf = mddev_to_conf(mddev);
-	int i, ret = 0;
-
-	rcu_read_lock();
-	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
-		if (rdev && !test_bit(Faulty, &rdev->flags)) {
-			struct block_device *bdev = rdev->bdev;
-			struct request_queue *r_queue = bdev_get_queue(bdev);
-
-			if (!r_queue->issue_flush_fn)
-				ret = -EOPNOTSUPP;
-			else {
-				atomic_inc(&rdev->nr_pending);
-				rcu_read_unlock();
-				ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
-							      error_sector);
-				rdev_dec_pending(rdev, mddev);
-				rcu_read_lock();
-			}
-		}
-	}
-	rcu_read_unlock();
-	return ret;
-}
-
 static int raid10_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;
@@ -2118,7 +2088,6 @@ static int run(mddev_t *mddev)
 	mddev->resync_max_sectors = size << conf->chunk_shift;
 
 	mddev->queue->unplug_fn = raid10_unplug;
-	mddev->queue->issue_flush_fn = raid10_issue_flush;
 	mddev->queue->backing_dev_info.congested_fn = raid10_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index caaca9e178bc..8ee181a01f52 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3204,36 +3204,6 @@ static void raid5_unplug_device(struct request_queue *q)
 	unplug_slaves(mddev);
 }
 
-static int raid5_issue_flush(struct request_queue *q, struct gendisk *disk,
-			     sector_t *error_sector)
-{
-	mddev_t *mddev = q->queuedata;
-	raid5_conf_t *conf = mddev_to_conf(mddev);
-	int i, ret = 0;
-
-	rcu_read_lock();
-	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
-		if (rdev && !test_bit(Faulty, &rdev->flags)) {
-			struct block_device *bdev = rdev->bdev;
-			struct request_queue *r_queue = bdev_get_queue(bdev);
-
-			if (!r_queue->issue_flush_fn)
-				ret = -EOPNOTSUPP;
-			else {
-				atomic_inc(&rdev->nr_pending);
-				rcu_read_unlock();
-				ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
-							      error_sector);
-				rdev_dec_pending(rdev, mddev);
-				rcu_read_lock();
-			}
-		}
-	}
-	rcu_read_unlock();
-	return ret;
-}
-
 static int raid5_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;
@@ -4263,7 +4233,6 @@ static int run(mddev_t *mddev)
 			mdname(mddev));
 
 	mddev->queue->unplug_fn = raid5_unplug_device;
-	mddev->queue->issue_flush_fn = raid5_issue_flush;
 	mddev->queue->backing_dev_info.congested_data = mddev;
 	mddev->queue->backing_dev_info.congested_fn = raid5_congested;
 