commit c71c03bda1e86c9d5198c5d83f712e695c4f2a1e
tree   ecb166cb3e2b7e2adb3b5e292245fefd23381ac8
parent ea53c912f8a86a8567697115b6a0d8152beee5c8
parent 6a00f206debf8a5c8899055726ad127dbeeed098
author    Glenn Elliott <gelliott@cs.unc.edu> 2012-03-04 19:47:13 -0500
committer Glenn Elliott <gelliott@cs.unc.edu> 2012-03-04 19:47:13 -0500

    Merge branch 'mpi-master' into wip-k-fmlp

    Conflicts:
    	litmus/sched_cedf.c
Diffstat (limited to 'drivers/md/raid0.c')
 drivers/md/raid0.c | 65
 1 file changed, 44 insertions(+), 21 deletions(-)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 6f7af46d623c..e86bf3682e1e 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -25,21 +25,6 @@
 #include "raid0.h"
 #include "raid5.h"
 
-static void raid0_unplug(struct request_queue *q)
-{
-	mddev_t *mddev = q->queuedata;
-	raid0_conf_t *conf = mddev->private;
-	mdk_rdev_t **devlist = conf->devlist;
-	int raid_disks = conf->strip_zone[0].nb_dev;
-	int i;
-
-	for (i=0; i < raid_disks; i++) {
-		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
-
-		blk_unplug(r_queue);
-	}
-}
-
 static int raid0_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;
@@ -179,6 +164,14 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
 			rdev1->new_raid_disk = j;
 		}
 
+		if (mddev->level == 1) {
+			/* taking over a raid1 array -
+			 * we have only one active disk
+			 */
+			j = 0;
+			rdev1->new_raid_disk = j;
+		}
+
 		if (j < 0 || j >= mddev->raid_disks) {
 			printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
 				"aborting!\n", mdname(mddev), j);
@@ -264,7 +257,6 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
 			mdname(mddev),
 			(unsigned long long)smallest->sectors);
 	}
-	mddev->queue->unplug_fn = raid0_unplug;
 	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
 
@@ -353,7 +345,6 @@ static int raid0_run(mddev_t *mddev)
 	if (md_check_no_bitmap(mddev))
 		return -EINVAL;
 	blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
-	mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 
 	/* if private is not null, we are here after takeover */
 	if (mddev->private == NULL) {
@@ -388,8 +379,7 @@ static int raid0_run(mddev_t *mddev)
 
 	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
 	dump_zones(mddev);
-	md_integrity_register(mddev);
-	return 0;
+	return md_integrity_register(mddev);
 }
 
 static int raid0_stop(mddev_t *mddev)
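
The hunk above changes raid0_run() to propagate md_integrity_register()'s status instead of discarding it and returning 0 unconditionally, so integrity-profile setup failures become visible to the md core. A minimal userspace sketch of the same fix; integrity_register() and run() are hypothetical stand-ins for the md calls:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for md_integrity_register(): may fail. */
static int integrity_register(void)
{
	return -ENOMEM;
}

/* Before the fix the status was dropped and 0 returned unconditionally;
 * now the callee's status is forwarded to the caller. */
static int run(void)
{
	return integrity_register();
}

int main(void)
{
	printf("run() = %d\n", run());	/* prints -12 (ENOMEM) in this sketch */
	return 0;
}
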
@@ -483,8 +473,8 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio)
 	struct strip_zone *zone;
 	mdk_rdev_t *tmp_dev;
 
-	if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
-		md_barrier_request(mddev, bio);
+	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+		md_flush_request(mddev, bio);
 		return 0;
 	}
 
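
This hunk tracks the block layer's replacement of hard barriers with flush requests: raid0 now diverts REQ_FLUSH bios to the generic md flush path rather than the removed barrier machinery. A minimal userspace sketch of the flag-dispatch pattern; the flag value, struct layout, and handler below are illustrative stand-ins, not the kernel's definitions:

#include <stdio.h>

#define REQ_FLUSH (1UL << 0)	/* illustrative bit, not the kernel's value */

struct bio { unsigned long bi_rw; };

/* Stand-in for md_flush_request(): upstream, the md core (not raid0
 * itself) completes flush requests against every member device. */
static void flush_request(struct bio *bio)
{
	(void)bio;
	printf("flush: diverted to the md core\n");
}

static void make_request(struct bio *bio)
{
	if (bio->bi_rw & REQ_FLUSH) {
		flush_request(bio);
		return;
	}
	printf("normal I/O: mapped onto a strip zone\n");
}

int main(void)
{
	struct bio flush = { REQ_FLUSH }, data = { 0 };
	make_request(&flush);
	make_request(&data);
	return 0;
}
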
@@ -644,12 +634,39 @@ static void *raid0_takeover_raid10(mddev_t *mddev)
 	return priv_conf;
 }
 
+static void *raid0_takeover_raid1(mddev_t *mddev)
+{
+	raid0_conf_t *priv_conf;
+
+	/* Check layout:
+	 * - (N - 1) mirror drives must be already faulty
+	 */
+	if ((mddev->raid_disks - 1) != mddev->degraded) {
+		printk(KERN_ERR "md/raid0:%s: (N - 1) mirror drives must already be faulty!\n",
+		       mdname(mddev));
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Set new parameters */
+	mddev->new_level = 0;
+	mddev->new_layout = 0;
+	mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */
+	mddev->delta_disks = 1 - mddev->raid_disks;
+	mddev->raid_disks = 1;
+	/* make sure it will not be marked as dirty */
+	mddev->recovery_cp = MaxSector;
+
+	create_strip_zones(mddev, &priv_conf);
+	return priv_conf;
+}
+
 static void *raid0_takeover(mddev_t *mddev)
 {
 	/* raid0 can take over:
 	 *  raid4 - if all data disks are active.
 	 *  raid5 - providing it is Raid4 layout and one disk is faulty
 	 *  raid10 - assuming we have all necessary active disks
+	 *  raid1 - with (N - 1) mirror drives faulty
 	 */
 	if (mddev->level == 4)
 		return raid0_takeover_raid45(mddev);
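
raid0_takeover_raid1() only converts a mirror that is already down to a single active leg: for an N-disk raid1, exactly N - 1 members must be faulty, otherwise the takeover is refused with -EINVAL. A small standalone sketch of that precondition and the resulting geometry; the function name is illustrative, and the chunk arithmetic follows the 128-sector default set above (128 sectors of 512 bytes = 64 KiB):

#include <stdio.h>

/* Mirrors the check in raid0_takeover_raid1(): takeover is allowed
 * only when a single active disk remains. */
static int raid1_takeover_allowed(int raid_disks, int degraded)
{
	return (raid_disks - 1) == degraded;
}

int main(void)
{
	/* Two-way mirror with one failed leg: exactly one active disk. */
	printf("N=2 degraded=1 -> %s\n",
	       raid1_takeover_allowed(2, 1) ? "allowed" : "refused");

	/* Three-way mirror with one failed leg: two disks still active,
	 * so the conversion is refused. */
	printf("N=3 degraded=1 -> %s\n",
	       raid1_takeover_allowed(3, 1) ? "allowed" : "refused");

	/* delta_disks = 1 - N collapses the array to one member;
	 * new_chunk_sectors = 128 -> 128 * 512 / 1024 = 64 KiB chunk. */
	printf("delta_disks for N=2: %d, default chunk: %d KiB\n",
	       1 - 2, 128 * 512 / 1024);
	return 0;
}
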
@@ -665,6 +682,12 @@ static void *raid0_takeover(mddev_t *mddev)
 	if (mddev->level == 10)
 		return raid0_takeover_raid10(mddev);
 
+	if (mddev->level == 1)
+		return raid0_takeover_raid1(mddev);
+
+	printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
+		mddev->level);
+
 	return ERR_PTR(-EINVAL);
 }
 
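In practice this takeover path is driven from user space: assuming a two-disk raid1 at /dev/md0 whose second leg has already failed, a level change along the lines of mdadm --grow /dev/md0 --level=0 would land in the raid0_takeover() dispatch added above and, via raid0_takeover_raid1(), rebuild the array as a single-member raid0.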