Diffstat (limited to 'drivers/md')
 drivers/md/linear.c    |  1 +
 drivers/md/multipath.c |  1 +
 drivers/md/raid0.c     |  1 +
 drivers/md/raid1.c     |  4 +++-
 drivers/md/raid10.c    |  6 ++++--
 drivers/md/raid5.c     | 26 ++++++++++++++------------
 6 files changed, 24 insertions(+), 15 deletions(-)
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 0b8511776b3e..10748240cb2f 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -250,6 +250,7 @@ static int linear_run (mddev_t *mddev)
 {
 	linear_conf_t *conf;
 
+	mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 	conf = linear_conf(mddev, mddev->raid_disks);
 
 	if (!conf)
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 42ee1a2dc144..4f4d1f383842 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -417,6 +417,7 @@ static int multipath_run (mddev_t *mddev)
 	 * bookkeeping area. [whatever we allocate in multipath_run(),
 	 * should be freed in multipath_stop()]
 	 */
+	mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 
 	conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
 	mddev->private = conf;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 818b48284096..914c04ddec7c 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -280,6 +280,7 @@ static int raid0_run (mddev_t *mddev)
 				    (mddev->chunk_size>>1)-1);
 	blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
 	blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size>>1) - 1);
+	mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 
 	conf = kmalloc(sizeof (raid0_conf_t), GFP_KERNEL);
 	if (!conf)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 6778b7cb39bd..ac409b7d83f5 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1935,6 +1935,9 @@ static int run(mddev_t *mddev)
 	if (!conf->r1bio_pool)
 		goto out_no_mem;
 
+	spin_lock_init(&conf->device_lock);
+	mddev->queue->queue_lock = &conf->device_lock;
+
 	rdev_for_each(rdev, tmp, mddev) {
 		disk_idx = rdev->raid_disk;
 		if (disk_idx >= mddev->raid_disks
@@ -1958,7 +1961,6 @@ static int run(mddev_t *mddev)
 	}
 	conf->raid_disks = mddev->raid_disks;
 	conf->mddev = mddev;
-	spin_lock_init(&conf->device_lock);
 	INIT_LIST_HEAD(&conf->retry_list);
 
 	spin_lock_init(&conf->resync_lock);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 5938fa962922..8536ede1e712 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -886,7 +886,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
 	 */
 	raid10_find_phys(conf, r10_bio);
  retry_write:
-	blocked_rdev = 0;
+	blocked_rdev = NULL;
 	rcu_read_lock();
 	for (i = 0; i < conf->copies; i++) {
 		int d = r10_bio->devs[i].devnum;
@@ -2082,6 +2082,9 @@ static int run(mddev_t *mddev)
 		goto out_free_conf;
 	}
 
+	spin_lock_init(&conf->device_lock);
+	mddev->queue->queue_lock = &conf->device_lock;
+
 	rdev_for_each(rdev, tmp, mddev) {
 		disk_idx = rdev->raid_disk;
 		if (disk_idx >= mddev->raid_disks
@@ -2103,7 +2106,6 @@ static int run(mddev_t *mddev)
 
 		disk->head_position = 0;
 	}
-	spin_lock_init(&conf->device_lock);
 	INIT_LIST_HEAD(&conf->retry_list);
 
 	spin_lock_init(&conf->resync_lock);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 087eee0cb809..93fde48c0f42 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2369,8 +2369,8 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
 
 	/* complete a check operation */
 	if (test_and_clear_bit(STRIPE_OP_CHECK, &sh->ops.complete)) {
-	    clear_bit(STRIPE_OP_CHECK, &sh->ops.ack);
-	    clear_bit(STRIPE_OP_CHECK, &sh->ops.pending);
+		clear_bit(STRIPE_OP_CHECK, &sh->ops.ack);
+		clear_bit(STRIPE_OP_CHECK, &sh->ops.pending);
 		if (s->failed == 0) {
 			if (sh->ops.zero_sum_result == 0)
 				/* parity is correct (on disc,
@@ -2400,16 +2400,6 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
 			canceled_check = 1; /* STRIPE_INSYNC is not set */
 	}
 
-	/* check if we can clear a parity disk reconstruct */
-	if (test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete) &&
-	    test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) {
-
-		clear_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending);
-		clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete);
-		clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.ack);
-		clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
-	}
-
 	/* start a new check operation if there are no failures, the stripe is
 	 * not insync, and a repair is not in flight
 	 */
@@ -2424,6 +2414,17 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
 		}
 	}
 
+	/* check if we can clear a parity disk reconstruct */
+	if (test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete) &&
+	    test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) {
+
+		clear_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending);
+		clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete);
+		clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.ack);
+		clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
+	}
+
+
 	/* Wait for check parity and compute block operations to complete
 	 * before write-back. If a failure occurred while the check operation
 	 * was in flight we need to cycle this stripe through handle_stripe
@@ -4256,6 +4257,7 @@ static int run(mddev_t *mddev)
 		goto abort;
 	}
 	spin_lock_init(&conf->device_lock);
+	mddev->queue->queue_lock = &conf->device_lock;
 	init_waitqueue_head(&conf->wait_for_stripe);
 	init_waitqueue_head(&conf->wait_for_overlap);
 	INIT_LIST_HEAD(&conf->handle_list);