Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/bitmap.c       |  4
-rw-r--r--  drivers/md/dm-bio-list.h  |  3
-rw-r--r--  drivers/md/dm-ioctl.c     |  3
-rw-r--r--  drivers/md/dm-log.c       |  4
-rw-r--r--  drivers/md/dm-mpath.c     | 13
-rw-r--r--  drivers/md/dm-raid1.c     | 20
-rw-r--r--  drivers/md/md.c           | 42
-rw-r--r--  drivers/md/raid1.c        | 13
-rw-r--r--  drivers/md/raid10.c       |  6
-rw-r--r--  drivers/md/raid5.c        |  9
-rw-r--r--  drivers/md/raid6main.c    | 27
11 files changed, 97 insertions, 47 deletions
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 51315302a85e..252d55df9642 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -326,9 +326,9 @@ static int write_page(struct bitmap *bitmap, struct page *page, int wait)
 		}
 	}
 
-	ret = page->mapping->a_ops->prepare_write(NULL, page, 0, PAGE_SIZE);
+	ret = page->mapping->a_ops->prepare_write(bitmap->file, page, 0, PAGE_SIZE);
 	if (!ret)
-		ret = page->mapping->a_ops->commit_write(NULL, page, 0,
+		ret = page->mapping->a_ops->commit_write(bitmap->file, page, 0,
 						PAGE_SIZE);
 	if (ret) {
 		unlock_page(page);
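
The hunk above stops passing NULL as the file argument to the address-space prepare_write/commit_write operations, since some filesystems dereference that pointer. A minimal userspace sketch of the hazard, with illustrative stand-in types (not the kernel API):

    #include <stdio.h>
    #include <stddef.h>

    struct file { const char *name; };

    /* stand-in for a_ops->prepare_write(): it dereferences 'file' */
    static int prepare_write(struct file *file, size_t from, size_t to)
    {
            printf("prepare %zu..%zu of %s\n", from, to, file->name);
            return 0;
    }

    int main(void)
    {
            struct file f = { "md-bitmap" };
            /* the fix: hand over the real file, never NULL */
            return prepare_write(&f, 0, 4096);
    }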
diff --git a/drivers/md/dm-bio-list.h b/drivers/md/dm-bio-list.h
index bc021e1fd4d1..bbf4615f0e30 100644
--- a/drivers/md/dm-bio-list.h
+++ b/drivers/md/dm-bio-list.h
@@ -33,6 +33,9 @@ static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
 
 static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
 {
+	if (!bl2->head)
+		return;
+
 	if (bl->tail)
 		bl->tail->bi_next = bl2->head;
 	else
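
Without the new guard, merging an empty source list would still execute the trailing `bl->tail = bl2->tail`, clobbering a non-empty destination's tail pointer with NULL. A hedged userspace analogue (illustrative names, plain singly-linked list):

    #include <assert.h>
    #include <stddef.h>

    struct node { struct node *next; };
    struct list { struct node *head, *tail; };

    static void list_merge(struct list *dst, struct list *src)
    {
            if (!src->head)         /* the added check: nothing to merge */
                    return;
            if (dst->tail)
                    dst->tail->next = src->head;
            else
                    dst->head = src->head;
            dst->tail = src->tail;  /* safe: src->tail is non-NULL here */
    }

    int main(void)
    {
            struct node n = { NULL };
            struct list dst = { &n, &n }, empty = { NULL, NULL };

            list_merge(&dst, &empty);
            assert(dst.tail == &n); /* would be NULL without the check */
            return 0;
    }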
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 54ec737195e0..07d44e19536e 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -425,8 +425,8 @@ static void list_version_get_needed(struct target_type *tt, void *needed_param)
 {
 	size_t *needed = needed_param;
 
+	*needed += sizeof(struct dm_target_versions);
 	*needed += strlen(tt->name);
-	*needed += sizeof(tt->version);
 	*needed += ALIGN_MASK;
 }
 
@@ -974,6 +974,7 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
 	if (!hc) {
 		DMWARN("device doesn't appear to be in the dev hash table.");
 		up_write(&_hash_lock);
+		dm_table_put(t);
 		return -ENXIO;
 	}
 
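
The first hunk reserves space for the whole per-target header (whose version array is embedded in it) instead of the bare version field; the second releases the table reference on the error path so it is not leaked. A hedged sketch of the corrected size accounting, with an illustrative stand-in for the real struct:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ALIGN_MASK 7u   /* assumed: entries padded to 8 bytes */

    struct dm_target_versions {     /* illustrative stand-in layout */
            uint32_t next;
            uint32_t version[3];
            char name[];            /* name is appended after the header */
    };

    static size_t needed_for(const char *name)
    {
            /* header + name + worst-case alignment padding */
            return sizeof(struct dm_target_versions)
                    + strlen(name) + ALIGN_MASK;
    }

    int main(void)
    {
            printf("%zu bytes reserved for \"linear\"\n",
                   needed_for("linear"));
            return 0;
    }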
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index e110655eabdb..a76349cb10a5 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -333,10 +333,10 @@ static int core_ctr(struct dirty_log *log, struct dm_target *ti,
 	lc->sync = sync;
 
 	/*
-	 * Work out how many words we need to hold the bitset.
+	 * Work out how many "unsigned long"s we need to hold the bitset.
 	 */
 	bitset_size = dm_round_up(region_count,
-				  sizeof(*lc->clean_bits) << BYTE_SHIFT);
+				  sizeof(unsigned long) << BYTE_SHIFT);
 	bitset_size >>= BYTE_SHIFT;
 
 	lc->bitset_uint32_count = bitset_size / 4;
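
The sizing fix matters on 64-bit machines: the bitset is processed an unsigned long at a time, so its length must round up to whole unsigned longs, not to the 32-bit element type behind the array pointer. A hedged sketch of the corrected arithmetic (BYTE_SHIFT = 3, bits per byte, as in the kernel source):

    #include <stdio.h>

    #define BYTE_SHIFT 3

    static size_t round_up(size_t n, size_t mult)
    {
            return (n + mult - 1) / mult * mult;
    }

    static size_t bitset_bytes(size_t region_count)
    {
            /* round the bit count up to whole unsigned longs */
            size_t bits = round_up(region_count,
                                   sizeof(unsigned long) << BYTE_SHIFT);
            return bits >> BYTE_SHIFT;
    }

    int main(void)
    {
            /* 33 regions need 8 bytes on LP64, not 4-byte granularity */
            printf("%zu bytes for 33 regions\n", bitset_bytes(33));
            return 0;
    }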
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index f9b7b32d5d5c..f72a82fb9434 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1000,6 +1000,7 @@ static int do_end_io(struct multipath *m, struct bio *bio,
 {
 	struct hw_handler *hwh = &m->hw_handler;
 	unsigned err_flags = MP_FAIL_PATH;	/* Default behavior */
+	unsigned long flags;
 
 	if (!error)
 		return 0;	/* I/O complete */
@@ -1010,17 +1011,17 @@ static int do_end_io(struct multipath *m, struct bio *bio,
 	if (error == -EOPNOTSUPP)
 		return error;
 
-	spin_lock(&m->lock);
+	spin_lock_irqsave(&m->lock, flags);
 	if (!m->nr_valid_paths) {
 		if (!m->queue_if_no_path) {
-			spin_unlock(&m->lock);
+			spin_unlock_irqrestore(&m->lock, flags);
 			return -EIO;
 		} else {
-			spin_unlock(&m->lock);
+			spin_unlock_irqrestore(&m->lock, flags);
 			goto requeue;
 		}
 	}
-	spin_unlock(&m->lock);
+	spin_unlock_irqrestore(&m->lock, flags);
 
 	if (hwh->type && hwh->type->error)
 		err_flags = hwh->type->error(hwh, bio);
@@ -1040,12 +1041,12 @@ static int do_end_io(struct multipath *m, struct bio *bio,
 	dm_bio_restore(&mpio->details, bio);
 
 	/* queue for the daemon to resubmit or fail */
-	spin_lock(&m->lock);
+	spin_lock_irqsave(&m->lock, flags);
 	bio_list_add(&m->queued_ios, bio);
 	m->queue_size++;
 	if (!m->queue_io)
 		queue_work(kmultipathd, &m->process_queued_ios);
-	spin_unlock(&m->lock);
+	spin_unlock_irqrestore(&m->lock, flags);
 
 	return 1;	/* io not complete */
 }
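
do_end_io() runs on the I/O completion path, where interrupts may already be disabled, so the plain spin_lock() calls become irqsave/irqrestore pairs that remember and reinstate the caller's interrupt state rather than unconditionally re-enabling it. A toy model of that save/restore discipline (not the real kernel primitives):

    #include <stdio.h>

    static unsigned long irq_enabled = 1;   /* toy CPU interrupt flag */

    static unsigned long toy_irq_save(void)
    {
            unsigned long flags = irq_enabled;
            irq_enabled = 0;                /* disable interrupts */
            return flags;
    }

    static void toy_irq_restore(unsigned long flags)
    {
            irq_enabled = flags;            /* restore, don't force-enable */
    }

    int main(void)
    {
            unsigned long flags = toy_irq_save();
            /* ... critical section shared with completion context ... */
            toy_irq_restore(flags);
            printf("interrupts restored to %lu\n", irq_enabled);
            return 0;
    }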
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 2375709a392c..6b0fc1670929 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -376,16 +376,18 @@ static void rh_inc(struct region_hash *rh, region_t region)
 	read_lock(&rh->hash_lock);
 	reg = __rh_find(rh, region);
 
+	spin_lock_irq(&rh->region_lock);
 	atomic_inc(&reg->pending);
 
-	spin_lock_irq(&rh->region_lock);
 	if (reg->state == RH_CLEAN) {
-		rh->log->type->mark_region(rh->log, reg->key);
-
 		reg->state = RH_DIRTY;
 		list_del_init(&reg->list);	/* take off the clean list */
-	}
-	spin_unlock_irq(&rh->region_lock);
+		spin_unlock_irq(&rh->region_lock);
+
+		rh->log->type->mark_region(rh->log, reg->key);
+	} else
+		spin_unlock_irq(&rh->region_lock);
 
 	read_unlock(&rh->hash_lock);
 }
@@ -408,21 +410,17 @@ static void rh_dec(struct region_hash *rh, region_t region)
 	reg = __rh_lookup(rh, region);
 	read_unlock(&rh->hash_lock);
 
+	spin_lock_irqsave(&rh->region_lock, flags);
 	if (atomic_dec_and_test(&reg->pending)) {
-		spin_lock_irqsave(&rh->region_lock, flags);
-		if (atomic_read(&reg->pending)) { /* check race */
-			spin_unlock_irqrestore(&rh->region_lock, flags);
-			return;
-		}
 		if (reg->state == RH_RECOVERING) {
 			list_add_tail(&reg->list, &rh->quiesced_regions);
 		} else {
 			reg->state = RH_CLEAN;
 			list_add(&reg->list, &rh->clean_regions);
 		}
-		spin_unlock_irqrestore(&rh->region_lock, flags);
 		should_wake = 1;
 	}
+	spin_unlock_irqrestore(&rh->region_lock, flags);
 
 	if (should_wake)
 		wake();
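
In rh_inc() the state transition now happens entirely under region_lock, while the potentially slow mark_region() log call is made only after the lock is dropped (and only on the CLEAN to DIRTY transition); in rh_dec(), widening the lock over atomic_dec_and_test() makes the old "check race" re-test unnecessary. A hedged pthread sketch of the rh_inc() pattern, with a mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>

    enum state { CLEAN, DIRTY };

    static pthread_mutex_t region_lock = PTHREAD_MUTEX_INITIALIZER;
    static enum state region_state = CLEAN;

    static void mark_region(void)
    {
            puts("mark_region: slow dirty-log update");
    }

    static void rh_inc_toy(void)
    {
            int became_dirty = 0;

            pthread_mutex_lock(&region_lock);
            if (region_state == CLEAN) {
                    region_state = DIRTY;   /* decided under the lock */
                    became_dirty = 1;
            }
            pthread_mutex_unlock(&region_lock);

            if (became_dirty)
                    mark_region();          /* slow work, lock dropped */
    }

    int main(void)
    {
            rh_inc_toy();   /* marks the region */
            rh_inc_toy();   /* already dirty: no second mark */
            return 0;
    }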
diff --git a/drivers/md/md.c b/drivers/md/md.c
index adf960d8a7c9..8175a2a222da 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1028,7 +1028,6 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
 	mddev->size = le64_to_cpu(sb->size)/2;
 	mddev->events = le64_to_cpu(sb->events);
 	mddev->bitmap_offset = 0;
-	mddev->default_bitmap_offset = 0;
 	mddev->default_bitmap_offset = 1024;
 
 	mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
@@ -1730,7 +1729,7 @@ level_show(mddev_t *mddev, char *page)
 	if (p == NULL && mddev->raid_disks == 0)
 		return 0;
 	if (mddev->level >= 0)
-		return sprintf(page, "RAID-%d\n", mddev->level);
+		return sprintf(page, "raid%d\n", mddev->level);
 	else
 		return sprintf(page, "%s\n", p->name);
 }
@@ -2932,6 +2931,9 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
 
 	mddev->sb_dirty = 1;
 
+	mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
+	mddev->bitmap_offset = 0;
+
 	/*
 	 * Generate a 128 bit UUID
 	 */
@@ -3156,7 +3158,7 @@ static int md_ioctl(struct inode *inode, struct file *file,
 		if (cnt > 0 ) {
 			printk(KERN_WARNING
 			       "md: %s(pid %d) used deprecated START_ARRAY ioctl. "
-			       "This will not be supported beyond 2.6\n",
+			       "This will not be supported beyond July 2006\n",
 			       current->comm, current->pid);
 			cnt--;
 		}
@@ -3437,10 +3439,19 @@ static int md_thread(void * arg)
 	allow_signal(SIGKILL);
 	while (!kthread_should_stop()) {
 
-		wait_event_timeout(thread->wqueue,
-				   test_bit(THREAD_WAKEUP, &thread->flags)
-				   || kthread_should_stop(),
-				   thread->timeout);
+		/* We need to wait INTERRUPTIBLE so that
+		 * we don't add to the load-average.
+		 * That means we need to be sure no signals are
+		 * pending
+		 */
+		if (signal_pending(current))
+			flush_signals(current);
+
+		wait_event_interruptible_timeout
+			(thread->wqueue,
+			 test_bit(THREAD_WAKEUP, &thread->flags)
+			 || kthread_should_stop(),
+			 thread->timeout);
 		try_to_freeze();
 
 		clear_bit(THREAD_WAKEUP, &thread->flags);
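
An uninterruptible sleep counts toward the load average, so md_thread now flushes any pending signal and waits interruptibly instead. A hedged userspace analogue of the wait loop, with pthread primitives standing in for the kernel wait queue:

    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t wqueue = PTHREAD_COND_INITIALIZER;
    static int wakeup_flag = 1;     /* pretend we were already woken */

    int main(void)
    {
            struct timespec deadline;

            clock_gettime(CLOCK_REALTIME, &deadline);
            deadline.tv_sec += 1;   /* thread->timeout analogue */

            pthread_mutex_lock(&lock);
            /* re-check the flag on every (possibly spurious) wakeup;
             * give up once the absolute deadline passes */
            while (!wakeup_flag &&
                   pthread_cond_timedwait(&wqueue, &lock, &deadline) == 0)
                    ;
            wakeup_flag = 0;        /* clear_bit(THREAD_WAKEUP, ...) */
            pthread_mutex_unlock(&lock);

            puts("woken: run md housekeeping");
            return 0;
    }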
@@ -3837,11 +3848,20 @@ static int is_mddev_idle(mddev_t *mddev)
 		curr_events = disk_stat_read(disk, sectors[0]) +
 			      disk_stat_read(disk, sectors[1]) -
 			      atomic_read(&disk->sync_io);
-		/* Allow some slack between valud of curr_events and last_events,
-		 * as there are some uninteresting races.
+		/* The difference between curr_events and last_events
+		 * will be affected by any new non-sync IO (making
+		 * curr_events bigger) and any difference in the amount of
+		 * in-flight syncio (making current_events bigger or smaller)
+		 * The amount in-flight is currently limited to
+		 * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6
+		 * which is at most 4096 sectors.
+		 * These numbers are fairly fragile and should be made
+		 * more robust, probably by enforcing the
+		 * 'window size' that md_do_sync sort-of uses.
+		 *
 		 * Note: the following is an unsigned comparison.
 		 */
-		if ((curr_events - rdev->last_events + 32) > 64) {
+		if ((curr_events - rdev->last_events + 4096) > 8192) {
 			rdev->last_events = curr_events;
 			idle = 0;
 		}
@@ -4100,7 +4120,7 @@ static void md_do_sync(mddev_t *mddev)
 		if (currspeed > sysctl_speed_limit_min) {
 			if ((currspeed > sysctl_speed_limit_max) ||
 			    !is_mddev_idle(mddev)) {
-				msleep(250);
+				msleep(500);
 				goto repeat;
 			}
 		}
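
The widened slack in is_mddev_idle() still relies on the single unsigned comparison called out in the comment: adding 4096 before comparing against 8192 bounds the (possibly negative) difference to roughly plus or minus 4096 in one branch, because a negative difference wraps to a huge unsigned value. A small demonstration of that idiom:

    #include <stdio.h>

    static int within_slack(unsigned long curr, unsigned long last)
    {
            /* one unsigned compare bounds the difference both ways */
            return (curr - last + 4096) <= 8192;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   within_slack(1000, 1000),    /* equal: idle */
                   within_slack(1000, 6000),    /* 5000 behind: busy */
                   within_slack(9000, 1000));   /* 8000 ahead: busy */
            return 0;
    }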
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 2da9d3ba902d..229d7b204297 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -320,7 +320,6 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
 	 * this branch is our 'one mirror IO has finished' event handler:
 	 */
 	r1_bio->bios[mirror] = NULL;
-	bio_put(bio);
 	if (!uptodate) {
 		md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
 		/* an I/O failed, we can't clear the bitmap */
@@ -377,7 +376,6 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
 	}
 	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
 		/* free extra copy of the data pages */
-/* FIXME bio has been freed!!! */
 		int i = bio->bi_vcnt;
 		while (i--)
 			__free_page(bio->bi_io_vec[i].bv_page);
@@ -391,6 +389,9 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
 		raid_end_bio_io(r1_bio);
 	}
 
+	if (r1_bio->bios[mirror]==NULL)
+		bio_put(bio);
+
 	rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
 	return 0;
 }
@@ -953,9 +954,6 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 	int mirror = 0;
 	mirror_info_t *p;
 
-	if (rdev->saved_raid_disk >= 0 &&
-	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
-		mirror = rdev->saved_raid_disk;
 	for (mirror=0; mirror < mddev->raid_disks; mirror++)
 		if ( !(p=conf->mirrors+mirror)->rdev) {
 
@@ -972,7 +970,10 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			p->head_position = 0;
 			rdev->raid_disk = mirror;
 			found = 1;
-			if (rdev->saved_raid_disk != mirror)
+			/* As all devices are equivalent, we don't need a full recovery
+			 * if this was recently any drive of the array
+			 */
+			if (rdev->saved_raid_disk < 0)
 				conf->fullsync = 1;
 			rcu_assign_pointer(p->rdev, rdev);
 			break;
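
The raid1_end_write_request hunks are a use-after-free fix: the old code dropped its bio reference before the behind-I/O path read bio->bi_vcnt and the page vector (hence the old FIXME). The put now happens after the last access, and only when this request really owned the bio. A minimal refcount toy of the ordering:

    #include <assert.h>
    #include <stdlib.h>

    struct obj { int refs; int vcnt; };

    static struct obj *obj_new(int vcnt)
    {
            struct obj *o = malloc(sizeof(*o));
            o->refs = 1;
            o->vcnt = vcnt;
            return o;
    }

    static void obj_put(struct obj *o)
    {
            if (--o->refs == 0)
                    free(o);
    }

    int main(void)
    {
            struct obj *bio = obj_new(4);
            int pages = bio->vcnt;  /* last use of the object ... */

            obj_put(bio);           /* ... and only then drop the ref */
            assert(pages == 4);
            return 0;
    }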
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 867f06ae33d9..713dc9c2c730 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -552,7 +552,11 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
 		    !test_bit(In_sync, &rdev->flags))
 			continue;
 
-		if (!atomic_read(&rdev->nr_pending)) {
+		/* This optimisation is debatable, and completely destroys
+		 * sequential read speed for 'far copies' arrays.  So only
+		 * keep it for 'near' arrays, and review those later.
+		 */
+		if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending)) {
 			disk = ndisk;
 			slot = nslot;
 			break;
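
The read_balance change keeps the pick-an-idle-disk shortcut only for 'near copies' layouts, since on 'far copies' arrays it wrecks sequential read locality. A hedged sketch of the resulting selection logic (illustrative fields, not the real conf_t):

    #include <stdio.h>
    #include <stdlib.h>

    struct disk { int pending; long head_pos; };

    static int pick_disk(struct disk *d, int n, long sector, int near_copies)
    {
            int best = 0, i;

            for (i = 0; i < n; i++) {
                    /* idle-disk shortcut kept only for 'near' layouts */
                    if (near_copies > 1 && d[i].pending == 0)
                            return i;
                    if (labs(d[i].head_pos - sector) <
                        labs(d[best].head_pos - sector))
                            best = i;
            }
            return best;    /* otherwise closest head position wins */
    }

    int main(void)
    {
            struct disk d[2] = { { 3, 100 }, { 0, 900 } };

            printf("near picks %d, far picks %d\n",
                   pick_disk(d, 2, 120, 2),     /* idle disk 1 */
                   pick_disk(d, 2, 120, 1));    /* nearby disk 0 */
            return 0;
    }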
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e2a40283e323..fafc4bc045f7 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -98,7 +98,7 @@ static inline void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
 			list_add_tail(&sh->lru, &conf->inactive_list);
 			atomic_dec(&conf->active_stripes);
 			if (!conf->inactive_blocked ||
-			    atomic_read(&conf->active_stripes) < (NR_STRIPES*3/4))
+			    atomic_read(&conf->active_stripes) < (conf->max_nr_stripes*3/4))
 				wake_up(&conf->wait_for_stripe);
 		}
 	}
@@ -264,7 +264,8 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector
 			conf->inactive_blocked = 1;
 			wait_event_lock_irq(conf->wait_for_stripe,
 					    !list_empty(&conf->inactive_list) &&
-					    (atomic_read(&conf->active_stripes) < (NR_STRIPES *3/4)
+					    (atomic_read(&conf->active_stripes)
+					     < (conf->max_nr_stripes *3/4)
 					     || !conf->inactive_blocked),
 					    conf->device_lock,
 					    unplug_slaves(conf->mddev);
@@ -1704,7 +1705,9 @@ static void raid5d (mddev_t *mddev)
 
 		if (conf->seq_flush - conf->seq_write > 0) {
 			int seq = conf->seq_flush;
+			spin_unlock_irq(&conf->device_lock);
 			bitmap_unplug(mddev->bitmap);
+			spin_lock_irq(&conf->device_lock);
 			conf->seq_write = seq;
 			activate_bit_delay(conf);
 		}
@@ -1915,7 +1918,7 @@ static int run(mddev_t *mddev)
 			goto abort;
 		}
 	}
-memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
+	memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
 		 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
 	if (grow_stripes(conf, conf->max_nr_stripes)) {
 		printk(KERN_ERR
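
Two independent raid5 fixes above: the stripe-cache wakeup threshold now tracks the runtime conf->max_nr_stripes rather than the compile-time NR_STRIPES default, and device_lock is dropped around bitmap_unplug(), which may block on I/O. A sketch of why the threshold change matters once the cache is resized:

    #include <stdio.h>

    #define NR_STRIPES 256  /* compile-time default only */

    static int should_wake(int active, int max_nr_stripes)
    {
            /* compare against the configured size, not the default */
            return active < max_nr_stripes * 3 / 4;
    }

    int main(void)
    {
            /* cache grown to 1024 stripes: 300 active must still wake
             * waiters, yet 300 < NR_STRIPES*3/4 would say no */
            printf("runtime: %d, compile-time: %d\n",
                   should_wake(300, 1024), 300 < NR_STRIPES * 3 / 4);
            return 0;
    }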
diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c
index eae5a35629c5..0000d162d198 100644
--- a/drivers/md/raid6main.c
+++ b/drivers/md/raid6main.c
@@ -1702,6 +1702,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 	int data_disks = raid_disks - 2;
 	sector_t max_sector = mddev->size << 1;
 	int sync_blocks;
+	int still_degraded = 0;
+	int i;
 
 	if (sector_nr >= max_sector) {
 		/* just being told to finish up .. nothing much to do */
@@ -1710,7 +1712,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 		if (mddev->curr_resync < max_sector) /* aborted */
 			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
 					&sync_blocks, 1);
-		else /* compelted sync */
+		else /* completed sync */
 			conf->fullsync = 0;
 		bitmap_close_sync(mddev->bitmap);
 
@@ -1748,7 +1750,16 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 		 */
 		schedule_timeout_uninterruptible(1);
 	}
-	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 0);
+	/* Need to check if array will still be degraded after recovery/resync
+	 * We don't need to check the 'failed' flag as when that gets set,
+	 * recovery aborts.
+	 */
+	for (i=0; i<mddev->raid_disks; i++)
+		if (conf->disks[i].rdev == NULL)
+			still_degraded = 1;
+
+	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
+
 	spin_lock(&sh->lock);
 	set_bit(STRIPE_SYNCING, &sh->state);
 	clear_bit(STRIPE_INSYNC, &sh->state);
@@ -1784,7 +1795,9 @@ static void raid6d (mddev_t *mddev)
 
 		if (conf->seq_flush - conf->seq_write > 0) {
 			int seq = conf->seq_flush;
+			spin_unlock_irq(&conf->device_lock);
 			bitmap_unplug(mddev->bitmap);
+			spin_lock_irq(&conf->device_lock);
 			conf->seq_write = seq;
 			activate_bit_delay(conf);
 		}
@@ -2145,9 +2158,15 @@ static int raid6_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 		/* no point adding a device */
 		return 0;
 	/*
-	 * find the disk ...
+	 * find the disk ... but prefer rdev->saved_raid_disk
+	 * if possible.
 	 */
-	for (disk=0; disk < mddev->raid_disks; disk++)
+	if (rdev->saved_raid_disk >= 0 &&
+	    conf->disks[rdev->saved_raid_disk].rdev == NULL)
+		disk = rdev->saved_raid_disk;
+	else
+		disk = 0;
+	for ( ; disk < mddev->raid_disks; disk++)
 		if ((p=conf->disks + disk)->rdev == NULL) {
 			clear_bit(In_sync, &rdev->flags);
 			rdev->raid_disk = disk;
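
raid6_add_disk() now starts its search at rdev->saved_raid_disk when that slot is free, so a re-added device reclaims its old position and the bitmap-based partial resync (via still_degraded above) can be used instead of a full one. A hedged sketch of the slot selection (array layout illustrative):

    #include <stdio.h>

    #define RAID_DISKS 6

    static int add_disk_slot(const int occupied[RAID_DISKS], int saved)
    {
            int disk;

            if (saved >= 0 && !occupied[saved])
                    disk = saved;   /* prefer the device's old slot */
            else
                    disk = 0;
            for (; disk < RAID_DISKS; disk++)
                    if (!occupied[disk])
                            return disk;
            return -1;              /* no free slot */
    }

    int main(void)
    {
            int occupied[RAID_DISKS] = { 1, 1, 0, 1, 0, 1 };

            /* device previously at slot 4 gets slot 4 back */
            printf("slot %d\n", add_disk_slot(occupied, 4));
            return 0;
    }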