Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/bitmap.c      | 47
-rw-r--r--  drivers/md/md-cluster.c  | 12
-rw-r--r--  drivers/md/md.c          | 28
-rw-r--r--  drivers/md/raid10.c      | 13
-rw-r--r--  drivers/md/raid5.c       | 64
5 files changed, 107 insertions, 57 deletions
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 6fff794e0c72..13041ee37ad6 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -2183,19 +2183,29 @@ location_show(struct mddev *mddev, char *page)
 static ssize_t
 location_store(struct mddev *mddev, const char *buf, size_t len)
 {
+        int rv;
 
+        rv = mddev_lock(mddev);
+        if (rv)
+                return rv;
         if (mddev->pers) {
-                if (!mddev->pers->quiesce)
-                        return -EBUSY;
-                if (mddev->recovery || mddev->sync_thread)
-                        return -EBUSY;
+                if (!mddev->pers->quiesce) {
+                        rv = -EBUSY;
+                        goto out;
+                }
+                if (mddev->recovery || mddev->sync_thread) {
+                        rv = -EBUSY;
+                        goto out;
+                }
         }
 
         if (mddev->bitmap || mddev->bitmap_info.file ||
             mddev->bitmap_info.offset) {
                 /* bitmap already configured. Only option is to clear it */
-                if (strncmp(buf, "none", 4) != 0)
-                        return -EBUSY;
+                if (strncmp(buf, "none", 4) != 0) {
+                        rv = -EBUSY;
+                        goto out;
+                }
                 if (mddev->pers) {
                         mddev->pers->quiesce(mddev, 1);
                         bitmap_destroy(mddev);
@@ -2214,21 +2224,25 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
                         /* nothing to be done */;
                 else if (strncmp(buf, "file:", 5) == 0) {
                         /* Not supported yet */
-                        return -EINVAL;
+                        rv = -EINVAL;
+                        goto out;
                 } else {
-                        int rv;
                         if (buf[0] == '+')
                                 rv = kstrtoll(buf+1, 10, &offset);
                         else
                                 rv = kstrtoll(buf, 10, &offset);
                         if (rv)
-                                return rv;
-                        if (offset == 0)
-                                return -EINVAL;
+                                goto out;
+                        if (offset == 0) {
+                                rv = -EINVAL;
+                                goto out;
+                        }
                         if (mddev->bitmap_info.external == 0 &&
                             mddev->major_version == 0 &&
-                            offset != mddev->bitmap_info.default_offset)
-                                return -EINVAL;
+                            offset != mddev->bitmap_info.default_offset) {
+                                rv = -EINVAL;
+                                goto out;
+                        }
                         mddev->bitmap_info.offset = offset;
                         if (mddev->pers) {
                                 struct bitmap *bitmap;
@@ -2245,7 +2259,7 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
                                 mddev->pers->quiesce(mddev, 0);
                                 if (rv) {
                                         bitmap_destroy(mddev);
-                                        return rv;
+                                        goto out;
                                 }
                         }
                 }
@@ -2257,6 +2271,11 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
                 set_bit(MD_CHANGE_DEVS, &mddev->flags);
                 md_wakeup_thread(mddev->thread);
         }
+        rv = 0;
+out:
+        mddev_unlock(mddev);
+        if (rv)
+                return rv;
         return len;
 }
 
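The bitmap.c hunks above turn every early return in location_store() into "rv = <error>; goto out;" so the newly taken mddev lock is always dropped on a single exit path. A minimal userspace sketch of that single-exit shape, with a pthread mutex and a fake_mddev struct standing in for mddev_lock()/mddev_unlock() and the real md state (these stand-ins are illustrative only, not kernel API):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

struct fake_mddev {
        pthread_mutex_t lock;   /* stands in for the mddev reconfig mutex */
        int has_bitmap;
};

static ssize_t location_store_sketch(struct fake_mddev *mddev,
                                     const char *buf, size_t len)
{
        int rv;

        rv = pthread_mutex_lock(&mddev->lock);  /* mddev_lock() analogue */
        if (rv)
                return -rv;

        if (mddev->has_bitmap && strncmp(buf, "none", 4) != 0) {
                rv = -EBUSY;            /* was a bare "return -EBUSY;" */
                goto out;
        }

        /* ... parse the offset, create or destroy the bitmap ... */
        rv = 0;
out:
        pthread_mutex_unlock(&mddev->lock);     /* mddev_unlock() analogue */
        return rv ? rv : (ssize_t)len;
}

int main(void)
{
        struct fake_mddev m = { PTHREAD_MUTEX_INITIALIZER, 1 };

        printf("%zd\n", location_store_sketch(&m, "none", 4));  /* 4 */
        printf("%zd\n", location_store_sketch(&m, "+8192", 5)); /* -EBUSY */
        return 0;
}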
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 41573f1f626f..34a840d9df76 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -834,8 +834,10 @@ static int join(struct mddev *mddev, int nodes)
                 goto err;
         }
         cinfo->ack_lockres = lockres_init(mddev, "ack", ack_bast, 0);
-        if (!cinfo->ack_lockres)
+        if (!cinfo->ack_lockres) {
+                ret = -ENOMEM;
                 goto err;
+        }
         /* get sync CR lock on ACK. */
         if (dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR))
                 pr_err("md-cluster: failed to get a sync CR lock on ACK!(%d)\n",
@@ -849,8 +851,10 @@ static int join(struct mddev *mddev, int nodes)
         pr_info("md-cluster: Joined cluster %s slot %d\n", str, cinfo->slot_number);
         snprintf(str, 64, "bitmap%04d", cinfo->slot_number - 1);
         cinfo->bitmap_lockres = lockres_init(mddev, str, NULL, 1);
-        if (!cinfo->bitmap_lockres)
+        if (!cinfo->bitmap_lockres) {
+                ret = -ENOMEM;
                 goto err;
+        }
         if (dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW)) {
                 pr_err("Failed to get bitmap lock\n");
                 ret = -EINVAL;
@@ -858,8 +862,10 @@ static int join(struct mddev *mddev, int nodes)
         }
 
         cinfo->resync_lockres = lockres_init(mddev, "resync", NULL, 0);
-        if (!cinfo->resync_lockres)
+        if (!cinfo->resync_lockres) {
+                ret = -ENOMEM;
                 goto err;
+        }
 
         return 0;
 err:
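Each lockres_init() failure in join() used to jump to err: without assigning ret, so the function could report success (or a stale code) after a failed allocation; the hunks above set ret = -ENOMEM first. A compact sketch of the same convention, where alloc_resource() and release_all() are invented stand-ins for lockres_init() and the md-cluster teardown path:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx { void *ack, *bitmap, *resync; };

static void *alloc_resource(void)      { return malloc(16); }
static void release_all(struct ctx *c) { free(c->ack); free(c->bitmap); free(c->resync); }

static int join_sketch(struct ctx *c)
{
        int ret = 0;

        c->ack = alloc_resource();
        if (!c->ack) {
                ret = -ENOMEM;  /* previously a bare "goto err;" left ret == 0 */
                goto err;
        }
        c->bitmap = alloc_resource();
        if (!c->bitmap) {
                ret = -ENOMEM;
                goto err;
        }
        c->resync = alloc_resource();
        if (!c->resync) {
                ret = -ENOMEM;
                goto err;
        }
        return 0;
err:
        release_all(c);
        return ret;             /* the caller now sees a real error code */
}

int main(void)
{
        struct ctx c = { 0 };
        int ret = join_sketch(&c);

        printf("join_sketch: %d\n", ret);
        if (ret == 0)
                release_all(&c);        /* error paths already cleaned up via err: */
        return 0;
}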
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d646f6e444f0..67642bacd597 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1604,11 +1604,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
                         mddev->new_chunk_sectors = mddev->chunk_sectors;
                 }
 
-                if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL) {
+                if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
                         set_bit(MD_HAS_JOURNAL, &mddev->flags);
-                        if (mddev->recovery_cp == MaxSector)
-                                set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
-                }
         } else if (mddev->pers == NULL) {
                 /* Insist of good event counter while assembling, except for
                  * spares (which don't need an event count) */
@@ -5851,6 +5848,9 @@ static int get_array_info(struct mddev *mddev, void __user *arg)
                         working++;
                         if (test_bit(In_sync, &rdev->flags))
                                 insync++;
+                        else if (test_bit(Journal, &rdev->flags))
+                                /* TODO: add journal count to md_u.h */
+                                ;
                         else
                                 spare++;
                 }
@@ -7862,6 +7862,7 @@ void md_do_sync(struct md_thread *thread)
          */
 
         do {
+                int mddev2_minor = -1;
                 mddev->curr_resync = 2;
 
         try_again:
@@ -7891,10 +7892,14 @@ void md_do_sync(struct md_thread *thread)
                                 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
                                 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
                                     mddev2->curr_resync >= mddev->curr_resync) {
-                                        printk(KERN_INFO "md: delaying %s of %s"
-                                               " until %s has finished (they"
-                                               " share one or more physical units)\n",
-                                               desc, mdname(mddev), mdname(mddev2));
+                                        if (mddev2_minor != mddev2->md_minor) {
+                                                mddev2_minor = mddev2->md_minor;
+                                                printk(KERN_INFO "md: delaying %s of %s"
+                                                       " until %s has finished (they"
+                                                       " share one or more physical units)\n",
+                                                       desc, mdname(mddev),
+                                                       mdname(mddev2));
+                                        }
                                         mddev_put(mddev2);
                                         if (signal_pending(current))
                                                 flush_signals(current);
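The md_do_sync() hunk stops the "delaying ... until ... has finished" message from being reprinted on every pass of the retry loop: mddev2_minor remembers which array the message was last printed for, and the printk fires only when that changes. The same rate-limiting idea as a self-contained sketch (the peer id and round count are made-up inputs):

#include <stdio.h>

/* Print the "waiting on a peer" message only when the peer changes,
 * not on every pass of a busy retry loop.
 */
static void wait_on_peer_sketch(int peer_id, int rounds)
{
        int last_id = -1;       /* analogue of "int mddev2_minor = -1;" */

        for (int i = 0; i < rounds; i++) {
                if (last_id != peer_id) {
                        last_id = peer_id;
                        printf("md: delaying resync until md%d has finished\n",
                               peer_id);
                }
                /* ... sleep, then re-check whether the peer is still busy ... */
        }
}

int main(void)
{
        wait_on_peer_sketch(3, 1000);   /* one line of output, not 1000 */
        return 0;
}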
@@ -8275,16 +8280,13 @@ no_add:
 static void md_start_sync(struct work_struct *ws)
 {
         struct mddev *mddev = container_of(ws, struct mddev, del_work);
-        int ret = 0;
 
         mddev->sync_thread = md_register_thread(md_do_sync,
                                                 mddev,
                                                 "resync");
         if (!mddev->sync_thread) {
-                if (!(mddev_is_clustered(mddev) && ret == -EAGAIN))
-                        printk(KERN_ERR "%s: could not start resync"
-                               " thread...\n",
-                               mdname(mddev));
+                printk(KERN_ERR "%s: could not start resync thread...\n",
+                       mdname(mddev));
                 /* leave the spares where they are, it shouldn't hurt */
                 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
                 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 0e4efcd10795..be1a9fca3b2d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1064,6 +1064,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
         int max_sectors;
         int sectors;
 
+        md_write_start(mddev, bio);
+
         /*
          * Register the new request and wait if the reconstruction
          * thread has put up a bar for new requests.
@@ -1445,8 +1447,6 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
                 return;
         }
 
-        md_write_start(mddev, bio);
-
         do {
 
                 /*
@@ -2465,20 +2465,21 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
 
         while (sect_to_write) {
                 struct bio *wbio;
+                sector_t wsector;
                 if (sectors > sect_to_write)
                         sectors = sect_to_write;
                 /* Write at 'sector' for 'sectors' */
                 wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
                 bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
-                wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
-                                           choose_data_offset(r10_bio, rdev) +
-                                           (sector - r10_bio->sector));
+                wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
+                wbio->bi_iter.bi_sector = wsector +
+                                          choose_data_offset(r10_bio, rdev);
                 wbio->bi_bdev = rdev->bdev;
                 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
 
                 if (submit_bio_wait(wbio) < 0)
                         /* Failure! */
-                        ok = rdev_set_badblocks(rdev, sector,
+                        ok = rdev_set_badblocks(rdev, wsector,
                                                 sectors, 0)
                                 && ok;
 
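In narrow_write_error(), the retry bio is positioned at the member-device address while the old code recorded bad blocks at the array-logical 'sector'; the hunk computes the device sector once as wsector and passes it to both the bio and rdev_set_badblocks(). A stripped-down sketch of computing the translated address once so the write and the bad-block record cannot disagree (dev_start and data_offset are invented numbers standing in for r10_bio->devs[i].addr and choose_data_offset()):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

static void retry_write_sketch(sector_t array_sector, int sectors)
{
        const sector_t dev_start = 4096, data_offset = 256;

        /* Compute the device-relative sector once... */
        sector_t wsector = dev_start + array_sector;

        /* ...position the retry write with it... */
        printf("write %d sectors at device sector %llu\n",
               sectors, (unsigned long long)(wsector + data_offset));

        /* ...and, if the write fails, record the bad blocks at the same
         * device address (the old code used the array-logical sector here).
         */
        printf("mark %d sectors bad at device sector %llu\n",
               sectors, (unsigned long long)wsector);
}

int main(void)
{
        retry_write_sketch(1000, 8);
        return 0;
}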
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8912407a4dd0..da583bb43c84 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -659,6 +659,7 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
 {
         struct stripe_head *sh;
         int hash = stripe_hash_locks_hash(sector);
+        int inc_empty_inactive_list_flag;
 
         pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
 
@@ -703,7 +704,12 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
                                         atomic_inc(&conf->active_stripes);
                                 BUG_ON(list_empty(&sh->lru) &&
                                        !test_bit(STRIPE_EXPANDING, &sh->state));
+                                inc_empty_inactive_list_flag = 0;
+                                if (!list_empty(conf->inactive_list + hash))
+                                        inc_empty_inactive_list_flag = 1;
                                 list_del_init(&sh->lru);
+                                if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag)
+                                        atomic_inc(&conf->empty_inactive_list_nr);
                                 if (sh->group) {
                                         sh->group->stripes_cnt--;
                                         sh->group = NULL;
@@ -762,6 +768,7 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
         sector_t head_sector, tmp_sec;
         int hash;
         int dd_idx;
+        int inc_empty_inactive_list_flag;
 
         /* Don't cross chunks, so stripe pd_idx/qd_idx is the same */
         tmp_sec = sh->sector;
@@ -779,7 +786,12 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
                                 atomic_inc(&conf->active_stripes);
                         BUG_ON(list_empty(&head->lru) &&
                                !test_bit(STRIPE_EXPANDING, &head->state));
+                        inc_empty_inactive_list_flag = 0;
+                        if (!list_empty(conf->inactive_list + hash))
+                                inc_empty_inactive_list_flag = 1;
                         list_del_init(&head->lru);
+                        if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag)
+                                atomic_inc(&conf->empty_inactive_list_nr);
                         if (head->group) {
                                 head->group->stripes_cnt--;
                                 head->group = NULL;
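Both raid5.c hunks above apply the same accounting fix: remember whether the per-hash inactive list still had entries before list_del_init(), and bump conf->empty_inactive_list_nr only when removing this stripe is what emptied it. A small sketch of that "became empty because of this removal" test on a plain doubly linked list (the node type and counter are stand-ins, not the kernel's list or r5conf):

#include <stdio.h>

struct node { struct node *prev, *next; };

static void list_init(struct node *head)        { head->prev = head->next = head; }
static int  list_empty(const struct node *head) { return head->next == head; }

static void list_add(struct node *head, struct node *n)
{
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
}

static void list_del_init(struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        list_init(n);
}

static int empty_list_nr;       /* analogue of conf->empty_inactive_list_nr */

static void take_from_inactive(struct node *inactive, struct node *n)
{
        int was_populated = !list_empty(inactive);

        list_del_init(n);
        /* Count the populated -> empty transition exactly once. */
        if (was_populated && list_empty(inactive))
                empty_list_nr++;
}

int main(void)
{
        struct node inactive, a, b;

        list_init(&inactive);
        list_add(&inactive, &a);
        list_add(&inactive, &b);

        take_from_inactive(&inactive, &a);      /* list still holds b: no bump */
        take_from_inactive(&inactive, &b);      /* now empty: counter becomes 1 */
        printf("empty_list_nr = %d\n", empty_list_nr);
        return 0;
}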
@@ -993,7 +1005,6 @@ again:
 
                         set_bit(STRIPE_IO_STARTED, &sh->state);
 
-                        bio_reset(bi);
                         bi->bi_bdev = rdev->bdev;
                         bio_set_op_attrs(bi, op, op_flags);
                         bi->bi_end_io = op_is_write(op)
@@ -1045,7 +1056,6 @@ again:
 
                         set_bit(STRIPE_IO_STARTED, &sh->state);
 
-                        bio_reset(rbi);
                         rbi->bi_bdev = rrdev->bdev;
                         bio_set_op_attrs(rbi, op, op_flags);
                         BUG_ON(!op_is_write(op));
@@ -1978,9 +1988,11 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
         put_cpu();
 }
 
-static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
+static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
+        int disks)
 {
         struct stripe_head *sh;
+        int i;
 
         sh = kmem_cache_zalloc(sc, gfp);
         if (sh) {
@@ -1989,6 +2001,17 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
                 INIT_LIST_HEAD(&sh->batch_list);
                 INIT_LIST_HEAD(&sh->lru);
                 atomic_set(&sh->count, 1);
+                for (i = 0; i < disks; i++) {
+                        struct r5dev *dev = &sh->dev[i];
+
+                        bio_init(&dev->req);
+                        dev->req.bi_io_vec = &dev->vec;
+                        dev->req.bi_max_vecs = 1;
+
+                        bio_init(&dev->rreq);
+                        dev->rreq.bi_io_vec = &dev->rvec;
+                        dev->rreq.bi_max_vecs = 1;
+                }
         }
         return sh;
 }
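These hunks move bio_init() of the per-device req/rreq bios from raid5_build_block(), which runs each time a stripe is set up for a new sector, into alloc_stripe(), and pair that with bio_reset() calls in the completion paths, so each embedded bio is initialized once at allocation and only reset after it completes. A hedged sketch of that init-once/reset-on-completion lifecycle using toy types (struct toy_bio and its helpers are illustrative, not the block-layer API):

#include <stdio.h>
#include <string.h>

struct toy_bio {
        int   flags;            /* stands in for per-I/O bio state */
        void *io_vec;           /* stands in for bi_io_vec */
        int   max_vecs;
};

struct toy_dev {
        struct toy_bio req;     /* embedded, like r5dev->req */
        char  vec[512];         /* like r5dev->vec */
};

/* One-time setup, as alloc_stripe() now does for every r5dev. */
static void toy_bio_init(struct toy_bio *bio, void *vec, int max_vecs)
{
        memset(bio, 0, sizeof(*bio));
        bio->io_vec = vec;
        bio->max_vecs = max_vecs;
}

/* Cheap per-completion reset, as the end_io handlers now do. */
static void toy_bio_reset(struct toy_bio *bio)
{
        bio->flags = 0;         /* clear per-I/O state, keep vec wiring (toy model) */
}

static void issue_and_complete(struct toy_dev *dev, int round)
{
        dev->req.flags = 1;     /* "in flight" */
        printf("round %d: submit bio (vecs=%d)\n", round, dev->req.max_vecs);
        toy_bio_reset(&dev->req);       /* completion path */
}

int main(void)
{
        struct toy_dev dev;

        toy_bio_init(&dev.req, dev.vec, 1);     /* once, at allocation time */
        for (int i = 0; i < 3; i++)
                issue_and_complete(&dev, i);    /* reuse without re-init */
        return 0;
}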
@@ -1996,7 +2019,7 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
 {
         struct stripe_head *sh;
 
-        sh = alloc_stripe(conf->slab_cache, gfp);
+        sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size);
         if (!sh)
                 return 0;
 
@@ -2167,7 +2190,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
         mutex_lock(&conf->cache_size_mutex);
 
         for (i = conf->max_nr_stripes; i; i--) {
-                nsh = alloc_stripe(sc, GFP_KERNEL);
+                nsh = alloc_stripe(sc, GFP_KERNEL, newsize);
                 if (!nsh)
                         break;
 
@@ -2299,6 +2322,7 @@ static void raid5_end_read_request(struct bio * bi)
                 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
                 bi->bi_error);
         if (i == disks) {
+                bio_reset(bi);
                 BUG();
                 return;
         }
@@ -2402,6 +2426,7 @@ static void raid5_end_read_request(struct bio * bi)
         clear_bit(R5_LOCKED, &sh->dev[i].flags);
         set_bit(STRIPE_HANDLE, &sh->state);
         raid5_release_stripe(sh);
+        bio_reset(bi);
 }
 
 static void raid5_end_write_request(struct bio *bi)
@@ -2436,6 +2461,7 @@ static void raid5_end_write_request(struct bio *bi)
                 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
                 bi->bi_error);
         if (i == disks) {
+                bio_reset(bi);
                 BUG();
                 return;
         }
@@ -2479,22 +2505,13 @@ static void raid5_end_write_request(struct bio *bi)
 
         if (sh->batch_head && sh != sh->batch_head)
                 raid5_release_stripe(sh->batch_head);
+        bio_reset(bi);
 }
 
 static void raid5_build_block(struct stripe_head *sh, int i, int previous)
 {
         struct r5dev *dev = &sh->dev[i];
 
-        bio_init(&dev->req);
-        dev->req.bi_io_vec = &dev->vec;
-        dev->req.bi_max_vecs = 1;
-        dev->req.bi_private = sh;
-
-        bio_init(&dev->rreq);
-        dev->rreq.bi_io_vec = &dev->rvec;
-        dev->rreq.bi_max_vecs = 1;
-        dev->rreq.bi_private = sh;
-
         dev->flags = 0;
         dev->sector = raid5_compute_blocknr(sh, i, previous);
 }
@@ -4628,7 +4645,9 @@ finish:
         }
 
         if (!bio_list_empty(&s.return_bi)) {
-                if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags)) {
+                if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags) &&
+                                (s.failed <= conf->max_degraded ||
+                                        conf->mddev->external == 0)) {
                         spin_lock_irq(&conf->device_lock);
                         bio_list_merge(&conf->return_bi, &s.return_bi);
                         spin_unlock_irq(&conf->device_lock);
@@ -6826,11 +6845,14 @@ static int raid5_run(struct mddev *mddev)
         if (IS_ERR(conf))
                 return PTR_ERR(conf);
 
-        if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !journal_dev) {
-                printk(KERN_ERR "md/raid:%s: journal disk is missing, force array readonly\n",
-                       mdname(mddev));
-                mddev->ro = 1;
-                set_disk_ro(mddev->gendisk, 1);
+        if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
+                if (!journal_dev) {
+                        pr_err("md/raid:%s: journal disk is missing, force array readonly\n",
+                               mdname(mddev));
+                        mddev->ro = 1;
+                        set_disk_ro(mddev->gendisk, 1);
+                } else if (mddev->recovery_cp == MaxSector)
+                        set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
         }
 
         conf->min_offset_diff = min_offset_diff;
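The final hunk makes raid5_run() the place that classifies a journal-enabled array: a missing journal device forces the array read-only, while a present journal device plus recovery_cp == MaxSector sets MD_JOURNAL_CLEAN (the md.c hunk above removes the corresponding set_bit from super_1_validate()). A compact sketch of that decision, with the flag bits and sentinel invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define HAS_JOURNAL     (1u << 0)
#define JOURNAL_CLEAN   (1u << 1)

#define MAX_SECTOR      UINT64_MAX      /* stands in for MaxSector */

enum array_mode { RW, FORCED_RO };

/* journal_dev_present and recovery_cp are the two inputs the hunk looks at. */
static enum array_mode classify_journal(unsigned int *flags,
                                        int journal_dev_present,
                                        uint64_t recovery_cp)
{
        if (!(*flags & HAS_JOURNAL))
                return RW;

        if (!journal_dev_present)
                return FORCED_RO;               /* journal disk missing */

        if (recovery_cp == MAX_SECTOR)
                *flags |= JOURNAL_CLEAN;        /* clean shutdown with journal */
        return RW;
}

int main(void)
{
        unsigned int flags = HAS_JOURNAL;

        printf("no journal dev  -> %s\n",
               classify_journal(&flags, 0, MAX_SECTOR) == FORCED_RO ? "read-only" : "rw");
        printf("clean + journal -> %s, clean=%d\n",
               classify_journal(&flags, 1, MAX_SECTOR) == RW ? "rw" : "read-only",
               !!(flags & JOURNAL_CLEAN));
        return 0;
}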