author     David S. Miller <davem@davemloft.net>   2015-10-20 09:08:27 -0400
committer  David S. Miller <davem@davemloft.net>   2015-10-20 09:08:27 -0400
commit     26440c835f8b1a491e2704118ac55bf87334366c (patch)
tree       3c2d23b59fd49b252fdbf6c09efc41b354933fc6 /drivers/md
parent     371f1c7e0d854796adc622cc3bacfcc5fc638db1 (diff)
parent     1099f86044111e9a7807f09523e42d4c9d0fb781 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/usb/asix_common.c
	net/ipv4/inet_connection_sock.c
	net/switchdev/switchdev.c

In the inet_connection_sock.c case the request socket hashing scheme
is completely different in net-next.

The other two conflicts were overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/bitmap.c                   |  3
-rw-r--r--  drivers/md/dm-cache-policy-cleaner.c  |  2
-rw-r--r--  drivers/md/dm-exception-store.c       |  6
-rw-r--r--  drivers/md/dm-exception-store.h       |  5
-rw-r--r--  drivers/md/dm-raid.c                  |  3
-rw-r--r--  drivers/md/dm-snap-persistent.c       | 30
-rw-r--r--  drivers/md/dm-snap-transient.c        |  3
-rw-r--r--  drivers/md/dm-snap.c                  | 14
-rw-r--r--  drivers/md/dm-thin.c                  |  2
-rw-r--r--  drivers/md/dm.c                       | 11
-rw-r--r--  drivers/md/md.c                       |  5
-rw-r--r--  drivers/md/multipath.c                |  3
-rw-r--r--  drivers/md/raid0.c                    | 12
-rw-r--r--  drivers/md/raid1.c                    | 15
-rw-r--r--  drivers/md/raid10.c                   | 13
-rw-r--r--  drivers/md/raid5.c                    | 11

16 files changed, 81 insertions(+), 57 deletions(-)
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index e51de52eeb94..48b5890c28e3 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1997,7 +1997,8 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
 	if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
 		ret = bitmap_storage_alloc(&store, chunks,
 					   !bitmap->mddev->bitmap_info.external,
-					   bitmap->cluster_slot);
+					   mddev_is_clustered(bitmap->mddev)
+					   ? bitmap->cluster_slot : 0);
 	if (ret)
 		goto err;
 
diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c
index 240c9f0e85e7..8a096456579b 100644
--- a/drivers/md/dm-cache-policy-cleaner.c
+++ b/drivers/md/dm-cache-policy-cleaner.c
@@ -436,7 +436,7 @@ static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
 static struct dm_cache_policy_type wb_policy_type = {
 	.name = "cleaner",
 	.version = {1, 0, 0},
-	.hint_size = 0,
+	.hint_size = 4,
 	.owner = THIS_MODULE,
 	.create = wb_create
 };
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index ebaa4f803eec..192bb8beeb6b 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -203,7 +203,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
 		return -EINVAL;
 	}
 
-	tmp_store = kmalloc(sizeof(*tmp_store), GFP_KERNEL);
+	tmp_store = kzalloc(sizeof(*tmp_store), GFP_KERNEL);
 	if (!tmp_store) {
 		ti->error = "Exception store allocation failed";
 		return -ENOMEM;
@@ -215,7 +215,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
 	else if (persistent == 'N')
 		type = get_type("N");
 	else {
-		ti->error = "Persistent flag is not P or N";
+		ti->error = "Exception store type is not P or N";
 		r = -EINVAL;
 		goto bad_type;
 	}
@@ -233,7 +233,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
 	if (r)
 		goto bad;
 
-	r = type->ctr(tmp_store, 0, NULL);
+	r = type->ctr(tmp_store, (strlen(argv[0]) > 1 ? &argv[0][1] : NULL));
 	if (r) {
 		ti->error = "Exception store type constructor failed";
 		goto bad;
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 0b2536247cf5..fae34e7a0b1e 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -42,8 +42,7 @@ struct dm_exception_store_type {
 	const char *name;
 	struct module *module;
 
-	int (*ctr) (struct dm_exception_store *store,
-		    unsigned argc, char **argv);
+	int (*ctr) (struct dm_exception_store *store, char *options);
 
 	/*
 	 * Destroys this object when you've finished with it.
@@ -123,6 +122,8 @@ struct dm_exception_store {
 	unsigned chunk_shift;
 
 	void *context;
+
+	bool userspace_supports_overflow;
 };
 
 /*
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 97e165183e79..a0901214aef5 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -329,8 +329,7 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
 	 */
 	if (min_region_size > (1 << 13)) {
 		/* If not a power of 2, make it the next power of 2 */
-		if (min_region_size & (min_region_size - 1))
-			region_size = 1 << fls(region_size);
+		region_size = roundup_pow_of_two(min_region_size);
 		DMINFO("Choosing default region size of %lu sectors",
 		       region_size);
 	} else {
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index bf71583296f7..117a05e40090 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -7,6 +7,7 @@
 
 #include "dm-exception-store.h"
 
+#include <linux/ctype.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/vmalloc.h>
@@ -843,10 +844,10 @@ static void persistent_drop_snapshot(struct dm_exception_store *store)
 		DMWARN("write header failed");
 }
 
-static int persistent_ctr(struct dm_exception_store *store,
-			  unsigned argc, char **argv)
+static int persistent_ctr(struct dm_exception_store *store, char *options)
 {
 	struct pstore *ps;
+	int r;
 
 	/* allocate the pstore */
 	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
@@ -868,14 +869,32 @@ static int persistent_ctr(struct dm_exception_store *store,
 
 	ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
 	if (!ps->metadata_wq) {
-		kfree(ps);
 		DMERR("couldn't start header metadata update thread");
-		return -ENOMEM;
+		r = -ENOMEM;
+		goto err_workqueue;
+	}
+
+	if (options) {
+		char overflow = toupper(options[0]);
+		if (overflow == 'O')
+			store->userspace_supports_overflow = true;
+		else {
+			DMERR("Unsupported persistent store option: %s", options);
+			r = -EINVAL;
+			goto err_options;
+		}
 	}
 
 	store->context = ps;
 
 	return 0;
+
+err_options:
+	destroy_workqueue(ps->metadata_wq);
+err_workqueue:
+	kfree(ps);
+
+	return r;
 }
 
 static unsigned persistent_status(struct dm_exception_store *store,
@@ -888,7 +907,8 @@ static unsigned persistent_status(struct dm_exception_store *store,
 	case STATUSTYPE_INFO:
 		break;
 	case STATUSTYPE_TABLE:
-		DMEMIT(" P %llu", (unsigned long long)store->chunk_size);
+		DMEMIT(" %s %llu", store->userspace_supports_overflow ? "PO" : "P",
+		       (unsigned long long)store->chunk_size);
 	}
 
 	return sz;
diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
index 1ce9a2586e41..9b7c8c8049d6 100644
--- a/drivers/md/dm-snap-transient.c
+++ b/drivers/md/dm-snap-transient.c
@@ -70,8 +70,7 @@ static void transient_usage(struct dm_exception_store *store,
 	*metadata_sectors = 0;
 }
 
-static int transient_ctr(struct dm_exception_store *store,
-			 unsigned argc, char **argv)
+static int transient_ctr(struct dm_exception_store *store, char *options)
 {
 	struct transient_c *tc;
 
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index c0bcd6516dfe..c06b74e91cd6 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1098,7 +1098,7 @@ static void stop_merge(struct dm_snapshot *s)
 }
 
 /*
- * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
+ * Construct a snapshot mapping: <origin_dev> <COW-dev> <p|po|n> <chunk-size>
  */
 static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
@@ -1302,6 +1302,7 @@ static void __handover_exceptions(struct dm_snapshot *snap_src,
 
 	u.store_swap = snap_dest->store;
 	snap_dest->store = snap_src->store;
+	snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow;
 	snap_src->store = u.store_swap;
 
 	snap_dest->store->snap = snap_dest;
@@ -1739,8 +1740,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 
 			pe = __find_pending_exception(s, pe, chunk);
 			if (!pe) {
-				s->snapshot_overflowed = 1;
-				DMERR("Snapshot overflowed: Unable to allocate exception.");
+				if (s->store->userspace_supports_overflow) {
+					s->snapshot_overflowed = 1;
+					DMERR("Snapshot overflowed: Unable to allocate exception.");
+				} else
+					__invalidate_snapshot(s, -ENOMEM);
 				r = -EIO;
 				goto out_unlock;
 			}
@@ -2365,7 +2369,7 @@ static struct target_type origin_target = {
 
 static struct target_type snapshot_target = {
 	.name = "snapshot",
-	.version = {1, 14, 0},
+	.version = {1, 15, 0},
 	.module = THIS_MODULE,
 	.ctr = snapshot_ctr,
 	.dtr = snapshot_dtr,
@@ -2379,7 +2383,7 @@ static struct target_type snapshot_target = {
 
 static struct target_type merge_target = {
 	.name = dm_snapshot_merge_target_name,
-	.version = {1, 3, 0},
+	.version = {1, 4, 0},
 	.module = THIS_MODULE,
 	.ctr = snapshot_ctr,
 	.dtr = snapshot_dtr,
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 6fcbfb063366..3897b90bd462 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -3201,7 +3201,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
 					  metadata_low_callback,
 					  pool);
 	if (r)
-		goto out_free_pt;
+		goto out_flags_changed;
 
 	pt->callbacks.congested_fn = pool_is_congested;
 	dm_table_add_target_callbacks(ti->table, &pt->callbacks);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6264781dc69a..1b5c6047e4f1 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1001,6 +1001,7 @@ static void end_clone_bio(struct bio *clone)
 	struct dm_rq_target_io *tio = info->tio;
 	struct bio *bio = info->orig;
 	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
+	int error = clone->bi_error;
 
 	bio_put(clone);
 
@@ -1011,13 +1012,13 @@ static void end_clone_bio(struct bio *clone)
 		 * the remainder.
 		 */
 		return;
-	else if (bio->bi_error) {
+	else if (error) {
 		/*
 		 * Don't notice the error to the upper layer yet.
 		 * The error handling decision is made by the target driver,
 		 * when the request is completed.
 		 */
-		tio->error = bio->bi_error;
+		tio->error = error;
 		return;
 	}
 
@@ -2837,8 +2838,6 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
 
 	might_sleep();
 
-	map = dm_get_live_table(md, &srcu_idx);
-
 	spin_lock(&_minor_lock);
 	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
 	set_bit(DMF_FREEING, &md->flags);
@@ -2852,14 +2851,14 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
 	 * do not race with internal suspend.
 	 */
 	mutex_lock(&md->suspend_lock);
+	map = dm_get_live_table(md, &srcu_idx);
 	if (!dm_suspended_md(md)) {
 		dm_table_presuspend_targets(map);
 		dm_table_postsuspend_targets(map);
 	}
-	mutex_unlock(&md->suspend_lock);
-
 	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
 	dm_put_live_table(md, srcu_idx);
+	mutex_unlock(&md->suspend_lock);
 
 	/*
 	 * Rare, but there may be I/O requests still going to complete,
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4f5ecbe94ccb..c702de18207a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5409,9 +5409,13 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
 		 * which will now never happen */
 		wake_up_process(mddev->sync_thread->tsk);
 
+	if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags))
+		return -EBUSY;
 	mddev_unlock(mddev);
 	wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
 					  &mddev->recovery));
+	wait_event(mddev->sb_wait,
+		   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
 	mddev_lock_nointr(mddev);
 
 	mutex_lock(&mddev->open_mutex);
@@ -8160,6 +8164,7 @@ void md_check_recovery(struct mddev *mddev)
 			md_reap_sync_thread(mddev);
 			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
 			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
 			goto unlock;
 		}
 
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index d222522c52e0..d132f06afdd1 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -470,8 +470,7 @@ static int multipath_run (struct mddev *mddev)
 	return 0;
 
 out_free_conf:
-	if (conf->pool)
-		mempool_destroy(conf->pool);
+	mempool_destroy(conf->pool);
 	kfree(conf->multipaths);
 	kfree(conf);
 	mddev->private = NULL;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 63e619b2f44e..f8e5db0cb5aa 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -376,12 +376,6 @@ static int raid0_run(struct mddev *mddev)
 	struct md_rdev *rdev;
 	bool discard_supported = false;
 
-	rdev_for_each(rdev, mddev) {
-		disk_stack_limits(mddev->gendisk, rdev->bdev,
-				  rdev->data_offset << 9);
-		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
-			discard_supported = true;
-	}
 	blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
 	blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
 	blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
@@ -390,6 +384,12 @@ static int raid0_run(struct mddev *mddev)
 	blk_queue_io_opt(mddev->queue,
 			 (mddev->chunk_sectors << 9) * mddev->raid_disks);
 
+	rdev_for_each(rdev, mddev) {
+		disk_stack_limits(mddev->gendisk, rdev->bdev,
+				  rdev->data_offset << 9);
+		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+			discard_supported = true;
+	}
 	if (!discard_supported)
 		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
 	else
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4517f06c41ba..ddd8a5f572aa 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -881,8 +881,7 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
 	}
 
 	if (bio && bio_data_dir(bio) == WRITE) {
-		if (bio->bi_iter.bi_sector >=
-		    conf->mddev->curr_resync_completed) {
+		if (bio->bi_iter.bi_sector >= conf->next_resync) {
 			if (conf->start_next_window == MaxSector)
 				conf->start_next_window =
 					conf->next_resync +
@@ -1516,7 +1515,7 @@ static void close_sync(struct r1conf *conf)
 	conf->r1buf_pool = NULL;
 
 	spin_lock_irq(&conf->resync_lock);
-	conf->next_resync = 0;
+	conf->next_resync = MaxSector - 2 * NEXT_NORMALIO_DISTANCE;
 	conf->start_next_window = MaxSector;
 	conf->current_window_requests +=
 		conf->next_window_requests;
@@ -2383,8 +2382,8 @@ static void raid1d(struct md_thread *thread)
 		}
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 		while (!list_empty(&tmp)) {
-			r1_bio = list_first_entry(&conf->bio_end_io_list,
-						  struct r1bio, retry_list);
+			r1_bio = list_first_entry(&tmp, struct r1bio,
+						  retry_list);
 			list_del(&r1_bio->retry_list);
 			raid_end_bio_io(r1_bio);
 		}
@@ -2843,8 +2842,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 
 abort:
 	if (conf) {
-		if (conf->r1bio_pool)
-			mempool_destroy(conf->r1bio_pool);
+		mempool_destroy(conf->r1bio_pool);
 		kfree(conf->mirrors);
 		safe_put_page(conf->tmppage);
 		kfree(conf->poolinfo);
@@ -2946,8 +2944,7 @@ static void raid1_free(struct mddev *mddev, void *priv)
 {
 	struct r1conf *conf = priv;
 
-	if (conf->r1bio_pool)
-		mempool_destroy(conf->r1bio_pool);
+	mempool_destroy(conf->r1bio_pool);
 	kfree(conf->mirrors);
 	safe_put_page(conf->tmppage);
 	kfree(conf->poolinfo);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 0fc33eb88855..9f69dc526f8c 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2688,8 +2688,8 @@ static void raid10d(struct md_thread *thread)
 		}
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 		while (!list_empty(&tmp)) {
-			r10_bio = list_first_entry(&conf->bio_end_io_list,
-						   struct r10bio, retry_list);
+			r10_bio = list_first_entry(&tmp, struct r10bio,
+						   retry_list);
 			list_del(&r10_bio->retry_list);
 			raid_end_bio_io(r10_bio);
 		}
@@ -3486,8 +3486,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
 	printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
 	       mdname(mddev));
 	if (conf) {
-		if (conf->r10bio_pool)
-			mempool_destroy(conf->r10bio_pool);
+		mempool_destroy(conf->r10bio_pool);
 		kfree(conf->mirrors);
 		safe_put_page(conf->tmppage);
 		kfree(conf);
@@ -3682,8 +3681,7 @@ static int run(struct mddev *mddev)
 
 out_free_conf:
 	md_unregister_thread(&mddev->thread);
-	if (conf->r10bio_pool)
-		mempool_destroy(conf->r10bio_pool);
+	mempool_destroy(conf->r10bio_pool);
 	safe_put_page(conf->tmppage);
 	kfree(conf->mirrors);
 	kfree(conf);
@@ -3696,8 +3694,7 @@ static void raid10_free(struct mddev *mddev, void *priv)
 {
 	struct r10conf *conf = priv;
 
-	if (conf->r10bio_pool)
-		mempool_destroy(conf->r10bio_pool);
+	mempool_destroy(conf->r10bio_pool);
 	safe_put_page(conf->tmppage);
 	kfree(conf->mirrors);
 	kfree(conf->mirrors_old);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 15ef2c641b2b..49bb8d3ff9be 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2271,8 +2271,7 @@ static void shrink_stripes(struct r5conf *conf)
 	       drop_one_stripe(conf))
 		;
 
-	if (conf->slab_cache)
-		kmem_cache_destroy(conf->slab_cache);
+	kmem_cache_destroy(conf->slab_cache);
 	conf->slab_cache = NULL;
 }
 
@@ -3150,6 +3149,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 		spin_unlock_irq(&sh->stripe_lock);
 		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
 			wake_up(&conf->wait_for_overlap);
+		if (bi)
+			s->to_read--;
 		while (bi && bi->bi_iter.bi_sector <
 		       sh->dev[i].sector + STRIPE_SECTORS) {
 			struct bio *nextbi =
@@ -3169,6 +3170,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 		 */
 		clear_bit(R5_LOCKED, &sh->dev[i].flags);
 	}
+	s->to_write = 0;
+	s->written = 0;
 
 	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
 		if (atomic_dec_and_test(&conf->pending_full_writes))
@@ -3300,7 +3303,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
 	 */
 	return 0;
 
-	for (i = 0; i < s->failed; i++) {
+	for (i = 0; i < s->failed && i < 2; i++) {
 		if (fdev[i]->towrite &&
 		    !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
 		    !test_bit(R5_OVERWRITE, &fdev[i]->flags))
@@ -3324,7 +3327,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
 	    sh->sector < sh->raid_conf->mddev->recovery_cp)
 		/* reconstruct-write isn't being forced */
 		return 0;
-	for (i = 0; i < s->failed; i++) {
+	for (i = 0; i < s->failed && i < 2; i++) {
 		if (s->failed_num[i] != sh->pd_idx &&
 		    s->failed_num[i] != sh->qd_idx &&
 		    !test_bit(R5_UPTODATE, &fdev[i]->flags) &&