Diffstat (limited to 'drivers/md')

 -rw-r--r--  drivers/md/Makefile                 |  22
 -rw-r--r--  drivers/md/bitmap.c                 |   9
 -rw-r--r--  drivers/md/dm-exception-store.c     |  38
 -rw-r--r--  drivers/md/dm-exception-store.h     |   8
 -rw-r--r--  drivers/md/dm-log-userspace-base.c  |   2
 -rw-r--r--  drivers/md/dm-snap-persistent.c     |  16
 -rw-r--r--  drivers/md/dm-snap.c                |  25
 -rw-r--r--  drivers/md/dm.c                     |  27
 -rw-r--r--  drivers/md/md.c                     |   2
 -rw-r--r--  drivers/md/raid1.c                  |   6
 -rw-r--r--  drivers/md/raid10.c                 |   5
 -rw-r--r--  drivers/md/raid5.c                  | 182
 -rw-r--r--  drivers/md/raid5.h                  |  14
 -rw-r--r--  drivers/md/raid6altivec.uc          |   2
 -rw-r--r--  drivers/md/raid6int.uc              |   2
 -rw-r--r--  drivers/md/raid6test/Makefile       |  42
 -rw-r--r--  drivers/md/unroll.awk               |  20
 -rw-r--r--  drivers/md/unroll.pl                |  24

18 files changed, 236 insertions, 210 deletions
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 1dc4185bd781..e355e7f6a536 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -46,7 +46,7 @@ obj-$(CONFIG_DM_LOG_USERSPACE) += dm-log-userspace.o
 obj-$(CONFIG_DM_ZERO)		+= dm-zero.o
 
 quiet_cmd_unroll = UNROLL  $@
-      cmd_unroll = $(PERL) $(srctree)/$(src)/unroll.pl $(UNROLL) \
+      cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) \
                    < $< > $@ || ( rm -f $@ && exit 1 )
 
 ifeq ($(CONFIG_ALTIVEC),y)
@@ -59,56 +59,56 @@ endif
 
 targets += raid6int1.c
 $(obj)/raid6int1.c:   UNROLL := 1
-$(obj)/raid6int1.c:   $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int1.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 targets += raid6int2.c
 $(obj)/raid6int2.c:   UNROLL := 2
-$(obj)/raid6int2.c:   $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int2.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 targets += raid6int4.c
 $(obj)/raid6int4.c:   UNROLL := 4
-$(obj)/raid6int4.c:   $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int4.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 targets += raid6int8.c
 $(obj)/raid6int8.c:   UNROLL := 8
-$(obj)/raid6int8.c:   $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int8.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 targets += raid6int16.c
 $(obj)/raid6int16.c:  UNROLL := 16
-$(obj)/raid6int16.c:  $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int16.c:  $(src)/raid6int.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 targets += raid6int32.c
 $(obj)/raid6int32.c:  UNROLL := 32
-$(obj)/raid6int32.c:  $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int32.c:  $(src)/raid6int.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 CFLAGS_raid6altivec1.o += $(altivec_flags)
 targets += raid6altivec1.c
 $(obj)/raid6altivec1.c:   UNROLL := 1
-$(obj)/raid6altivec1.c:   $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec1.c:   $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 CFLAGS_raid6altivec2.o += $(altivec_flags)
 targets += raid6altivec2.c
 $(obj)/raid6altivec2.c:   UNROLL := 2
-$(obj)/raid6altivec2.c:   $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec2.c:   $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 CFLAGS_raid6altivec4.o += $(altivec_flags)
 targets += raid6altivec4.c
 $(obj)/raid6altivec4.c:   UNROLL := 4
-$(obj)/raid6altivec4.c:   $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec4.c:   $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 CFLAGS_raid6altivec8.o += $(altivec_flags)
 targets += raid6altivec8.c
 $(obj)/raid6altivec8.c:   UNROLL := 8
-$(obj)/raid6altivec8.c:   $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec8.c:   $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 quiet_cmd_mktable = TABLE $@
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 6986b0059d23..60e2b322db11 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1624,10 +1624,11 @@ int bitmap_create(mddev_t *mddev)
 	bitmap->offset = mddev->bitmap_offset;
 	if (file) {
 		get_file(file);
-		do_sync_mapping_range(file->f_mapping, 0, LLONG_MAX,
-				      SYNC_FILE_RANGE_WAIT_BEFORE |
-				      SYNC_FILE_RANGE_WRITE |
-				      SYNC_FILE_RANGE_WAIT_AFTER);
+		/* As future accesses to this file will use bmap,
+		 * and bypass the page cache, we must sync the file
+		 * first.
+		 */
+		vfs_fsync(file, file->f_dentry, 1);
 	}
 	/* read superblock from bitmap file (this sets bitmap->chunksize) */
 	err = bitmap_read_sb(bitmap);
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 556acff3952f..7dbe652efb5a 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -138,16 +138,6 @@ int dm_exception_store_type_unregister(struct dm_exception_store_type *type)
 }
 EXPORT_SYMBOL(dm_exception_store_type_unregister);
 
-/*
- * Round a number up to the nearest 'size' boundary.  size must
- * be a power of 2.
- */
-static ulong round_up(ulong n, ulong size)
-{
-	size--;
-	return (n + size) & ~size;
-}
-
 static int set_chunk_size(struct dm_exception_store *store,
 			  const char *chunk_size_arg, char **error)
 {
@@ -155,7 +145,8 @@ static int set_chunk_size(struct dm_exception_store *store,
 	char *value;
 
 	chunk_size_ulong = simple_strtoul(chunk_size_arg, &value, 10);
-	if (*chunk_size_arg == '\0' || *value != '\0') {
+	if (*chunk_size_arg == '\0' || *value != '\0' ||
+	    chunk_size_ulong > UINT_MAX) {
 		*error = "Invalid chunk size";
 		return -EINVAL;
 	}
@@ -165,40 +156,35 @@ static int set_chunk_size(struct dm_exception_store *store,
 		return 0;
 	}
 
-	/*
-	 * Chunk size must be multiple of page size.  Silently
-	 * round up if it's not.
-	 */
-	chunk_size_ulong = round_up(chunk_size_ulong, PAGE_SIZE >> 9);
-
-	return dm_exception_store_set_chunk_size(store, chunk_size_ulong,
+	return dm_exception_store_set_chunk_size(store,
+						 (unsigned) chunk_size_ulong,
 						 error);
 }
 
 int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
-				      unsigned long chunk_size_ulong,
+				      unsigned chunk_size,
 				      char **error)
 {
 	/* Check chunk_size is a power of 2 */
-	if (!is_power_of_2(chunk_size_ulong)) {
+	if (!is_power_of_2(chunk_size)) {
 		*error = "Chunk size is not a power of 2";
 		return -EINVAL;
 	}
 
 	/* Validate the chunk size against the device block size */
-	if (chunk_size_ulong % (bdev_logical_block_size(store->cow->bdev) >> 9)) {
+	if (chunk_size % (bdev_logical_block_size(store->cow->bdev) >> 9)) {
 		*error = "Chunk size is not a multiple of device blocksize";
 		return -EINVAL;
 	}
 
-	if (chunk_size_ulong > INT_MAX >> SECTOR_SHIFT) {
+	if (chunk_size > INT_MAX >> SECTOR_SHIFT) {
 		*error = "Chunk size is too high";
 		return -EINVAL;
 	}
 
-	store->chunk_size = chunk_size_ulong;
-	store->chunk_mask = chunk_size_ulong - 1;
-	store->chunk_shift = ffs(chunk_size_ulong) - 1;
+	store->chunk_size = chunk_size;
+	store->chunk_mask = chunk_size - 1;
+	store->chunk_shift = ffs(chunk_size) - 1;
 
 	return 0;
 }
@@ -251,7 +237,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
 
 	r = set_chunk_size(tmp_store, argv[2], &ti->error);
 	if (r)
-		goto bad_cow;
+		goto bad_ctr;
 
 	r = type->ctr(tmp_store, 0, NULL);
 	if (r) {
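The new upper-bound test above is what makes the later narrowing cast safe: simple_strtoul() hands back an unsigned long, which is 64 bits wide on most current targets, while the store now keeps the chunk size in a 32-bit unsigned. A standalone userspace sketch of the failure mode being closed off -- illustrative only, not kernel code, and it assumes an LP64 target:

	/* gcc -o chunk chunk.c && ./chunk */
	#include <limits.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		const char *arg = "4294967297";	/* UINT_MAX + 2 on LP64 */
		char *end;
		unsigned long ul = strtoul(arg, &end, 10);

		/* without the bound test, this cast silently wraps to 1 */
		unsigned chunk = (unsigned)ul;

		printf("parsed %lu, truncated %u, reject: %s\n",
		       ul, chunk, ul > UINT_MAX ? "yes" : "no");
		return 0;
	}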
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 812c71872ba0..8a223a48802c 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -101,9 +101,9 @@ struct dm_exception_store {
 	struct dm_dev *cow;
 
 	/* Size of data blocks saved - must be a power of 2 */
-	chunk_t chunk_size;
-	chunk_t chunk_mask;
-	chunk_t chunk_shift;
+	unsigned chunk_size;
+	unsigned chunk_mask;
+	unsigned chunk_shift;
 
 	void *context;
 };
@@ -169,7 +169,7 @@ int dm_exception_store_type_register(struct dm_exception_store_type *type);
 int dm_exception_store_type_unregister(struct dm_exception_store_type *type);
 
 int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
-				      unsigned long chunk_size_ulong,
+				      unsigned chunk_size,
 				      char **error);
 
 int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index 652bd33109e3..7ac2c1450d10 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -156,7 +156,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
 	}
 
 	/* The ptr value is sufficient for local unique id */
-	lc->luid = (uint64_t)lc;
+	lc->luid = (unsigned long)lc;
 
 	lc->ti = ti;
 
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index d5b2e08750d5..0c746420c008 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -284,12 +284,13 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 {
 	int r;
 	struct disk_header *dh;
-	chunk_t chunk_size;
+	unsigned chunk_size;
 	int chunk_size_supplied = 1;
 	char *chunk_err;
 
 	/*
-	 * Use default chunk size (or hardsect_size, if larger) if none supplied
+	 * Use default chunk size (or logical_block_size, if larger)
+	 * if none supplied
 	 */
 	if (!ps->store->chunk_size) {
 		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
@@ -334,10 +335,9 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 		return 0;
 
 	if (chunk_size_supplied)
-		DMWARN("chunk size %llu in device metadata overrides "
-		       "table chunk size of %llu.",
-		       (unsigned long long)chunk_size,
-		       (unsigned long long)ps->store->chunk_size);
+		DMWARN("chunk size %u in device metadata overrides "
+		       "table chunk size of %u.",
+		       chunk_size, ps->store->chunk_size);
 
 	/* We had a bogus chunk_size. Fix stuff up. */
 	free_area(ps);
@@ -345,8 +345,8 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
 					      &chunk_err);
 	if (r) {
-		DMERR("invalid on-disk chunk size %llu: %s.",
-		      (unsigned long long)chunk_size, chunk_err);
+		DMERR("invalid on-disk chunk size %u: %s.",
+		      chunk_size, chunk_err);
 		return r;
 	}
 
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 57f1bf7f3b7a..3a3ba46e6d4b 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -296,6 +296,7 @@ static void __insert_origin(struct origin *o)
  */
 static int register_snapshot(struct dm_snapshot *snap)
 {
+	struct dm_snapshot *l;
 	struct origin *o, *new_o;
 	struct block_device *bdev = snap->origin->bdev;
 
@@ -319,7 +320,11 @@ static int register_snapshot(struct dm_snapshot *snap)
 		__insert_origin(o);
 	}
 
-	list_add_tail(&snap->list, &o->snapshots);
+	/* Sort the list according to chunk size, largest-first smallest-last */
+	list_for_each_entry(l, &o->snapshots, list)
+		if (l->store->chunk_size < snap->store->chunk_size)
+			break;
+	list_add_tail(&snap->list, &l->list);
 
 	up_write(&_origins_lock);
 	return 0;
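The sorted insert above leans on list_add_tail(&snap->list, &l->list) meaning "link snap in just before l": when the walk breaks at the first snapshot with a smaller chunk size, the new entry lands ahead of it, and when the walk finishes without breaking, l denotes the list head itself, so the add becomes an append at the true tail. A minimal userspace sketch of the same pattern (names here are illustrative, not kernel API):

	#include <stdio.h>
	#include <stdlib.h>

	struct node { int key; struct node *prev, *next; };

	/* same effect as list_add_tail(&n->list, &pos->list) */
	static void insert_before(struct node *pos, struct node *n)
	{
		n->prev = pos->prev;
		n->next = pos;
		pos->prev->next = n;
		pos->prev = n;
	}

	int main(void)
	{
		struct node head = { 0, &head, &head };	/* empty circular list */
		int keys[] = { 16, 64, 8, 32 };

		for (int i = 0; i < 4; i++) {
			struct node *n = malloc(sizeof(*n)), *pos;

			n->key = keys[i];
			/* first entry smaller than n, else the head itself,
			 * which turns insert_before() into a tail append */
			for (pos = head.next; pos != &head; pos = pos->next)
				if (pos->key < n->key)
					break;
			insert_before(pos, n);
		}
		for (struct node *p = head.next; p != &head; p = p->next)
			printf("%d ", p->key);		/* 64 32 16 8 */
		printf("\n");				/* (cleanup omitted) */
		return 0;
	}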
@@ -668,6 +673,11 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	bio_list_init(&s->queued_bios);
 	INIT_WORK(&s->queued_bios_work, flush_queued_bios);
 
+	if (!s->store->chunk_size) {
+		ti->error = "Chunk size not set";
+		goto bad_load_and_register;
+	}
+
 	/* Add snapshot to the list of snapshots for this origin */
 	/* Exceptions aren't triggered till snapshot_resume() is called */
 	if (register_snapshot(s)) {
@@ -951,7 +961,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
 
 	src.bdev = bdev;
 	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
-	src.count = min(s->store->chunk_size, dev_size - src.sector);
+	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
 
 	dest.bdev = s->store->cow->bdev;
 	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
@@ -1142,6 +1152,8 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
 	unsigned sz = 0;
 	struct dm_snapshot *snap = ti->private;
 
+	down_write(&snap->lock);
+
 	switch (type) {
 	case STATUSTYPE_INFO:
 		if (!snap->valid)
@@ -1173,6 +1185,8 @@
 		break;
 	}
 
+	up_write(&snap->lock);
+
 	return 0;
 }
 
@@ -1388,7 +1402,7 @@ static void origin_resume(struct dm_target *ti)
 	struct dm_dev *dev = ti->private;
 	struct dm_snapshot *snap;
 	struct origin *o;
-	chunk_t chunk_size = 0;
+	unsigned chunk_size = 0;
 
 	down_read(&_origins_lock);
 	o = __lookup_origin(dev->bdev);
@@ -1465,7 +1479,7 @@ static int __init dm_snapshot_init(void)
 	r = dm_register_target(&snapshot_target);
 	if (r) {
 		DMERR("snapshot target register failed %d", r);
-		return r;
+		goto bad_register_snapshot_target;
 	}
 
 	r = dm_register_target(&origin_target);
@@ -1522,6 +1536,9 @@ bad2:
 	dm_unregister_target(&origin_target);
 bad1:
 	dm_unregister_target(&snapshot_target);
+
+bad_register_snapshot_target:
+	dm_exception_store_exit();
 	return r;
 }
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 23e76fe0d359..724efc63904d 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -47,6 +47,7 @@ struct dm_io {
 	atomic_t io_count;
 	struct bio *bio;
 	unsigned long start_time;
+	spinlock_t endio_lock;
 };
 
 /*
@@ -130,7 +131,7 @@ struct mapped_device {
 	/*
	 * A list of ios that arrived while we were suspended.
	 */
-	atomic_t pending;
+	atomic_t pending[2];
 	wait_queue_head_t wait;
 	struct work_struct work;
 	struct bio_list deferred;
@@ -453,13 +454,14 @@ static void start_io_acct(struct dm_io *io)
 {
 	struct mapped_device *md = io->md;
 	int cpu;
+	int rw = bio_data_dir(io->bio);
 
 	io->start_time = jiffies;
 
 	cpu = part_stat_lock();
 	part_round_stats(cpu, &dm_disk(md)->part0);
 	part_stat_unlock();
-	dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
+	dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
 }
 
 static void end_io_acct(struct dm_io *io)
@@ -479,8 +481,9 @@ static void end_io_acct(struct dm_io *io)
	 * After this is decremented the bio must not be touched if it is
	 * a barrier.
	 */
-	dm_disk(md)->part0.in_flight = pending =
-		atomic_dec_return(&md->pending);
+	dm_disk(md)->part0.in_flight[rw] = pending =
+		atomic_dec_return(&md->pending[rw]);
+	pending += atomic_read(&md->pending[rw^0x1]);
 
 	/* nudge anyone waiting on suspend queue */
 	if (!pending)
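The split accounting above gives each I/O direction its own counter while the suspend path still waits on the combined total. A userspace sketch of the same idea with C11 atomics, purely illustrative:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int pending[2];		/* [0] = READ, [1] = WRITE */

	static void start_io(int rw)
	{
		atomic_fetch_add(&pending[rw], 1);
	}

	static int end_io(int rw)
	{
		/* what is left in this direction ... */
		int left = atomic_fetch_sub(&pending[rw], 1) - 1;

		/* ... plus whatever the other direction has in flight */
		return left + atomic_load(&pending[rw ^ 0x1]);
	}

	int main(void)
	{
		start_io(0);
		start_io(1);
		printf("%d\n", end_io(0));	/* 1: write still pending */
		printf("%d\n", end_io(1));	/* 0: fully quiesced */
		return 0;
	}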
@@ -576,8 +579,12 @@ static void dec_pending(struct dm_io *io, int error)
 	struct mapped_device *md = io->md;
 
 	/* Push-back supersedes any I/O errors */
-	if (error && !(io->error > 0 && __noflush_suspending(md)))
-		io->error = error;
+	if (unlikely(error)) {
+		spin_lock_irqsave(&io->endio_lock, flags);
+		if (!(io->error > 0 && __noflush_suspending(md)))
+			io->error = error;
+		spin_unlock_irqrestore(&io->endio_lock, flags);
+	}
 
 	if (atomic_dec_and_test(&io->io_count)) {
 		if (io->error == DM_ENDIO_REQUEUE) {
@@ -1224,6 +1231,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 	atomic_set(&ci.io->io_count, 1);
 	ci.io->bio = bio;
 	ci.io->md = md;
+	spin_lock_init(&ci.io->endio_lock);
 	ci.sector = bio->bi_sector;
 	ci.sector_count = bio_sectors(bio);
 	if (unlikely(bio_empty_barrier(bio)))
@@ -1785,7 +1793,8 @@ static struct mapped_device *alloc_dev(int minor)
 	if (!md->disk)
 		goto bad_disk;
 
-	atomic_set(&md->pending, 0);
+	atomic_set(&md->pending[0], 0);
+	atomic_set(&md->pending[1], 0);
 	init_waitqueue_head(&md->wait);
 	INIT_WORK(&md->work, dm_wq_work);
 	init_waitqueue_head(&md->eventq);
@@ -1819,6 +1828,7 @@ static struct mapped_device *alloc_dev(int minor)
 bad_bdev:
 	destroy_workqueue(md->wq);
 bad_thread:
+	del_gendisk(md->disk);
 	put_disk(md->disk);
 bad_disk:
 	blk_cleanup_queue(md->queue);
@@ -2088,7 +2098,8 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
 				break;
 			}
 			spin_unlock_irqrestore(q->queue_lock, flags);
-		} else if (!atomic_read(&md->pending))
+		} else if (!atomic_read(&md->pending[0]) &&
+			   !atomic_read(&md->pending[1]))
 			break;
 
 		if (interruptible == TASK_INTERRUPTIBLE &&
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 26ba42a79129..10eb1fce975e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2631,7 +2631,7 @@ static void analyze_sbs(mddev_t * mddev)
 			rdev->desc_nr = i++;
 			rdev->raid_disk = rdev->desc_nr;
 			set_bit(In_sync, &rdev->flags);
-		} else if (rdev->raid_disk >= mddev->raid_disks) {
+		} else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
 			rdev->raid_disk = -1;
 			clear_bit(In_sync, &rdev->flags);
 		}
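The widened cutoff accounts for reshapes in flight: delta_disks is negative while an array is shrinking, so raid_disks - min(0, delta_disks) works out to the larger, pre-reshape device count, and members that are still being reshaped out keep their slots. A toy check of the arithmetic:

	#include <stdio.h>

	#define min(a, b) ((a) < (b) ? (a) : (b))

	int main(void)
	{
		int raid_disks = 4, delta_disks = -2;	/* shrinking 6 -> 4 */

		printf("%d\n", raid_disks - min(0, delta_disks));	/* 6 */
		return 0;
	}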
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d1b9bd5fd4f6..a053423785c9 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -64,7 +64,7 @@ static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
 
 	/* allocate a r1bio with room for raid_disks entries in the bios array */
 	r1_bio = kzalloc(size, gfp_flags);
-	if (!r1_bio)
+	if (!r1_bio && pi->mddev)
 		unplug_slaves(pi->mddev);
 
 	return r1_bio;
@@ -1683,6 +1683,7 @@ static void raid1d(mddev_t *mddev)
 				generic_make_request(bio);
 			}
 		}
+		cond_resched();
 	}
 	if (unplug)
 		unplug_slaves(mddev);
@@ -1978,13 +1979,14 @@ static int run(mddev_t *mddev)
 	conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
 	if (!conf->poolinfo)
 		goto out_no_mem;
-	conf->poolinfo->mddev = mddev;
+	conf->poolinfo->mddev = NULL;
 	conf->poolinfo->raid_disks = mddev->raid_disks;
 	conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
 					  r1bio_pool_free,
 					  conf->poolinfo);
 	if (!conf->r1bio_pool)
 		goto out_no_mem;
+	conf->poolinfo->mddev = mddev;
 
 	spin_lock_init(&conf->device_lock);
 	mddev->queue->queue_lock = &conf->device_lock;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 51c4c5c4d87a..c2cb7b87b440 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -68,7 +68,7 @@ static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
 
 	/* allocate a r10bio with room for raid_disks entries in the bios array */
 	r10_bio = kzalloc(size, gfp_flags);
-	if (!r10_bio)
+	if (!r10_bio && conf->mddev)
 		unplug_slaves(conf->mddev);
 
 	return r10_bio;
@@ -1632,6 +1632,7 @@ static void raid10d(mddev_t *mddev)
 				generic_make_request(bio);
 			}
 		}
+		cond_resched();
 	}
 	if (unplug)
 		unplug_slaves(mddev);
@@ -2095,7 +2096,6 @@ static int run(mddev_t *mddev)
 	if (!conf->tmppage)
 		goto out_free_conf;
 
-	conf->mddev = mddev;
 	conf->raid_disks = mddev->raid_disks;
 	conf->near_copies = nc;
 	conf->far_copies = fc;
@@ -2132,6 +2132,7 @@ static int run(mddev_t *mddev)
 		goto out_free_conf;
 	}
 
+	conf->mddev = mddev;
 	spin_lock_init(&conf->device_lock);
 	mddev->queue->queue_lock = &conf->device_lock;
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 94829804ab7f..81abefc172d9 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -156,13 +156,16 @@ static inline int raid6_next_disk(int disk, int raid_disks)
 static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
 			     int *count, int syndrome_disks)
 {
-	int slot;
+	int slot = *count;
 
+	if (sh->ddf_layout)
+		(*count)++;
 	if (idx == sh->pd_idx)
 		return syndrome_disks;
 	if (idx == sh->qd_idx)
 		return syndrome_disks + 1;
-	slot = (*count)++;
+	if (!sh->ddf_layout)
+		(*count)++;
 	return slot;
 }
 
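A worked example may help. After the rewrite, the running count also advances across the parity devices when sh->ddf_layout is set, so DDF data blocks keep their physical position in the source array (the slots under P and Q are simply never filled), while native layouts still compact the data to the front and append P and Q. The sketch below is a standalone toy: 5 devices, pd_idx = 1, qd_idx = 2, a linear walk instead of the d0_idx-relative one, and syndrome_disks of 3 for the native case versus 5 (an assumption mirroring "all devices") for the DDF case:

	#include <stdio.h>

	static int idx_to_slot(int idx, int pd, int qd, int syndrome_disks,
			       int ddf, int *count)
	{
		int slot = *count;

		if (ddf)
			(*count)++;	/* parity keeps its physical slot */
		if (idx == pd)
			return syndrome_disks;
		if (idx == qd)
			return syndrome_disks + 1;
		if (!ddf)
			(*count)++;	/* compact data to the front */
		return slot;
	}

	int main(void)
	{
		for (int ddf = 0; ddf <= 1; ddf++) {
			int count = 0, sd = ddf ? 5 : 3;

			printf("ddf=%d:", ddf);
			for (int idx = 0; idx < 5; idx++)
				printf(" %d->%d", idx,
				       idx_to_slot(idx, 1, 2, sd, ddf, &count));
			printf("\n");
		}
		/* ddf=0: 0->0 1->3 2->4 3->1 4->2
		 * ddf=1: 0->0 1->5 2->6 3->3 4->4 (slots 1,2 stay empty) */
		return 0;
	}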
@@ -717,7 +720,7 @@ static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
 	int i;
 
 	for (i = 0; i < disks; i++)
-		srcs[i] = (void *)raid6_empty_zero_page;
+		srcs[i] = NULL;
 
 	count = 0;
 	i = d0_idx;
@@ -727,9 +730,8 @@ static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
 		srcs[slot] = sh->dev[i].page;
 		i = raid6_next_disk(i, disks);
 	} while (i != d0_idx);
-	BUG_ON(count != syndrome_disks);
 
-	return count;
+	return syndrome_disks;
 }
 
 static struct dma_async_tx_descriptor *
@@ -814,7 +816,7 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
	 * slot number conversion for 'faila' and 'failb'
	 */
 	for (i = 0; i < disks ; i++)
-		blocks[i] = (void *)raid6_empty_zero_page;
+		blocks[i] = NULL;
 	count = 0;
 	i = d0_idx;
 	do {
@@ -828,7 +830,6 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
 			failb = slot;
 		i = raid6_next_disk(i, disks);
 	} while (i != d0_idx);
-	BUG_ON(count != syndrome_disks);
 
 	BUG_ON(faila == failb);
 	if (failb < faila)
@@ -845,7 +846,7 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
 		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
 				  ops_complete_compute, sh,
 				  to_addr_conv(sh, percpu));
-		return async_gen_syndrome(blocks, 0, count+2,
+		return async_gen_syndrome(blocks, 0, syndrome_disks+2,
 					  STRIPE_SIZE, &submit);
 	} else {
 		struct page *dest;
@@ -1139,7 +1140,7 @@ static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu
 			   &sh->ops.zero_sum_result, percpu->spare_page, &submit);
 }
 
-static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
+static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
 {
 	int overlap_clear = 0, i, disks = sh->disks;
 	struct dma_async_tx_descriptor *tx = NULL;
@@ -1204,22 +1205,55 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
 	put_cpu();
 }
 
+#ifdef CONFIG_MULTICORE_RAID456
+static void async_run_ops(void *param, async_cookie_t cookie)
+{
+	struct stripe_head *sh = param;
+	unsigned long ops_request = sh->ops.request;
+
+	clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
+	wake_up(&sh->ops.wait_for_ops);
+
+	__raid_run_ops(sh, ops_request);
+	release_stripe(sh);
+}
+
+static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
+{
+	/* since handle_stripe can be called outside of raid5d context
+	 * we need to ensure sh->ops.request is de-staged before another
+	 * request arrives
+	 */
+	wait_event(sh->ops.wait_for_ops,
+		   !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
+	sh->ops.request = ops_request;
+
+	atomic_inc(&sh->count);
+	async_schedule(async_run_ops, sh);
+}
+#else
+#define raid_run_ops __raid_run_ops
+#endif
+
 static int grow_one_stripe(raid5_conf_t *conf)
 {
 	struct stripe_head *sh;
+	int disks = max(conf->raid_disks, conf->previous_raid_disks);
 	sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
 	if (!sh)
 		return 0;
-	memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
+	memset(sh, 0, sizeof(*sh) + (disks-1)*sizeof(struct r5dev));
 	sh->raid_conf = conf;
 	spin_lock_init(&sh->lock);
+#ifdef CONFIG_MULTICORE_RAID456
+	init_waitqueue_head(&sh->ops.wait_for_ops);
+#endif
 
-	if (grow_buffers(sh, conf->raid_disks)) {
-		shrink_buffers(sh, conf->raid_disks);
+	if (grow_buffers(sh, disks)) {
+		shrink_buffers(sh, disks);
 		kmem_cache_free(conf->slab_cache, sh);
 		return 0;
 	}
-	sh->disks = conf->raid_disks;
 	/* we just created an active stripe so... */
 	atomic_set(&sh->count, 1);
 	atomic_inc(&conf->active_stripes);
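The handoff added above stages exactly one request word per stripe: a caller claims STRIPE_OPS_REQ_PENDING, stores its flags, and the async callback de-stages them (clears the bit and wakes waiters) before actually running the work, so the next caller can queue without losing flags. A rough userspace analog, with pthreads standing in for the kernel's bitops, waitqueue and async_schedule(); illustrative only:

	/* gcc -pthread -o ops ops.c && ./ops */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t wait_for_ops = PTHREAD_COND_INITIALIZER;
	static int ops_pending;			/* STRIPE_OPS_REQ_PENDING */
	static unsigned long staged_request;

	static void *async_run_ops(void *unused)
	{
		pthread_mutex_lock(&lock);
		unsigned long request = staged_request;	/* de-stage ... */
		ops_pending = 0;
		pthread_cond_signal(&wait_for_ops);	/* ... wake waiters */
		pthread_mutex_unlock(&lock);

		printf("running ops %#lx\n", request);	/* __raid_run_ops */
		return NULL;
	}

	static void raid_run_ops(pthread_t *t, unsigned long request)
	{
		pthread_mutex_lock(&lock);
		while (ops_pending)			/* wait_event analog */
			pthread_cond_wait(&wait_for_ops, &lock);
		ops_pending = 1;
		staged_request = request;
		pthread_mutex_unlock(&lock);
		pthread_create(t, NULL, async_run_ops, NULL);
	}

	int main(void)
	{
		pthread_t t1, t2;

		raid_run_ops(&t1, 0x3);
		raid_run_ops(&t2, 0x5);		/* blocks until t1 de-stages */
		pthread_join(t1, NULL);
		pthread_join(t2, NULL);
		return 0;
	}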
@@ -1231,7 +1265,7 @@ static int grow_one_stripe(raid5_conf_t *conf)
 static int grow_stripes(raid5_conf_t *conf, int num)
 {
 	struct kmem_cache *sc;
-	int devs = conf->raid_disks;
+	int devs = max(conf->raid_disks, conf->previous_raid_disks);
 
 	sprintf(conf->cache_name[0],
 		"raid%d-%s", conf->level, mdname(conf->mddev));
@@ -1329,6 +1363,9 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
 
 		nsh->raid_conf = conf;
 		spin_lock_init(&nsh->lock);
+#ifdef CONFIG_MULTICORE_RAID456
+		init_waitqueue_head(&nsh->ops.wait_for_ops);
+#endif
 
 		list_add(&nsh->lru, &newstripes);
 	}
@@ -1899,10 +1936,15 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
 		case ALGORITHM_PARITY_N:
 			break;
 		case ALGORITHM_ROTATING_N_CONTINUE:
+			/* Like left_symmetric, but P is before Q */
 			if (sh->pd_idx == 0)
 				i--;	/* P D D D Q */
-			else if (i > sh->pd_idx)
-				i -= 2; /* D D Q P D */
+			else {
+				/* D D Q P D */
+				if (i < sh->pd_idx)
+					i += raid_disks;
+				i -= (sh->pd_idx + 1);
+			}
 			break;
 		case ALGORITHM_LEFT_ASYMMETRIC_6:
 		case ALGORITHM_RIGHT_ASYMMETRIC_6:
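A worked example of the corrected branch, with raid_disks = 5 and pd_idx = 3, i.e. the "D D Q P D" arrangement: data blocks follow P and wrap around the end of the array, so device 4 holds d0, device 0 holds d1 and device 1 holds d2. The wrap-then-subtract recovers exactly that, where the old "i -= 2" was wrong for the wrapped devices. Toy values only:

	#include <stdio.h>

	int main(void)
	{
		int raid_disks = 5, pd_idx = 3;
		int data_devs[] = { 4, 0, 1 };	/* devices holding d0 d1 d2 */

		for (int k = 0; k < 3; k++) {
			int i = data_devs[k];

			if (i < pd_idx)
				i += raid_disks;	/* wrap past the end */
			i -= (pd_idx + 1);		/* d0 sits just after P */
			printf("device %d -> data block %d\n", data_devs[k], i);
		}
		return 0;
	}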
@@ -2896,7 +2938,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
  *
  */
 
-static bool handle_stripe5(struct stripe_head *sh)
+static void handle_stripe5(struct stripe_head *sh)
 {
 	raid5_conf_t *conf = sh->raid_conf;
 	int disks = sh->disks, i;
@@ -3167,11 +3209,9 @@ static bool handle_stripe5(struct stripe_head *sh)
 	ops_run_io(sh, &s);
 
 	return_io(return_bi);
-
-	return blocked_rdev == NULL;
 }
 
-static bool handle_stripe6(struct stripe_head *sh)
+static void handle_stripe6(struct stripe_head *sh)
 {
 	raid5_conf_t *conf = sh->raid_conf;
 	int disks = sh->disks;
@@ -3455,17 +3495,14 @@ static bool handle_stripe6(struct stripe_head *sh)
 	ops_run_io(sh, &s);
 
 	return_io(return_bi);
-
-	return blocked_rdev == NULL;
 }
 
-/* returns true if the stripe was handled */
-static bool handle_stripe(struct stripe_head *sh)
+static void handle_stripe(struct stripe_head *sh)
 {
 	if (sh->raid_conf->level == 6)
-		return handle_stripe6(sh);
+		handle_stripe6(sh);
 	else
-		return handle_stripe5(sh);
+		handle_stripe5(sh);
 }
 
 static void raid5_activate_delayed(raid5_conf_t *conf)
@@ -3503,9 +3540,10 @@ static void unplug_slaves(mddev_t *mddev)
 {
 	raid5_conf_t *conf = mddev->private;
 	int i;
+	int devs = max(conf->raid_disks, conf->previous_raid_disks);
 
 	rcu_read_lock();
-	for (i = 0; i < conf->raid_disks; i++) {
+	for (i = 0; i < devs; i++) {
 		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
 			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
@@ -4277,9 +4315,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
 	clear_bit(STRIPE_INSYNC, &sh->state);
 	spin_unlock(&sh->lock);
 
-	/* wait for any blocked device to be handled */
-	while (unlikely(!handle_stripe(sh)))
-		;
+	handle_stripe(sh);
 	release_stripe(sh);
 
 	return STRIPE_SECTORS;
@@ -4349,37 +4385,6 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
 	return handled;
 }
 
-#ifdef CONFIG_MULTICORE_RAID456
-static void __process_stripe(void *param, async_cookie_t cookie)
-{
-	struct stripe_head *sh = param;
-
-	handle_stripe(sh);
-	release_stripe(sh);
-}
-
-static void process_stripe(struct stripe_head *sh, struct list_head *domain)
-{
-	async_schedule_domain(__process_stripe, sh, domain);
-}
-
-static void synchronize_stripe_processing(struct list_head *domain)
-{
-	async_synchronize_full_domain(domain);
-}
-#else
-static void process_stripe(struct stripe_head *sh, struct list_head *domain)
-{
-	handle_stripe(sh);
-	release_stripe(sh);
-	cond_resched();
-}
-
-static void synchronize_stripe_processing(struct list_head *domain)
-{
-}
-#endif
-
 
 /*
  * This is our raid5 kernel thread.
@@ -4393,7 +4398,6 @@ static void raid5d(mddev_t *mddev)
 	struct stripe_head *sh;
 	raid5_conf_t *conf = mddev->private;
 	int handled;
-	LIST_HEAD(raid_domain);
 
 	pr_debug("+++ raid5d active\n");
 
@@ -4430,7 +4434,9 @@ static void raid5d(mddev_t *mddev)
 		spin_unlock_irq(&conf->device_lock);
 
 		handled++;
-		process_stripe(sh, &raid_domain);
+		handle_stripe(sh);
+		release_stripe(sh);
+		cond_resched();
 
 		spin_lock_irq(&conf->device_lock);
 	}
@@ -4438,7 +4444,6 @@ static void raid5d(mddev_t *mddev)
 
 	spin_unlock_irq(&conf->device_lock);
 
-	synchronize_stripe_processing(&raid_domain);
 	async_tx_issue_pending_all();
 	unplug_slaves(mddev);
 
@@ -4558,13 +4563,9 @@ raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
 
 	if (!sectors)
 		sectors = mddev->dev_sectors;
-	if (!raid_disks) {
+	if (!raid_disks)
 		/* size is defined by the smallest of previous and new size */
-		if (conf->raid_disks < conf->previous_raid_disks)
-			raid_disks = conf->raid_disks;
-		else
-			raid_disks = conf->previous_raid_disks;
-	}
+		raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
 
 	sectors &= ~((sector_t)mddev->chunk_sectors - 1);
 	sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
@@ -4665,7 +4666,7 @@ static int raid5_alloc_percpu(raid5_conf_t *conf)
 		}
 		per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
 	}
-	scribble = kmalloc(scribble_len(conf->raid_disks), GFP_KERNEL);
+	scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
 	if (!scribble) {
 		err = -ENOMEM;
 		break;
@@ -4686,7 +4687,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
 static raid5_conf_t *setup_conf(mddev_t *mddev)
 {
 	raid5_conf_t *conf;
-	int raid_disk, memory;
+	int raid_disk, memory, max_disks;
 	mdk_rdev_t *rdev;
 	struct disk_info *disk;
 
@@ -4722,15 +4723,28 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
 	conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL);
 	if (conf == NULL)
 		goto abort;
+	spin_lock_init(&conf->device_lock);
+	init_waitqueue_head(&conf->wait_for_stripe);
+	init_waitqueue_head(&conf->wait_for_overlap);
+	INIT_LIST_HEAD(&conf->handle_list);
+	INIT_LIST_HEAD(&conf->hold_list);
+	INIT_LIST_HEAD(&conf->delayed_list);
+	INIT_LIST_HEAD(&conf->bitmap_list);
+	INIT_LIST_HEAD(&conf->inactive_list);
+	atomic_set(&conf->active_stripes, 0);
+	atomic_set(&conf->preread_active_stripes, 0);
+	atomic_set(&conf->active_aligned_reads, 0);
+	conf->bypass_threshold = BYPASS_THRESHOLD;
 
 	conf->raid_disks = mddev->raid_disks;
-	conf->scribble_len = scribble_len(conf->raid_disks);
 	if (mddev->reshape_position == MaxSector)
 		conf->previous_raid_disks = mddev->raid_disks;
 	else
 		conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
+	max_disks = max(conf->raid_disks, conf->previous_raid_disks);
+	conf->scribble_len = scribble_len(max_disks);
 
-	conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info),
+	conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
 			      GFP_KERNEL);
 	if (!conf->disks)
 		goto abort;
@@ -4744,24 +4758,11 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
 	if (raid5_alloc_percpu(conf) != 0)
 		goto abort;
 
-	spin_lock_init(&conf->device_lock);
-	init_waitqueue_head(&conf->wait_for_stripe);
-	init_waitqueue_head(&conf->wait_for_overlap);
-	INIT_LIST_HEAD(&conf->handle_list);
-	INIT_LIST_HEAD(&conf->hold_list);
-	INIT_LIST_HEAD(&conf->delayed_list);
-	INIT_LIST_HEAD(&conf->bitmap_list);
-	INIT_LIST_HEAD(&conf->inactive_list);
-	atomic_set(&conf->active_stripes, 0);
-	atomic_set(&conf->preread_active_stripes, 0);
-	atomic_set(&conf->active_aligned_reads, 0);
-	conf->bypass_threshold = BYPASS_THRESHOLD;
-
 	pr_debug("raid5: run(%s) called.\n", mdname(mddev));
 
 	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		raid_disk = rdev->raid_disk;
-		if (raid_disk >= conf->raid_disks
+		if (raid_disk >= max_disks
 		    || raid_disk < 0)
 			continue;
 		disk = conf->disks + raid_disk;
@@ -4793,7 +4794,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
 	}
 
 	memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
-		 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
+		 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
 	if (grow_stripes(conf, conf->max_nr_stripes)) {
 		printk(KERN_ERR
 			"raid5: couldn't allocate %dkB for buffers\n", memory);
@@ -4918,7 +4919,8 @@ static int run(mddev_t *mddev)
 		    test_bit(In_sync, &rdev->flags))
 			working_disks++;
 
-	mddev->degraded = conf->raid_disks - working_disks;
+	mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks)
+			   - working_disks);
 
 	if (mddev->degraded > conf->max_degraded) {
 		printk(KERN_ERR "raid5: not enough operational devices for %s"
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 2390e0e83daf..dd708359b451 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -214,12 +214,20 @@ struct stripe_head {
 	int			disks;		/* disks in stripe */
 	enum check_states	check_state;
 	enum reconstruct_states	reconstruct_state;
-	/* stripe_operations
+	/**
+	 * struct stripe_operations
	 * @target - STRIPE_OP_COMPUTE_BLK target
+	 * @target2 - 2nd compute target in the raid6 case
+	 * @zero_sum_result - P and Q verification flags
+	 * @request - async service request flags for raid_run_ops
	 */
 	struct stripe_operations {
 		int		     target, target2;
 		enum sum_check_flags zero_sum_result;
+#ifdef CONFIG_MULTICORE_RAID456
+		unsigned long	     request;
+		wait_queue_head_t    wait_for_ops;
+#endif
 	} ops;
 	struct r5dev {
 		struct bio	req;
@@ -294,6 +302,8 @@ struct r6_state {
 #define	STRIPE_FULL_WRITE	13 /* all blocks are set to be overwritten */
 #define	STRIPE_BIOFILL_RUN	14
 #define	STRIPE_COMPUTE_RUN	15
+#define	STRIPE_OPS_REQ_PENDING	16
+
 /*
  * Operation request flags
  */
@@ -478,7 +488,7 @@ static inline int algorithm_valid_raid6(int layout)
 {
 	return (layout >= 0 && layout <= 5)
 		||
-		(layout == 8 || layout == 10)
+		(layout >= 8 && layout <= 10)
 		||
 		(layout >= 16 && layout <= 20);
 }
diff --git a/drivers/md/raid6altivec.uc b/drivers/md/raid6altivec.uc
index 699dfeee4944..2654d5c854be 100644
--- a/drivers/md/raid6altivec.uc
+++ b/drivers/md/raid6altivec.uc
@@ -15,7 +15,7 @@
  *
  * $#-way unrolled portable integer math RAID-6 instruction set
  *
- * This file is postprocessed using unroll.pl
+ * This file is postprocessed using unroll.awk
  *
  * <benh> hpa: in process,
  * you can just "steal" the vec unit with enable_kernel_altivec() (but
diff --git a/drivers/md/raid6int.uc b/drivers/md/raid6int.uc
index f9bf9cba357f..d1e276a14fab 100644
--- a/drivers/md/raid6int.uc
+++ b/drivers/md/raid6int.uc
@@ -15,7 +15,7 @@
  *
  * $#-way unrolled portable integer math RAID-6 instruction set
  *
- * This file is postprocessed using unroll.pl
+ * This file is postprocessed using unroll.awk
  */
 
 #include <linux/raid/pq.h>
diff --git a/drivers/md/raid6test/Makefile b/drivers/md/raid6test/Makefile
index 58ffdf4f5161..2874cbef529d 100644
--- a/drivers/md/raid6test/Makefile
+++ b/drivers/md/raid6test/Makefile
@@ -7,7 +7,7 @@ CC = gcc
 OPTFLAGS = -O2		# Adjust as desired
 CFLAGS	 = -I.. -I ../../../include -g $(OPTFLAGS)
 LD	 = ld
-PERL	 = perl
+AWK	 = awk
 AR	 = ar
 RANLIB	 = ranlib
 
@@ -35,35 +35,35 @@ raid6.a: raid6int1.o raid6int2.o raid6int4.o raid6int8.o raid6int16.o \
 raid6test: test.c raid6.a
 	$(CC) $(CFLAGS) -o raid6test $^
 
-raid6altivec1.c: raid6altivec.uc ../unroll.pl
-	$(PERL) ../unroll.pl 1 < raid6altivec.uc > $@
+raid6altivec1.c: raid6altivec.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=1 < raid6altivec.uc > $@
 
-raid6altivec2.c: raid6altivec.uc ../unroll.pl
-	$(PERL) ../unroll.pl 2 < raid6altivec.uc > $@
+raid6altivec2.c: raid6altivec.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=2 < raid6altivec.uc > $@
 
-raid6altivec4.c: raid6altivec.uc ../unroll.pl
-	$(PERL) ../unroll.pl 4 < raid6altivec.uc > $@
+raid6altivec4.c: raid6altivec.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=4 < raid6altivec.uc > $@
 
-raid6altivec8.c: raid6altivec.uc ../unroll.pl
-	$(PERL) ../unroll.pl 8 < raid6altivec.uc > $@
+raid6altivec8.c: raid6altivec.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=8 < raid6altivec.uc > $@
 
-raid6int1.c: raid6int.uc ../unroll.pl
-	$(PERL) ../unroll.pl 1 < raid6int.uc > $@
+raid6int1.c: raid6int.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=1 < raid6int.uc > $@
 
-raid6int2.c: raid6int.uc ../unroll.pl
-	$(PERL) ../unroll.pl 2 < raid6int.uc > $@
+raid6int2.c: raid6int.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=2 < raid6int.uc > $@
 
-raid6int4.c: raid6int.uc ../unroll.pl
-	$(PERL) ../unroll.pl 4 < raid6int.uc > $@
+raid6int4.c: raid6int.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=4 < raid6int.uc > $@
 
-raid6int8.c: raid6int.uc ../unroll.pl
-	$(PERL) ../unroll.pl 8 < raid6int.uc > $@
+raid6int8.c: raid6int.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=8 < raid6int.uc > $@
 
-raid6int16.c: raid6int.uc ../unroll.pl
-	$(PERL) ../unroll.pl 16 < raid6int.uc > $@
+raid6int16.c: raid6int.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=16 < raid6int.uc > $@
 
-raid6int32.c: raid6int.uc ../unroll.pl
-	$(PERL) ../unroll.pl 32 < raid6int.uc > $@
+raid6int32.c: raid6int.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=32 < raid6int.uc > $@
 
 raid6tables.c: mktables
 	./mktables > raid6tables.c
diff --git a/drivers/md/unroll.awk b/drivers/md/unroll.awk
new file mode 100644
index 000000000000..c6aa03631df8
--- /dev/null
+++ b/drivers/md/unroll.awk
@@ -0,0 +1,20 @@
+
+# This filter requires one command line option of form -vN=n
+# where n must be a decimal number.
+#
+# Repeat each input line containing $$ n times, replacing $$ with 0...n-1.
+# Replace each $# with n, and each $* with a single $.
+
+BEGIN {
+	n = N + 0
+}
+{
+	if (/\$\$/) { rep = n } else { rep = 1 }
+	for (i = 0; i < rep; ++i) {
+		tmp = $0
+		gsub(/\$\$/, i, tmp)
+		gsub(/\$\#/, n, tmp)
+		gsub(/\$\*/, "$", tmp)
+		print tmp
+	}
+}
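For reference, the filter's effect on a hypothetical input line (not one taken from the kernel sources): run through "awk -f unroll.awk -vN=2", a line containing $$ is emitted once per iteration with $$ replaced by 0 and 1, $# becomes the unroll factor, and $* collapses to a single $. The unrolled output is ordinary C:

	/* hypothetical .uc input:
	 *
	 *	p[$$] ^= d[$$];		// $#-way unrolled
	 *
	 * and what -vN=2 generates from it, wrapped here in a main() so
	 * the sketch compiles on its own: */
	#include <stdio.h>

	int main(void)
	{
		int p[2] = { 1, 2 }, d[2] = { 3, 4 };

		p[0] ^= d[0];		/* 2-way unrolled */
		p[1] ^= d[1];		/* 2-way unrolled */

		printf("%d %d\n", p[0], p[1]);	/* 2 6 */
		return 0;
	}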
diff --git a/drivers/md/unroll.pl b/drivers/md/unroll.pl
deleted file mode 100644
index 3acc710a20ea..000000000000
--- a/drivers/md/unroll.pl
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/perl
-#
-# Take a piece of C code and for each line which contains the sequence $$
-# repeat n times with $ replaced by 0...n-1; the sequence $# is replaced
-# by the unrolling factor, and $* with a single $
-#
-
-($n) = @ARGV;
-$n += 0;
-
-while ( defined($line = <STDIN>) ) {
-	if ( $line =~ /\$\$/ ) {
-		$rep = $n;
-	} else {
-		$rep = 1;
-	}
-	for ( $i = 0 ; $i < $rep ; $i++ ) {
-		$tmp = $line;
-		$tmp =~ s/\$\$/$i/g;
-		$tmp =~ s/\$\#/$n/g;
-		$tmp =~ s/\$\*/\$/g;
-		print $tmp;
-	}
-}