author	NeilBrown <neilb@suse.de>	2009-06-16 02:54:21 -0400
committer	NeilBrown <neilb@suse.de>	2009-06-16 02:54:21 -0400
commit	070ec55d07157a3041f92654135c3c6e2eaaf901 (patch)
tree	10f24d859e669ba4a671204ce4176a2b43fdaae5 /drivers/md
parent	a6b3deafe0c50e3e873e8ed5cc8abfcb25c05eff (diff)
md: remove mddev_to_conf "helper" macro
Having a macro just to cast a void* isn't really helpful.
I would much rather see that we are simply dereferencing ->private
than have to know what the macro does.
So open code the macro everywhere and remove the pointless cast.
Signed-off-by: NeilBrown <neilb@suse.de>
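
For illustration only (not part of the patch): the before/after pattern applied across the md personalities looks like the sketch below. Here example_conf_t is a stand-in for whichever per-personality conf type is involved (linear_conf_t, multipath_conf_t, raid5_conf_t, ...).

/* Illustrative sketch; 'example_conf_t' is a hypothetical stand-in type. */

/* Before: a helper macro whose only job is to cast the opaque pointer. */
#define mddev_to_conf(mddev) ((example_conf_t *) mddev->private)

static void before(mddev_t *mddev)
{
	example_conf_t *conf = mddev_to_conf(mddev);
	/* ... use conf ... */
}

/* After: open-code the dereference; assigning from void* needs no cast in C. */
static void after(mddev_t *mddev)
{
	example_conf_t *conf = mddev->private;
	/* ... use conf ... */
}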
Diffstat (limited to 'drivers/md')
 drivers/md/linear.c    | 12
 drivers/md/linear.h    |  2
 drivers/md/multipath.c | 20
 drivers/md/multipath.h |  6
 drivers/md/raid0.c     | 10
 drivers/md/raid0.h     |  2
 drivers/md/raid1.c     | 38
 drivers/md/raid1.h     |  6
 drivers/md/raid10.c    | 42
 drivers/md/raid10.h    |  6
 drivers/md/raid5.c     | 36
 drivers/md/raid5.h     |  2
 12 files changed, 79 insertions, 103 deletions
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 64f1f3e046e0..31f8ec7131bd 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -28,7 +28,7 @@
 static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
 {
 	dev_info_t *hash;
-	linear_conf_t *conf = mddev_to_conf(mddev);
+	linear_conf_t *conf = mddev->private;
 	sector_t idx = sector >> conf->sector_shift;
 
 	/*
@@ -79,7 +79,7 @@ static int linear_mergeable_bvec(struct request_queue *q,
 static void linear_unplug(struct request_queue *q)
 {
 	mddev_t *mddev = q->queuedata;
-	linear_conf_t *conf = mddev_to_conf(mddev);
+	linear_conf_t *conf = mddev->private;
 	int i;
 
 	for (i=0; i < mddev->raid_disks; i++) {
@@ -91,7 +91,7 @@ static void linear_unplug(struct request_queue *q)
 static int linear_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;
-	linear_conf_t *conf = mddev_to_conf(mddev);
+	linear_conf_t *conf = mddev->private;
 	int i, ret = 0;
 
 	for (i = 0; i < mddev->raid_disks && !ret ; i++) {
@@ -103,7 +103,7 @@ static int linear_congested(void *data, int bits)
 
 static sector_t linear_size(mddev_t *mddev, sector_t sectors, int raid_disks)
 {
-	linear_conf_t *conf = mddev_to_conf(mddev);
+	linear_conf_t *conf = mddev->private;
 
 	WARN_ONCE(sectors || raid_disks,
 		  "%s does not support generic reshape\n", __func__);
@@ -294,7 +294,7 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
 	if (!newconf)
 		return -ENOMEM;
 
-	newconf->prev = mddev_to_conf(mddev);
+	newconf->prev = mddev->private;
 	mddev->private = newconf;
 	mddev->raid_disks++;
 	md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
@@ -304,7 +304,7 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
 
 static int linear_stop (mddev_t *mddev)
 {
-	linear_conf_t *conf = mddev_to_conf(mddev);
+	linear_conf_t *conf = mddev->private;
 
 	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
 	do {
diff --git a/drivers/md/linear.h b/drivers/md/linear.h
index bf8179587f95..76078f1cded0 100644
--- a/drivers/md/linear.h
+++ b/drivers/md/linear.h
@@ -24,6 +24,4 @@ struct linear_private_data
 
 typedef struct linear_private_data linear_conf_t;
 
-#define mddev_to_conf(mddev) ((linear_conf_t *) mddev->private)
-
 #endif
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 4ee31aa13c40..c1ca63f278a9 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -58,7 +58,7 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
 {
 	unsigned long flags;
 	mddev_t *mddev = mp_bh->mddev;
-	multipath_conf_t *conf = mddev_to_conf(mddev);
+	multipath_conf_t *conf = mddev->private;
 
 	spin_lock_irqsave(&conf->device_lock, flags);
 	list_add(&mp_bh->retry_list, &conf->retry_list);
@@ -75,7 +75,7 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
 static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
 {
 	struct bio *bio = mp_bh->master_bio;
-	multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);
+	multipath_conf_t *conf = mp_bh->mddev->private;
 
 	bio_endio(bio, err);
 	mempool_free(mp_bh, conf->pool);
@@ -85,7 +85,7 @@ static void multipath_end_request(struct bio *bio, int error)
 {
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private);
-	multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);
+	multipath_conf_t *conf = mp_bh->mddev->private;
 	mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev;
 
 	if (uptodate)
@@ -107,7 +107,7 @@ static void multipath_end_request(struct bio *bio, int error)
 
 static void unplug_slaves(mddev_t *mddev)
 {
-	multipath_conf_t *conf = mddev_to_conf(mddev);
+	multipath_conf_t *conf = mddev->private;
 	int i;
 
 	rcu_read_lock();
@@ -138,7 +138,7 @@ static void multipath_unplug(struct request_queue *q)
 static int multipath_make_request (struct request_queue *q, struct bio * bio)
 {
 	mddev_t *mddev = q->queuedata;
-	multipath_conf_t *conf = mddev_to_conf(mddev);
+	multipath_conf_t *conf = mddev->private;
 	struct multipath_bh * mp_bh;
 	struct multipath_info *multipath;
 	const int rw = bio_data_dir(bio);
@@ -180,7 +180,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
 
 static void multipath_status (struct seq_file *seq, mddev_t *mddev)
 {
-	multipath_conf_t *conf = mddev_to_conf(mddev);
+	multipath_conf_t *conf = mddev->private;
 	int i;
 
 	seq_printf (seq, " [%d/%d] [", conf->raid_disks,
@@ -195,7 +195,7 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev)
 static int multipath_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;
-	multipath_conf_t *conf = mddev_to_conf(mddev);
+	multipath_conf_t *conf = mddev->private;
 	int i, ret = 0;
 
 	rcu_read_lock();
@@ -220,7 +220,7 @@ static int multipath_congested(void *data, int bits)
  */
 static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev)
 {
-	multipath_conf_t *conf = mddev_to_conf(mddev);
+	multipath_conf_t *conf = mddev->private;
 
 	if (conf->working_disks <= 1) {
 		/*
@@ -367,7 +367,7 @@ static void multipathd (mddev_t *mddev)
 	struct multipath_bh *mp_bh;
 	struct bio *bio;
 	unsigned long flags;
-	multipath_conf_t *conf = mddev_to_conf(mddev);
+	multipath_conf_t *conf = mddev->private;
 	struct list_head *head = &conf->retry_list;
 
 	md_check_recovery(mddev);
@@ -531,7 +531,7 @@ out:
 
 static int multipath_stop (mddev_t *mddev)
 {
-	multipath_conf_t *conf = mddev_to_conf(mddev);
+	multipath_conf_t *conf = mddev->private;
 
 	md_unregister_thread(mddev->thread);
 	mddev->thread = NULL;
diff --git a/drivers/md/multipath.h b/drivers/md/multipath.h
index 6fa70b400cda..d1c2a8d78395 100644
--- a/drivers/md/multipath.h
+++ b/drivers/md/multipath.h
@@ -19,12 +19,6 @@ struct multipath_private_data {
 typedef struct multipath_private_data multipath_conf_t;
 
 /*
- * this is the only point in the RAID code where we violate
- * C type safety. mddev->private is an 'opaque' pointer.
- */
-#define mddev_to_conf(mddev) ((multipath_conf_t *) mddev->private)
-
-/*
  * this is our 'private' 'collective' MULTIPATH buffer head.
  * it contains information about what kind of IO operations were started
  * for this MULTIPATH operation, and about their status:
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index e2e9c1833336..77764dad1bcb 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -26,7 +26,7 @@
 static void raid0_unplug(struct request_queue *q)
 {
 	mddev_t *mddev = q->queuedata;
-	raid0_conf_t *conf = mddev_to_conf(mddev);
+	raid0_conf_t *conf = mddev->private;
 	mdk_rdev_t **devlist = conf->devlist;
 	int i;
 
@@ -40,7 +40,7 @@ static void raid0_unplug(struct request_queue *q)
 static int raid0_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;
-	raid0_conf_t *conf = mddev_to_conf(mddev);
+	raid0_conf_t *conf = mddev->private;
 	mdk_rdev_t **devlist = conf->devlist;
 	int i, ret = 0;
 
@@ -294,7 +294,7 @@ static int raid0_run(mddev_t *mddev)
 
 static int raid0_stop(mddev_t *mddev)
 {
-	raid0_conf_t *conf = mddev_to_conf(mddev);
+	raid0_conf_t *conf = mddev->private;
 
 	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
 	kfree(conf->strip_zone);
@@ -327,7 +327,7 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
 {
 	mddev_t *mddev = q->queuedata;
 	unsigned int sect_in_chunk, chunksect_bits, chunk_sects;
-	raid0_conf_t *conf = mddev_to_conf(mddev);
+	raid0_conf_t *conf = mddev->private;
 	struct strip_zone *zone;
 	mdk_rdev_t *tmp_dev;
 	sector_t chunk;
@@ -406,7 +406,7 @@ static void raid0_status (struct seq_file *seq, mddev_t *mddev)
 #ifdef MD_DEBUG
 	int j, k, h;
 	char b[BDEVNAME_SIZE];
-	raid0_conf_t *conf = mddev_to_conf(mddev);
+	raid0_conf_t *conf = mddev->private;
 
 	h = 0;
 	for (j = 0; j < conf->nr_strip_zones; j++) {
diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h
index 7b3605e570c0..91f8e876ee64 100644
--- a/drivers/md/raid0.h
+++ b/drivers/md/raid0.h
@@ -17,6 +17,4 @@ struct raid0_private_data
 
 typedef struct raid0_private_data raid0_conf_t;
 
-#define mddev_to_conf(mddev) ((raid0_conf_t *) mddev->private)
-
 #endif
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e23758b4a34e..5ea5bca53a5e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -182,7 +182,7 @@ static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
 
 static void free_r1bio(r1bio_t *r1_bio)
 {
-	conf_t *conf = mddev_to_conf(r1_bio->mddev);
+	conf_t *conf = r1_bio->mddev->private;
 
 	/*
 	 * Wake up any possible resync thread that waits for the device
@@ -196,7 +196,7 @@ static void free_r1bio(r1bio_t *r1_bio)
 
 static void put_buf(r1bio_t *r1_bio)
 {
-	conf_t *conf = mddev_to_conf(r1_bio->mddev);
+	conf_t *conf = r1_bio->mddev->private;
 	int i;
 
 	for (i=0; i<conf->raid_disks; i++) {
@@ -214,7 +214,7 @@ static void reschedule_retry(r1bio_t *r1_bio)
 {
 	unsigned long flags;
 	mddev_t *mddev = r1_bio->mddev;
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 
 	spin_lock_irqsave(&conf->device_lock, flags);
 	list_add(&r1_bio->retry_list, &conf->retry_list);
@@ -253,7 +253,7 @@ static void raid_end_bio_io(r1bio_t *r1_bio)
  */
 static inline void update_head_pos(int disk, r1bio_t *r1_bio)
 {
-	conf_t *conf = mddev_to_conf(r1_bio->mddev);
+	conf_t *conf = r1_bio->mddev->private;
 
 	conf->mirrors[disk].head_position =
 		r1_bio->sector + (r1_bio->sectors);
@@ -264,7 +264,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
 	int mirror;
-	conf_t *conf = mddev_to_conf(r1_bio->mddev);
+	conf_t *conf = r1_bio->mddev->private;
 
 	mirror = r1_bio->read_disk;
 	/*
@@ -309,7 +309,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
 	int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
-	conf_t *conf = mddev_to_conf(r1_bio->mddev);
+	conf_t *conf = r1_bio->mddev->private;
 	struct bio *to_put = NULL;
 
 
@@ -541,7 +541,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
 
 static void unplug_slaves(mddev_t *mddev)
 {
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	int i;
 
 	rcu_read_lock();
@@ -573,7 +573,7 @@ static void raid1_unplug(struct request_queue *q)
 static int raid1_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	int i, ret = 0;
 
 	rcu_read_lock();
@@ -772,7 +772,7 @@ do_sync_io:
 static int make_request(struct request_queue *q, struct bio * bio)
 {
 	mddev_t *mddev = q->queuedata;
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	mirror_info_t *mirror;
 	r1bio_t *r1_bio;
 	struct bio *read_bio;
@@ -991,7 +991,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
 
 static void status(struct seq_file *seq, mddev_t *mddev)
 {
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	int i;
 
 	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
@@ -1010,7 +1010,7 @@ static void status(struct seq_file *seq, mddev_t *mddev)
 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
 {
 	char b[BDEVNAME_SIZE];
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 
 	/*
 	 * If it is not operational, then we have already marked it as dead
@@ -1214,7 +1214,7 @@ static void end_sync_write(struct bio *bio, int error)
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
 	mddev_t *mddev = r1_bio->mddev;
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	int i;
 	int mirror=0;
 
@@ -1248,7 +1248,7 @@ static void end_sync_write(struct bio *bio, int error)
 
 static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
 {
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	int i;
 	int disks = conf->raid_disks;
 	struct bio *bio, *wbio;
@@ -1562,7 +1562,7 @@ static void raid1d(mddev_t *mddev)
 	r1bio_t *r1_bio;
 	struct bio *bio;
 	unsigned long flags;
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	struct list_head *head = &conf->retry_list;
 	int unplug=0;
 	mdk_rdev_t *rdev;
@@ -1585,7 +1585,7 @@ static void raid1d(mddev_t *mddev)
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 
 		mddev = r1_bio->mddev;
-		conf = mddev_to_conf(mddev);
+		conf = mddev->private;
 		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
 			sync_request_write(mddev, r1_bio);
 			unplug = 1;
@@ -1706,7 +1706,7 @@ static int init_resync(conf_t *conf)
 
 static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
 {
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	r1bio_t *r1_bio;
 	struct bio *bio;
 	sector_t max_sector, nr_sectors;
@@ -2087,7 +2087,7 @@ out:
 
 static int stop(mddev_t *mddev)
 {
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	struct bitmap *bitmap = mddev->bitmap;
 	int behind_wait = 0;
 
@@ -2155,7 +2155,7 @@ static int raid1_reshape(mddev_t *mddev)
 	mempool_t *newpool, *oldpool;
 	struct pool_info *newpoolinfo;
 	mirror_info_t *newmirrors;
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	int cnt, raid_disks;
 	unsigned long flags;
 	int d, d2, err;
@@ -2252,7 +2252,7 @@ static int raid1_reshape(mddev_t *mddev)
 
 static void raid1_quiesce(mddev_t *mddev, int state)
 {
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 
 	switch(state) {
 	case 1:
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 1620eea3d57c..e87b84deff68 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -64,12 +64,6 @@ struct r1_private_data_s {
 typedef struct r1_private_data_s conf_t;
 
 /*
- * this is the only point in the RAID code where we violate
- * C type safety. mddev->private is an 'opaque' pointer.
- */
-#define mddev_to_conf(mddev) ((conf_t *) mddev->private)
-
-/*
  * this is our 'private' RAID1 bio.
  *
  * it contains information about what kind of IO operations were started
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 750550c1166f..9a5beb4fd954 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -188,7 +188,7 @@ static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
 
 static void free_r10bio(r10bio_t *r10_bio)
 {
-	conf_t *conf = mddev_to_conf(r10_bio->mddev);
+	conf_t *conf = r10_bio->mddev->private;
 
 	/*
 	 * Wake up any possible resync thread that waits for the device
@@ -202,7 +202,7 @@ static void free_r10bio(r10bio_t *r10_bio)
 
 static void put_buf(r10bio_t *r10_bio)
 {
-	conf_t *conf = mddev_to_conf(r10_bio->mddev);
+	conf_t *conf = r10_bio->mddev->private;
 
 	mempool_free(r10_bio, conf->r10buf_pool);
 
@@ -213,7 +213,7 @@ static void reschedule_retry(r10bio_t *r10_bio)
 {
 	unsigned long flags;
 	mddev_t *mddev = r10_bio->mddev;
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 
 	spin_lock_irqsave(&conf->device_lock, flags);
 	list_add(&r10_bio->retry_list, &conf->retry_list);
@@ -245,7 +245,7 @@ static void raid_end_bio_io(r10bio_t *r10_bio)
  */
 static inline void update_head_pos(int slot, r10bio_t *r10_bio)
 {
-	conf_t *conf = mddev_to_conf(r10_bio->mddev);
+	conf_t *conf = r10_bio->mddev->private;
 
 	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
 		r10_bio->devs[slot].addr + (r10_bio->sectors);
@@ -256,7 +256,7 @@ static void raid10_end_read_request(struct bio *bio, int error)
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
 	int slot, dev;
-	conf_t *conf = mddev_to_conf(r10_bio->mddev);
+	conf_t *conf = r10_bio->mddev->private;
 
 
 	slot = r10_bio->read_slot;
@@ -297,7 +297,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
 	int slot, dev;
-	conf_t *conf = mddev_to_conf(r10_bio->mddev);
+	conf_t *conf = r10_bio->mddev->private;
 
 	for (slot = 0; slot < conf->copies; slot++)
 		if (r10_bio->devs[slot].bio == bio)
@@ -596,7 +596,7 @@ rb_out:
 
 static void unplug_slaves(mddev_t *mddev)
 {
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	int i;
 
 	rcu_read_lock();
@@ -628,7 +628,7 @@ static void raid10_unplug(struct request_queue *q)
 static int raid10_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	int i, ret = 0;
 
 	rcu_read_lock();
@@ -788,7 +788,7 @@ static void unfreeze_array(conf_t *conf)
 static int make_request(struct request_queue *q, struct bio * bio)
 {
 	mddev_t *mddev = q->queuedata;
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	mirror_info_t *mirror;
 	r10bio_t *r10_bio;
 	struct bio *read_bio;
@@ -981,7 +981,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
 
 static void status(struct seq_file *seq, mddev_t *mddev)
 {
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	int i;
 
 	if (conf->near_copies < conf->raid_disks)
@@ -1006,7 +1006,7 @@ static void status(struct seq_file *seq, mddev_t *mddev)
 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
 {
 	char b[BDEVNAME_SIZE];
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 
 	/*
 	 * If it is not operational, then we have already marked it as dead
@@ -1215,7 +1215,7 @@ abort:
 static void end_sync_read(struct bio *bio, int error)
 {
 	r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
-	conf_t *conf = mddev_to_conf(r10_bio->mddev);
+	conf_t *conf = r10_bio->mddev->private;
 	int i,d;
 
 	for (i=0; i<conf->copies; i++)
@@ -1253,7 +1253,7 @@ static void end_sync_write(struct bio *bio, int error)
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
 	mddev_t *mddev = r10_bio->mddev;
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	int i,d;
 
 	for (i = 0; i < conf->copies; i++)
@@ -1300,7 +1300,7 @@ static void end_sync_write(struct bio *bio, int error)
 */
 static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
 {
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	int i, first;
 	struct bio *tbio, *fbio;
 
@@ -1400,7 +1400,7 @@ done:
 
 static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
 {
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	int i, d;
 	struct bio *bio, *wbio;
 
@@ -1549,7 +1549,7 @@ static void raid10d(mddev_t *mddev)
 	r10bio_t *r10_bio;
 	struct bio *bio;
 	unsigned long flags;
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	struct list_head *head = &conf->retry_list;
 	int unplug=0;
 	mdk_rdev_t *rdev;
@@ -1572,7 +1572,7 @@ static void raid10d(mddev_t *mddev)
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 
 		mddev = r10_bio->mddev;
-		conf = mddev_to_conf(mddev);
+		conf = mddev->private;
 		if (test_bit(R10BIO_IsSync, &r10_bio->state)) {
 			sync_request_write(mddev, r10_bio);
 			unplug = 1;
@@ -1680,7 +1680,7 @@ static int init_resync(conf_t *conf)
 
 static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
 {
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 	r10bio_t *r10_bio;
 	struct bio *biolist = NULL, *bio;
 	sector_t max_sector, nr_sectors;
@@ -2026,7 +2026,7 @@ static sector_t
 raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
 {
 	sector_t size;
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 
 	if (!raid_disks)
 		raid_disks = mddev->raid_disks;
@@ -2227,7 +2227,7 @@ out:
 
 static int stop(mddev_t *mddev)
 {
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 
 	raise_barrier(conf, 0);
 	lower_barrier(conf);
@@ -2245,7 +2245,7 @@ static int stop(mddev_t *mddev)
 
 static void raid10_quiesce(mddev_t *mddev, int state)
 {
-	conf_t *conf = mddev_to_conf(mddev);
+	conf_t *conf = mddev->private;
 
 	switch(state) {
 	case 1:
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 244dbe507a54..59cd1efb8d30 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -62,12 +62,6 @@ struct r10_private_data_s {
 typedef struct r10_private_data_s conf_t;
 
 /*
- * this is the only point in the RAID code where we violate
- * C type safety. mddev->private is an 'opaque' pointer.
- */
-#define mddev_to_conf(mddev) ((conf_t *) mddev->private)
-
-/*
  * this is our 'private' RAID10 bio.
  *
  * it contains information about what kind of IO operations were started
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index bef876698232..7fb97c65ad37 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3284,7 +3284,7 @@ static void activate_bit_delay(raid5_conf_t *conf)
 
 static void unplug_slaves(mddev_t *mddev)
 {
-	raid5_conf_t *conf = mddev_to_conf(mddev);
+	raid5_conf_t *conf = mddev->private;
 	int i;
 
 	rcu_read_lock();
@@ -3308,7 +3308,7 @@ static void unplug_slaves(mddev_t *mddev)
 static void raid5_unplug_device(struct request_queue *q)
 {
 	mddev_t *mddev = q->queuedata;
-	raid5_conf_t *conf = mddev_to_conf(mddev);
+	raid5_conf_t *conf = mddev->private;
 	unsigned long flags;
 
 	spin_lock_irqsave(&conf->device_lock, flags);
@@ -3327,7 +3327,7 @@ static void raid5_unplug_device(struct request_queue *q)
 static int raid5_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;
-	raid5_conf_t *conf = mddev_to_conf(mddev);
+	raid5_conf_t *conf = mddev->private;
 
 	/* No difference between reads and writes. Just check
 	 * how busy the stripe_cache is
@@ -3440,7 +3440,7 @@ static void raid5_align_endio(struct bio *bi, int error)
 	bio_put(bi);
 
 	mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
-	conf = mddev_to_conf(mddev);
+	conf = mddev->private;
 	rdev = (void*)raid_bi->bi_next;
 	raid_bi->bi_next = NULL;
 
@@ -3482,7 +3482,7 @@ static int bio_fits_rdev(struct bio *bi)
 static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
 {
 	mddev_t *mddev = q->queuedata;
-	raid5_conf_t *conf = mddev_to_conf(mddev);
+	raid5_conf_t *conf = mddev->private;
 	unsigned int dd_idx;
 	struct bio* align_bi;
 	mdk_rdev_t *rdev;
@@ -3599,7 +3599,7 @@ static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
 static int make_request(struct request_queue *q, struct bio * bi)
 {
 	mddev_t *mddev = q->queuedata;
-	raid5_conf_t *conf = mddev_to_conf(mddev);
+	raid5_conf_t *conf = mddev->private;
 	int dd_idx;
 	sector_t new_sector;
 	sector_t logical_sector, last_sector;
@@ -4129,7 +4129,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
 static void raid5d(mddev_t *mddev)
 {
 	struct stripe_head *sh;
-	raid5_conf_t *conf = mddev_to_conf(mddev);
+	raid5_conf_t *conf = mddev->private;
 	int handled;
 
 	pr_debug("+++ raid5d active\n");
@@ -4185,7 +4185,7 @@ static void raid5d(mddev_t *mddev)
 static ssize_t
 raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
 {
-	raid5_conf_t *conf = mddev_to_conf(mddev);
+	raid5_conf_t *conf = mddev->private;
 	if (conf)
 		return sprintf(page, "%d\n", conf->max_nr_stripes);
 	else
@@ -4195,7 +4195,7 @@ raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
 static ssize_t
 raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
 {
-	raid5_conf_t *conf = mddev_to_conf(mddev);
+	raid5_conf_t *conf = mddev->private;
 	unsigned long new;
 	int err;
 
@@ -4233,7 +4233,7 @@ raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
 static ssize_t
 raid5_show_preread_threshold(mddev_t *mddev, char *page)
 {
-	raid5_conf_t *conf = mddev_to_conf(mddev);
+	raid5_conf_t *conf = mddev->private;
 	if (conf)
 		return sprintf(page, "%d\n", conf->bypass_threshold);
 	else
@@ -4243,7 +4243,7 @@ raid5_show_preread_threshold(mddev_t *mddev, char *page)
 static ssize_t
 raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
 {
-	raid5_conf_t *conf = mddev_to_conf(mddev);
+	raid5_conf_t *conf = mddev->private;
 	unsigned long new;
 	if (len >= PAGE_SIZE)
 		return -EINVAL;
@@ -4267,7 +4267,7 @@ raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
 static ssize_t
 stripe_cache_active_show(mddev_t *mddev, char *page)
 {
-	raid5_conf_t *conf = mddev_to_conf(mddev);
+	raid5_conf_t *conf = mddev->private;
 	if (conf)
 		return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
 	else
@@ -4291,7 +4291,7 @@ static struct attribute_group raid5_attrs_group = {
 static sector_t
 raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
 {
-	raid5_conf_t *conf = mddev_to_conf(mddev);
+	raid5_conf_t *conf = mddev->private;
 
 	if (!sectors)
 		sectors = mddev->dev_sectors;
@@ -4845,7 +4845,7 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
 
 static int raid5_check_reshape(mddev_t *mddev)
 {
-	raid5_conf_t *conf = mddev_to_conf(mddev);
+	raid5_conf_t *conf = mddev->private;
 
 	if (mddev->delta_disks == 0 &&
 	    mddev->new_layout == mddev->layout &&
@@ -4890,7 +4890,7 @@ static int raid5_check_reshape(mddev_t *mddev)
 
 static int raid5_start_reshape(mddev_t *mddev)
 {
-	raid5_conf_t *conf = mddev_to_conf(mddev);
+	raid5_conf_t *conf = mddev->private;
 	mdk_rdev_t *rdev;
 	int spares = 0;
 	int added_devices = 0;
@@ -5022,7 +5022,7 @@ static void end_reshape(raid5_conf_t *conf)
 static void raid5_finish_reshape(mddev_t *mddev)
 {
 	struct block_device *bdev;
-	raid5_conf_t *conf = mddev_to_conf(mddev);
+	raid5_conf_t *conf = mddev->private;
 
 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
 
@@ -5061,7 +5061,7 @@ static void raid5_finish_reshape(mddev_t *mddev)
 
 static void raid5_quiesce(mddev_t *mddev, int state)
 {
-	raid5_conf_t *conf = mddev_to_conf(mddev);
+	raid5_conf_t *conf = mddev->private;
 
 	switch(state) {
 	case 2: /* resume for a suspend */
@@ -5157,7 +5157,7 @@ static int raid5_reconfig(mddev_t *mddev, int new_layout, int new_chunk)
 	 * For larger arrays we record the new value - after validation
 	 * to be used by a reshape pass.
 	 */
-	raid5_conf_t *conf = mddev_to_conf(mddev);
+	raid5_conf_t *conf = mddev->private;
 
 	if (new_layout >= 0 && !algorithm_valid_raid5(new_layout))
 		return -EINVAL;
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 52ba99954dec..1a25c9e252b4 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -408,8 +408,6 @@ struct raid5_private_data {
 
 typedef struct raid5_private_data raid5_conf_t;
 
-#define mddev_to_conf(mddev) ((raid5_conf_t *) mddev->private)
-
 /*
  * Our supported algorithms
  */