Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-table.c  |  8
-rw-r--r--  drivers/md/dm.c        | 10
-rw-r--r--  drivers/md/faulty.c    |  2
-rw-r--r--  drivers/md/linear.c    | 14
-rw-r--r--  drivers/md/md.c        |  2
-rw-r--r--  drivers/md/multipath.c | 12
-rw-r--r--  drivers/md/raid0.c     | 14
-rw-r--r--  drivers/md/raid1.c     | 12
-rw-r--r--  drivers/md/raid10.c    | 14
-rw-r--r--  drivers/md/raid5.c     | 18
10 files changed, 53 insertions(+), 53 deletions(-)
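
The conversion below is purely mechanical: request_queue_t was only ever a
typedef alias for struct request_queue, so every hunk swaps spellings of the
same type without changing behavior. A minimal sketch of the alias being
removed (the real definition lives in include/linux/blkdev.h; the struct body
is elided here):

	/* Old alias (sketch): request_queue_t named the very same type as
	 * struct request_queue, so the two spellings were interchangeable. */
	struct request_queue;
	typedef struct request_queue request_queue_t;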
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 2fc199b0016b..2bcde5798b5a 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -526,7 +526,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 
 void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 {
-	request_queue_t *q = bdev_get_queue(bdev);
+	struct request_queue *q = bdev_get_queue(bdev);
 	struct io_restrictions *rs = &ti->limits;
 
 	/*
@@ -979,7 +979,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 	devices = dm_table_get_devices(t);
 	for (d = devices->next; d != devices; d = d->next) {
 		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-		request_queue_t *q = bdev_get_queue(dd->bdev);
+		struct request_queue *q = bdev_get_queue(dd->bdev);
 		r |= bdi_congested(&q->backing_dev_info, bdi_bits);
 	}
 
@@ -992,7 +992,7 @@ void dm_table_unplug_all(struct dm_table *t)
 
 	for (d = devices->next; d != devices; d = d->next) {
 		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-		request_queue_t *q = bdev_get_queue(dd->bdev);
+		struct request_queue *q = bdev_get_queue(dd->bdev);
 
 		if (q->unplug_fn)
 			q->unplug_fn(q);
@@ -1011,7 +1011,7 @@ int dm_table_flush_all(struct dm_table *t)
 
 	for (d = devices->next; d != devices; d = d->next) {
 		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-		request_queue_t *q = bdev_get_queue(dd->bdev);
+		struct request_queue *q = bdev_get_queue(dd->bdev);
 		int err;
 
 		if (!q->issue_flush_fn)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 846614e676c6..141ff9fa296e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -80,7 +80,7 @@ struct mapped_device {
 
 	unsigned long flags;
 
-	request_queue_t *queue;
+	struct request_queue *queue;
 	struct gendisk *disk;
 	char name[16];
 
@@ -792,7 +792,7 @@ static void __split_bio(struct mapped_device *md, struct bio *bio)
  * The request function that just remaps the bio built up by
  * dm_merge_bvec.
  */
-static int dm_request(request_queue_t *q, struct bio *bio)
+static int dm_request(struct request_queue *q, struct bio *bio)
 {
 	int r;
 	int rw = bio_data_dir(bio);
@@ -844,7 +844,7 @@ static int dm_request(request_queue_t *q, struct bio *bio)
 	return 0;
 }
 
-static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
+static int dm_flush_all(struct request_queue *q, struct gendisk *disk,
 			sector_t *error_sector)
 {
 	struct mapped_device *md = q->queuedata;
@@ -859,7 +859,7 @@ static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
 	return ret;
 }
 
-static void dm_unplug_all(request_queue_t *q)
+static void dm_unplug_all(struct request_queue *q)
 {
 	struct mapped_device *md = q->queuedata;
 	struct dm_table *map = dm_get_table(md);
@@ -1110,7 +1110,7 @@ static void __set_size(struct mapped_device *md, sector_t size)
 
 static int __bind(struct mapped_device *md, struct dm_table *t)
 {
-	request_queue_t *q = md->queue;
+	struct request_queue *q = md->queue;
 	sector_t size;
 
 	size = dm_table_get_size(t);
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 4ebd0f2a75ec..cb059cf14c2e 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -167,7 +167,7 @@ static void add_sector(conf_t *conf, sector_t start, int mode)
 	conf->nfaults = n+1;
 }
 
-static int make_request(request_queue_t *q, struct bio *bio)
+static int make_request(struct request_queue *q, struct bio *bio)
 {
 	mddev_t *mddev = q->queuedata;
 	conf_t *conf = (conf_t*)mddev->private;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 192741083196..17f795c3e0ab 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -55,7 +55,7 @@ static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
  *
  * Return amount of bytes we can take at this offset
  */
-static int linear_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+static int linear_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
 {
 	mddev_t *mddev = q->queuedata;
 	dev_info_t *dev0;
@@ -79,20 +79,20 @@ static int linear_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio
 	return maxsectors << 9;
 }
 
-static void linear_unplug(request_queue_t *q)
+static void linear_unplug(struct request_queue *q)
 {
 	mddev_t *mddev = q->queuedata;
 	linear_conf_t *conf = mddev_to_conf(mddev);
 	int i;
 
 	for (i=0; i < mddev->raid_disks; i++) {
-		request_queue_t *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
+		struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
 		if (r_queue->unplug_fn)
 			r_queue->unplug_fn(r_queue);
 	}
 }
 
-static int linear_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int linear_issue_flush(struct request_queue *q, struct gendisk *disk,
 			      sector_t *error_sector)
 {
 	mddev_t *mddev = q->queuedata;
@@ -101,7 +101,7 @@ static int linear_issue_flush(request_queue_t *q, struct gendisk *disk,
 
 	for (i=0; i < mddev->raid_disks && ret == 0; i++) {
 		struct block_device *bdev = conf->disks[i].rdev->bdev;
-		request_queue_t *r_queue = bdev_get_queue(bdev);
+		struct request_queue *r_queue = bdev_get_queue(bdev);
 
 		if (!r_queue->issue_flush_fn)
 			ret = -EOPNOTSUPP;
@@ -118,7 +118,7 @@ static int linear_congested(void *data, int bits)
 	int i, ret = 0;
 
 	for (i = 0; i < mddev->raid_disks && !ret ; i++) {
-		request_queue_t *q = bdev_get_queue(conf->disks[i].rdev->bdev);
+		struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
 		ret |= bdi_congested(&q->backing_dev_info, bits);
 	}
 	return ret;
@@ -330,7 +330,7 @@ static int linear_stop (mddev_t *mddev)
 	return 0;
 }
 
-static int linear_make_request (request_queue_t *q, struct bio *bio)
+static int linear_make_request (struct request_queue *q, struct bio *bio)
 {
 	const int rw = bio_data_dir(bio);
 	mddev_t *mddev = q->queuedata;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 65ddc887dfd7..f883b7e37f3d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -211,7 +211,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
 	)
 
 
-static int md_fail_request (request_queue_t *q, struct bio *bio)
+static int md_fail_request (struct request_queue *q, struct bio *bio)
 {
 	bio_io_error(bio, bio->bi_size);
 	return 0;
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 14da37fee37b..1e2af43a73b9 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -125,7 +125,7 @@ static void unplug_slaves(mddev_t *mddev)
 		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)
 		    && atomic_read(&rdev->nr_pending)) {
-			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
 
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
@@ -140,13 +140,13 @@ static void unplug_slaves(mddev_t *mddev)
 	rcu_read_unlock();
 }
 
-static void multipath_unplug(request_queue_t *q)
+static void multipath_unplug(struct request_queue *q)
 {
 	unplug_slaves(q->queuedata);
 }
 
 
-static int multipath_make_request (request_queue_t *q, struct bio * bio)
+static int multipath_make_request (struct request_queue *q, struct bio * bio)
 {
 	mddev_t *mddev = q->queuedata;
 	multipath_conf_t *conf = mddev_to_conf(mddev);
@@ -199,7 +199,7 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev)
 	seq_printf (seq, "]");
 }
 
-static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int multipath_issue_flush(struct request_queue *q, struct gendisk *disk,
 				 sector_t *error_sector)
 {
 	mddev_t *mddev = q->queuedata;
@@ -211,7 +211,7 @@ static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk,
 		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 			struct block_device *bdev = rdev->bdev;
-			request_queue_t *r_queue = bdev_get_queue(bdev);
+			struct request_queue *r_queue = bdev_get_queue(bdev);
 
 			if (!r_queue->issue_flush_fn)
 				ret = -EOPNOTSUPP;
@@ -238,7 +238,7 @@ static int multipath_congested(void *data, int bits)
 	for (i = 0; i < mddev->raid_disks ; i++) {
 		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
-			request_queue_t *q = bdev_get_queue(rdev->bdev);
+			struct request_queue *q = bdev_get_queue(rdev->bdev);
 
 			ret |= bdi_congested(&q->backing_dev_info, bits);
 			/* Just like multipath_map, we just check the
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 2c404f73a377..b8216bc6db45 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -25,7 +25,7 @@
 #define MD_DRIVER
 #define MD_PERSONALITY
 
-static void raid0_unplug(request_queue_t *q)
+static void raid0_unplug(struct request_queue *q)
 {
 	mddev_t *mddev = q->queuedata;
 	raid0_conf_t *conf = mddev_to_conf(mddev);
@@ -33,14 +33,14 @@ static void raid0_unplug(request_queue_t *q)
 	int i;
 
 	for (i=0; i<mddev->raid_disks; i++) {
-		request_queue_t *r_queue = bdev_get_queue(devlist[i]->bdev);
+		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
 
 		if (r_queue->unplug_fn)
 			r_queue->unplug_fn(r_queue);
 	}
 }
 
-static int raid0_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid0_issue_flush(struct request_queue *q, struct gendisk *disk,
 			     sector_t *error_sector)
 {
 	mddev_t *mddev = q->queuedata;
@@ -50,7 +50,7 @@ static int raid0_issue_flush(request_queue_t *q, struct gendisk *disk,
 
 	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
 		struct block_device *bdev = devlist[i]->bdev;
-		request_queue_t *r_queue = bdev_get_queue(bdev);
+		struct request_queue *r_queue = bdev_get_queue(bdev);
 
 		if (!r_queue->issue_flush_fn)
 			ret = -EOPNOTSUPP;
@@ -68,7 +68,7 @@ static int raid0_congested(void *data, int bits)
 	int i, ret = 0;
 
 	for (i = 0; i < mddev->raid_disks && !ret ; i++) {
-		request_queue_t *q = bdev_get_queue(devlist[i]->bdev);
+		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
 
 		ret |= bdi_congested(&q->backing_dev_info, bits);
 	}
@@ -268,7 +268,7 @@ static int create_strip_zones (mddev_t *mddev)
  *
  * Return amount of bytes we can accept at this offset
  */
-static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+static int raid0_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
 {
 	mddev_t *mddev = q->queuedata;
 	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
@@ -408,7 +408,7 @@ static int raid0_stop (mddev_t *mddev)
 	return 0;
 }
 
-static int raid0_make_request (request_queue_t *q, struct bio *bio)
+static int raid0_make_request (struct request_queue *q, struct bio *bio)
 {
 	mddev_t *mddev = q->queuedata;
 	unsigned int sect_in_chunk, chunksize_bits, chunk_size, chunk_sects;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 00c78b77b13d..650991bddd8e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -552,7 +552,7 @@ static void unplug_slaves(mddev_t *mddev)
 	for (i=0; i<mddev->raid_disks; i++) {
 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
 
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
@@ -567,7 +567,7 @@ static void unplug_slaves(mddev_t *mddev)
 	rcu_read_unlock();
 }
 
-static void raid1_unplug(request_queue_t *q)
+static void raid1_unplug(struct request_queue *q)
 {
 	mddev_t *mddev = q->queuedata;
 
@@ -575,7 +575,7 @@ static void raid1_unplug(request_queue_t *q)
 	md_wakeup_thread(mddev->thread);
 }
 
-static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid1_issue_flush(struct request_queue *q, struct gendisk *disk,
 			     sector_t *error_sector)
 {
 	mddev_t *mddev = q->queuedata;
@@ -587,7 +587,7 @@ static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk,
 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 			struct block_device *bdev = rdev->bdev;
-			request_queue_t *r_queue = bdev_get_queue(bdev);
+			struct request_queue *r_queue = bdev_get_queue(bdev);
 
 			if (!r_queue->issue_flush_fn)
 				ret = -EOPNOTSUPP;
@@ -615,7 +615,7 @@ static int raid1_congested(void *data, int bits)
 	for (i = 0; i < mddev->raid_disks; i++) {
 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
-			request_queue_t *q = bdev_get_queue(rdev->bdev);
+			struct request_queue *q = bdev_get_queue(rdev->bdev);
 
 			/* Note the '|| 1' - when read_balance prefers
 			 * non-congested targets, it can be removed
@@ -765,7 +765,7 @@ do_sync_io:
 	return NULL;
 }
 
-static int make_request(request_queue_t *q, struct bio * bio)
+static int make_request(struct request_queue *q, struct bio * bio)
 {
 	mddev_t *mddev = q->queuedata;
 	conf_t *conf = mddev_to_conf(mddev);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a95ada1cfac4..f730a144baf1 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -453,7 +453,7 @@ static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
  * If near_copies == raid_disk, there are no striping issues,
  * but in that case, the function isn't called at all.
  */
-static int raid10_mergeable_bvec(request_queue_t *q, struct bio *bio,
+static int raid10_mergeable_bvec(struct request_queue *q, struct bio *bio,
 				 struct bio_vec *bio_vec)
 {
 	mddev_t *mddev = q->queuedata;
@@ -595,7 +595,7 @@ static void unplug_slaves(mddev_t *mddev)
 	for (i=0; i<mddev->raid_disks; i++) {
 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
 
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
@@ -610,7 +610,7 @@ static void unplug_slaves(mddev_t *mddev)
 	rcu_read_unlock();
 }
 
-static void raid10_unplug(request_queue_t *q)
+static void raid10_unplug(struct request_queue *q)
 {
 	mddev_t *mddev = q->queuedata;
 
@@ -618,7 +618,7 @@ static void raid10_unplug(request_queue_t *q)
 	md_wakeup_thread(mddev->thread);
 }
 
-static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid10_issue_flush(struct request_queue *q, struct gendisk *disk,
 			      sector_t *error_sector)
 {
 	mddev_t *mddev = q->queuedata;
@@ -630,7 +630,7 @@ static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 			struct block_device *bdev = rdev->bdev;
-			request_queue_t *r_queue = bdev_get_queue(bdev);
+			struct request_queue *r_queue = bdev_get_queue(bdev);
 
 			if (!r_queue->issue_flush_fn)
 				ret = -EOPNOTSUPP;
@@ -658,7 +658,7 @@ static int raid10_congested(void *data, int bits)
 	for (i = 0; i < mddev->raid_disks && ret == 0; i++) {
 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
-			request_queue_t *q = bdev_get_queue(rdev->bdev);
+			struct request_queue *q = bdev_get_queue(rdev->bdev);
 
 			ret |= bdi_congested(&q->backing_dev_info, bits);
 		}
@@ -772,7 +772,7 @@ static void unfreeze_array(conf_t *conf)
 	spin_unlock_irq(&conf->resync_lock);
 }
 
-static int make_request(request_queue_t *q, struct bio * bio)
+static int make_request(struct request_queue *q, struct bio * bio)
 {
 	mddev_t *mddev = q->queuedata;
 	conf_t *conf = mddev_to_conf(mddev);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d90ee145effe..2aff4be35dc4 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -289,7 +289,7 @@ static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, in
 }
 
 static void unplug_slaves(mddev_t *mddev);
-static void raid5_unplug_device(request_queue_t *q);
+static void raid5_unplug_device(struct request_queue *q);
 
 static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks,
 					     int pd_idx, int noblock)
@@ -3182,7 +3182,7 @@ static void unplug_slaves(mddev_t *mddev)
 	for (i=0; i<mddev->raid_disks; i++) {
 		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
 
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
@@ -3197,7 +3197,7 @@ static void unplug_slaves(mddev_t *mddev)
 	rcu_read_unlock();
 }
 
-static void raid5_unplug_device(request_queue_t *q)
+static void raid5_unplug_device(struct request_queue *q)
 {
 	mddev_t *mddev = q->queuedata;
 	raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -3216,7 +3216,7 @@ static void raid5_unplug_device(request_queue_t *q)
 	unplug_slaves(mddev);
 }
 
-static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid5_issue_flush(struct request_queue *q, struct gendisk *disk,
 			     sector_t *error_sector)
 {
 	mddev_t *mddev = q->queuedata;
@@ -3228,7 +3228,7 @@ static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
 		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 			struct block_device *bdev = rdev->bdev;
-			request_queue_t *r_queue = bdev_get_queue(bdev);
+			struct request_queue *r_queue = bdev_get_queue(bdev);
 
 			if (!r_queue->issue_flush_fn)
 				ret = -EOPNOTSUPP;
@@ -3267,7 +3267,7 @@ static int raid5_congested(void *data, int bits)
 /* We want read requests to align with chunks where possible,
  * but write requests don't need to.
  */
-static int raid5_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+static int raid5_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
 {
 	mddev_t *mddev = q->queuedata;
 	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
@@ -3377,7 +3377,7 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
 
 static int bio_fits_rdev(struct bio *bi)
 {
-	request_queue_t *q = bdev_get_queue(bi->bi_bdev);
+	struct request_queue *q = bdev_get_queue(bi->bi_bdev);
 
 	if ((bi->bi_size>>9) > q->max_sectors)
 		return 0;
@@ -3396,7 +3396,7 @@ static int bio_fits_rdev(struct bio *bi)
 }
 
 
-static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
+static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
 {
 	mddev_t *mddev = q->queuedata;
 	raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -3466,7 +3466,7 @@ static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
 }
 
 
-static int make_request(request_queue_t *q, struct bio * bi)
+static int make_request(struct request_queue *q, struct bio * bi)
 {
 	mddev_t *mddev = q->queuedata;
 	raid5_conf_t *conf = mddev_to_conf(mddev);
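
As a standalone illustration of why every hunk above is type-safe while the
typedef still exists, here is a small userspace sketch; the struct contents
and the probe_old/probe_new function names are invented for the demo, not
taken from the kernel:

	#include <stdio.h>

	/* Toy stand-in for the kernel's struct request_queue. */
	struct request_queue { int id; };
	typedef struct request_queue request_queue_t;	/* the alias being removed */

	/* One function per spelling: both accept the very same pointer type. */
	static int probe_old(request_queue_t *q)      { return q->id; }
	static int probe_new(struct request_queue *q) { return q->id; }

	int main(void)
	{
		struct request_queue q = { 42 };
		/* The typedef introduces no new type, so this prints "42 42". */
		printf("%d %d\n", probe_old(&q), probe_new(&q));
		return 0;
	}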