Diffstat (limited to 'drivers/md/raid1.c')
-rw-r--r--	drivers/md/raid1.c	60
1 file changed, 30 insertions, 30 deletions
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 50bd7c9411b9..6022111a4b28 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -52,7 +52,7 @@ static void lower_barrier(conf_t *conf);
 static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
 {
 	struct pool_info *pi = data;
-	int size = offsetof(r1bio_t, bios[pi->raid_disks]);
+	int size = offsetof(struct r1bio, bios[pi->raid_disks]);
 
 	/* allocate a r1bio with room for raid_disks entries in the bios array */
 	return kzalloc(size, gfp_flags);
@@ -73,7 +73,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 {
 	struct pool_info *pi = data;
 	struct page *page;
-	r1bio_t *r1_bio;
+	struct r1bio *r1_bio;
 	struct bio *bio;
 	int i, j;
 
@@ -139,7 +139,7 @@ static void r1buf_pool_free(void *__r1_bio, void *data)
 {
 	struct pool_info *pi = data;
 	int i,j;
-	r1bio_t *r1bio = __r1_bio;
+	struct r1bio *r1bio = __r1_bio;
 
 	for (i = 0; i < RESYNC_PAGES; i++)
 		for (j = pi->raid_disks; j-- ;) {
@@ -154,7 +154,7 @@ static void r1buf_pool_free(void *__r1_bio, void *data)
 	r1bio_pool_free(r1bio, data);
 }
 
-static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
+static void put_all_bios(conf_t *conf, struct r1bio *r1_bio)
 {
 	int i;
 
@@ -166,7 +166,7 @@ static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
 	}
 }
 
-static void free_r1bio(r1bio_t *r1_bio)
+static void free_r1bio(struct r1bio *r1_bio)
 {
 	conf_t *conf = r1_bio->mddev->private;
 
@@ -174,7 +174,7 @@ static void free_r1bio(r1bio_t *r1_bio)
 	mempool_free(r1_bio, conf->r1bio_pool);
 }
 
-static void put_buf(r1bio_t *r1_bio)
+static void put_buf(struct r1bio *r1_bio)
 {
 	conf_t *conf = r1_bio->mddev->private;
 	int i;
@@ -190,7 +190,7 @@ static void put_buf(r1bio_t *r1_bio)
 	lower_barrier(conf);
 }
 
-static void reschedule_retry(r1bio_t *r1_bio)
+static void reschedule_retry(struct r1bio *r1_bio)
 {
 	unsigned long flags;
 	struct mddev *mddev = r1_bio->mddev;
@@ -210,7 +210,7 @@ static void reschedule_retry(r1bio_t *r1_bio)
  * operation and are ready to return a success/failure code to the buffer
  * cache layer.
  */
-static void call_bio_endio(r1bio_t *r1_bio)
+static void call_bio_endio(struct r1bio *r1_bio)
 {
 	struct bio *bio = r1_bio->master_bio;
 	int done;
@@ -237,7 +237,7 @@ static void call_bio_endio(r1bio_t *r1_bio)
 	}
 }
 
-static void raid_end_bio_io(r1bio_t *r1_bio)
+static void raid_end_bio_io(struct r1bio *r1_bio)
 {
 	struct bio *bio = r1_bio->master_bio;
 
@@ -257,7 +257,7 @@ static void raid_end_bio_io(r1bio_t *r1_bio)
 /*
  * Update disk head position estimator based on IRQ completion info.
  */
-static inline void update_head_pos(int disk, r1bio_t *r1_bio)
+static inline void update_head_pos(int disk, struct r1bio *r1_bio)
 {
 	conf_t *conf = r1_bio->mddev->private;
 
@@ -268,7 +268,7 @@ static inline void update_head_pos(int disk, r1bio_t *r1_bio)
 /*
  * Find the disk number which triggered given bio
  */
-static int find_bio_disk(r1bio_t *r1_bio, struct bio *bio)
+static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
 {
 	int mirror;
 	int raid_disks = r1_bio->mddev->raid_disks;
@@ -286,7 +286,7 @@ static int find_bio_disk(r1bio_t *r1_bio, struct bio *bio)
 static void raid1_end_read_request(struct bio *bio, int error)
 {
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	r1bio_t *r1_bio = bio->bi_private;
+	struct r1bio *r1_bio = bio->bi_private;
 	int mirror;
 	conf_t *conf = r1_bio->mddev->private;
 
@@ -333,7 +333,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
 	rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
 }
 
-static void close_write(r1bio_t *r1_bio)
+static void close_write(struct r1bio *r1_bio)
 {
 	/* it really is the end of this request */
 	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
@@ -352,7 +352,7 @@ static void close_write(r1bio_t *r1_bio)
 	md_write_end(r1_bio->mddev);
 }
 
-static void r1_bio_write_done(r1bio_t *r1_bio)
+static void r1_bio_write_done(struct r1bio *r1_bio)
 {
 	if (!atomic_dec_and_test(&r1_bio->remaining))
 		return;
@@ -371,7 +371,7 @@ static void r1_bio_write_done(r1bio_t *r1_bio)
 static void raid1_end_write_request(struct bio *bio, int error)
 {
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	r1bio_t *r1_bio = bio->bi_private;
+	struct r1bio *r1_bio = bio->bi_private;
 	int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
 	conf_t *conf = r1_bio->mddev->private;
 	struct bio *to_put = NULL;
@@ -466,7 +466,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
  *
  * The rdev for the device selected will have nr_pending incremented.
  */
-static int read_balance(conf_t *conf, r1bio_t *r1_bio, int *max_sectors)
+static int read_balance(conf_t *conf, struct r1bio *r1_bio, int *max_sectors)
 {
 	const sector_t this_sector = r1_bio->sector;
 	int sectors;
@@ -764,7 +764,7 @@ static void unfreeze_array(conf_t *conf)
 
 /* duplicate the data pages for behind I/O
  */
-static void alloc_behind_pages(struct bio *bio, r1bio_t *r1_bio)
+static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
 {
 	int i;
 	struct bio_vec *bvec;
@@ -800,7 +800,7 @@ static int make_request(struct mddev *mddev, struct bio * bio)
 {
 	conf_t *conf = mddev->private;
 	mirror_info_t *mirror;
-	r1bio_t *r1_bio;
+	struct r1bio *r1_bio;
 	struct bio *read_bio;
 	int i, disks;
 	struct bitmap *bitmap;
@@ -1354,7 +1354,7 @@ abort:
 
 static void end_sync_read(struct bio *bio, int error)
 {
-	r1bio_t *r1_bio = bio->bi_private;
+	struct r1bio *r1_bio = bio->bi_private;
 
 	update_head_pos(r1_bio->read_disk, r1_bio);
 
@@ -1373,7 +1373,7 @@ static void end_sync_read(struct bio *bio, int error)
 static void end_sync_write(struct bio *bio, int error)
 {
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	r1bio_t *r1_bio = bio->bi_private;
+	struct r1bio *r1_bio = bio->bi_private;
 	struct mddev *mddev = r1_bio->mddev;
 	conf_t *conf = mddev->private;
 	int mirror=0;
@@ -1433,7 +1433,7 @@ static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
 	return 0;
 }
 
-static int fix_sync_read_error(r1bio_t *r1_bio)
+static int fix_sync_read_error(struct r1bio *r1_bio)
 {
 	/* Try some synchronous reads of other devices to get
 	 * good data, much like with normal read errors. Only
@@ -1553,7 +1553,7 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
 	return 1;
 }
 
-static int process_checks(r1bio_t *r1_bio)
+static int process_checks(struct r1bio *r1_bio)
 {
 	/* We have read all readable devices. If we haven't
 	 * got the block, then there is no hope left.
@@ -1635,7 +1635,7 @@ static int process_checks(r1bio_t *r1_bio)
 	return 0;
 }
 
-static void sync_request_write(struct mddev *mddev, r1bio_t *r1_bio)
+static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
 {
 	conf_t *conf = mddev->private;
 	int i;
@@ -1790,7 +1790,7 @@ static int submit_bio_wait(int rw, struct bio *bio)
 	return test_bit(BIO_UPTODATE, &bio->bi_flags);
 }
 
-static int narrow_write_error(r1bio_t *r1_bio, int i)
+static int narrow_write_error(struct r1bio *r1_bio, int i)
 {
 	struct mddev *mddev = r1_bio->mddev;
 	conf_t *conf = mddev->private;
@@ -1866,7 +1866,7 @@ static int narrow_write_error(r1bio_t *r1_bio, int i)
 	return ok;
 }
 
-static void handle_sync_write_finished(conf_t *conf, r1bio_t *r1_bio)
+static void handle_sync_write_finished(conf_t *conf, struct r1bio *r1_bio)
 {
 	int m;
 	int s = r1_bio->sectors;
@@ -1889,7 +1889,7 @@ static void handle_sync_write_finished(conf_t *conf, r1bio_t *r1_bio)
 	md_done_sync(conf->mddev, s, 1);
 }
 
-static void handle_write_finished(conf_t *conf, r1bio_t *r1_bio)
+static void handle_write_finished(conf_t *conf, struct r1bio *r1_bio)
 {
 	int m;
 	for (m = 0; m < conf->raid_disks ; m++)
@@ -1918,7 +1918,7 @@ static void handle_write_finished(conf_t *conf, r1bio_t *r1_bio)
 	raid_end_bio_io(r1_bio);
 }
 
-static void handle_read_error(conf_t *conf, r1bio_t *r1_bio)
+static void handle_read_error(conf_t *conf, struct r1bio *r1_bio)
 {
 	int disk;
 	int max_sectors;
@@ -2010,7 +2010,7 @@ read_more:
 
 static void raid1d(struct mddev *mddev)
 {
-	r1bio_t *r1_bio;
+	struct r1bio *r1_bio;
 	unsigned long flags;
 	conf_t *conf = mddev->private;
 	struct list_head *head = &conf->retry_list;
@@ -2029,7 +2029,7 @@ static void raid1d(struct mddev *mddev)
 			spin_unlock_irqrestore(&conf->device_lock, flags);
 			break;
 		}
-		r1_bio = list_entry(head->prev, r1bio_t, retry_list);
+		r1_bio = list_entry(head->prev, struct r1bio, retry_list);
 		list_del(head->prev);
 		conf->nr_queued--;
 		spin_unlock_irqrestore(&conf->device_lock, flags);
@@ -2088,7 +2088,7 @@ static int init_resync(conf_t *conf)
 static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
 {
 	conf_t *conf = mddev->private;
-	r1bio_t *r1_bio;
+	struct r1bio *r1_bio;
 	struct bio *bio;
 	sector_t max_sector, nr_sectors;
 	int disk = -1;