Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/bitmap.c             |  4
-rw-r--r--  drivers/md/dm-exception-store.c |  2
-rw-r--r--  drivers/md/dm-log.c             |  3
-rw-r--r--  drivers/md/dm-snap-persistent.c |  2
-rw-r--r--  drivers/md/dm-table.c           | 38
-rw-r--r--  drivers/md/dm.c                 |  8
-rw-r--r--  drivers/md/linear.c             |  2
-rw-r--r--  drivers/md/md.c                 |  2
-rw-r--r--  drivers/md/multipath.c          |  4
-rw-r--r--  drivers/md/raid0.c              |  2
-rw-r--r--  drivers/md/raid1.c              |  4
-rw-r--r--  drivers/md/raid10.c             |  8
-rw-r--r--  drivers/md/raid5.c              | 32
13 files changed, 56 insertions(+), 55 deletions(-)
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 56df1cee8fb3..3319c2fec28e 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -232,7 +232,7 @@ static struct page *read_sb_page(mddev_t *mddev, long offset,
 		target = rdev->sb_start + offset + index * (PAGE_SIZE/512);
 
 		if (sync_page_io(rdev->bdev, target,
-				 roundup(size, bdev_hardsect_size(rdev->bdev)),
+				 roundup(size, bdev_logical_block_size(rdev->bdev)),
 				 page, READ)) {
 			page->index = index;
 			attach_page_buffers(page, NULL); /* so that free_buffer will
@@ -287,7 +287,7 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
 			int size = PAGE_SIZE;
 			if (page->index == bitmap->file_pages-1)
 				size = roundup(bitmap->last_page_size,
-					       bdev_hardsect_size(rdev->bdev));
+					       bdev_logical_block_size(rdev->bdev));
 			/* Just make sure we aren't corrupting data or
 			 * metadata
 			 */
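Both bitmap.c hunks round an I/O size up to the device's logical block size instead of the old hardsect size. A minimal userspace sketch of the arithmetic the kernel's roundup() macro performs (round_up_to is an illustrative helper, not the kernel macro):

#include <stdio.h>

/* Illustrative model of roundup(x, y): round x up to the next
 * multiple of y (y must be non-zero). */
static unsigned long round_up_to(unsigned long x, unsigned long y)
{
	return ((x + y - 1) / y) * y;
}

int main(void)
{
	/* A full 4096-byte bitmap page on 512-byte sectors is unchanged;
	 * a 100-byte tail grows to one whole sector. */
	printf("%lu\n", round_up_to(4096, 512)); /* 4096 */
	printf("%lu\n", round_up_to(100, 512));  /* 512 */
	return 0;
}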
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index a2e26c242141..75d8081a9041 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -178,7 +178,7 @@ static int set_chunk_size(struct dm_exception_store *store,
 	}
 
 	/* Validate the chunk size against the device block size */
-	if (chunk_size_ulong % (bdev_hardsect_size(store->cow->bdev) >> 9)) {
+	if (chunk_size_ulong % (bdev_logical_block_size(store->cow->bdev) >> 9)) {
 		*error = "Chunk size is not a multiple of device blocksize";
 		return -EINVAL;
 	}
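The check above converts the logical block size from bytes to 512-byte sectors (>> 9) and rejects any chunk size that is not a whole number of device blocks. A hedged userspace sketch of that validation (chunk_size_valid is an illustrative name):

#include <stdio.h>

/* chunk_sectors is in 512-byte sectors; block_bytes is the device's
 * logical block size in bytes (a multiple of 512). */
static int chunk_size_valid(unsigned long chunk_sectors,
                            unsigned int block_bytes)
{
	return chunk_sectors % (block_bytes >> 9) == 0;
}

int main(void)
{
	printf("%d\n", chunk_size_valid(16, 4096)); /* 1: 16 sectors = 2 x 4K blocks */
	printf("%d\n", chunk_size_valid(9, 4096));  /* 0: not a multiple of 8 sectors */
	return 0;
}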
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index be233bc4d917..6fa8ccf91c70 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -413,7 +413,8 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
 	 * Buffer holds both header and bitset.
 	 */
 	buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) +
-			       bitset_size, ti->limits.hardsect_size);
+			       bitset_size,
+			       ti->limits.logical_block_size);
 
 	if (buf_size > dev->bdev->bd_inode->i_size) {
 		DMWARN("log device %s too small: need %llu bytes",
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index e75c6dd76a9a..2662a41337e7 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -282,7 +282,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 	 */
 	if (!ps->store->chunk_size) {
 		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
-		    bdev_hardsect_size(ps->store->cow->bdev) >> 9);
+		    bdev_logical_block_size(ps->store->cow->bdev) >> 9);
 		ps->store->chunk_mask = ps->store->chunk_size - 1;
 		ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
 		chunk_size_supplied = 0;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 429b50b975d5..e9a73bb242b0 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -108,7 +108,8 @@ static void combine_restrictions_low(struct io_restrictions *lhs,
 	lhs->max_hw_segments =
 		min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);
 
-	lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);
+	lhs->logical_block_size = max(lhs->logical_block_size,
+				      rhs->logical_block_size);
 
 	lhs->max_segment_size =
 		min_not_zero(lhs->max_segment_size, rhs->max_segment_size);
@@ -509,7 +510,7 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 	 * combine_restrictions_low()
 	 */
 	rs->max_sectors =
-		min_not_zero(rs->max_sectors, q->max_sectors);
+		min_not_zero(rs->max_sectors, queue_max_sectors(q));
 
 	/*
 	 * Check if merge fn is supported.
@@ -524,24 +525,25 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 
 	rs->max_phys_segments =
 		min_not_zero(rs->max_phys_segments,
-			     q->max_phys_segments);
+			     queue_max_phys_segments(q));
 
 	rs->max_hw_segments =
-		min_not_zero(rs->max_hw_segments, q->max_hw_segments);
+		min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q));
 
-	rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);
+	rs->logical_block_size = max(rs->logical_block_size,
+				     queue_logical_block_size(q));
 
 	rs->max_segment_size =
-		min_not_zero(rs->max_segment_size, q->max_segment_size);
+		min_not_zero(rs->max_segment_size, queue_max_segment_size(q));
 
 	rs->max_hw_sectors =
-		min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);
+		min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q));
 
 	rs->seg_boundary_mask =
 		min_not_zero(rs->seg_boundary_mask,
-			     q->seg_boundary_mask);
+			     queue_segment_boundary(q));
 
-	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);
+	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q));
 
 	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 }
@@ -683,8 +685,8 @@ static void check_for_valid_limits(struct io_restrictions *rs)
 		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
 	if (!rs->max_hw_segments)
 		rs->max_hw_segments = MAX_HW_SEGMENTS;
-	if (!rs->hardsect_size)
-		rs->hardsect_size = 1 << SECTOR_SHIFT;
+	if (!rs->logical_block_size)
+		rs->logical_block_size = 1 << SECTOR_SHIFT;
 	if (!rs->max_segment_size)
 		rs->max_segment_size = MAX_SEGMENT_SIZE;
 	if (!rs->seg_boundary_mask)
@@ -912,13 +914,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	 * restrictions.
 	 */
 	blk_queue_max_sectors(q, t->limits.max_sectors);
-	q->max_phys_segments = t->limits.max_phys_segments;
-	q->max_hw_segments = t->limits.max_hw_segments;
-	q->hardsect_size = t->limits.hardsect_size;
-	q->max_segment_size = t->limits.max_segment_size;
-	q->max_hw_sectors = t->limits.max_hw_sectors;
-	q->seg_boundary_mask = t->limits.seg_boundary_mask;
-	q->bounce_pfn = t->limits.bounce_pfn;
+	blk_queue_max_phys_segments(q, t->limits.max_phys_segments);
+	blk_queue_max_hw_segments(q, t->limits.max_hw_segments);
+	blk_queue_logical_block_size(q, t->limits.logical_block_size);
+	blk_queue_max_segment_size(q, t->limits.max_segment_size);
+	blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
+	blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
+	blk_queue_bounce_limit(q, t->limits.bounce_pfn);
 
 	if (t->limits.no_cluster)
 		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
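The combine logic in this file leans on min_not_zero(), which takes the smaller of two limits while treating zero as "unset". A simplified userspace model of that semantic (the real kernel macro uses temporaries to avoid double evaluation; this one is for illustration only):

#include <stdio.h>

/* Pick the smaller of two limits, but treat 0 as "no limit set":
 * if one side is 0 the other wins. */
#define min_not_zero(a, b) \
	((a) == 0 ? (b) : ((b) == 0 ? (a) : ((a) < (b) ? (a) : (b))))

int main(void)
{
	printf("%d\n", min_not_zero(0, 128));   /* 128: lhs unset */
	printf("%d\n", min_not_zero(256, 128)); /* 128: both set, take min */
	printf("%d\n", min_not_zero(64, 0));    /* 64: rhs unset */
	return 0;
}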
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 424f7b048c30..3fd8b1e65483 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -20,7 +20,8 @@
 #include <linux/idr.h>
 #include <linux/hdreg.h>
 #include <linux/blktrace_api.h>
-#include <trace/block.h>
+
+#include <trace/events/block.h>
 
 #define DM_MSG_PREFIX "core"
 
@@ -53,8 +54,6 @@ struct dm_target_io {
 	union map_info info;
 };
 
-DEFINE_TRACE(block_bio_complete);
-
 /*
  * For request-based dm.
  * One of these is allocated per request.
@@ -656,8 +655,7 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
 		/* the bio has been remapped so dispatch it */
 
 		trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
-				  tio->io->bio->bi_bdev->bd_dev,
-				  clone->bi_sector, sector);
+				  tio->io->bio->bi_bdev->bd_dev, sector);
 
 		generic_make_request(clone);
 	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 7a36e38393a1..64f1f3e046e0 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -146,7 +146,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
 		 * a one page request is never in violation.
 		 */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		disk->num_sectors = rdev->sectors;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 641b211fe3fe..20f6ac338349 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1202,7 +1202,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
 	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
 
 	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
-	bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
+	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
 	if (rdev->sb_size & bmask)
 		rdev->sb_size = (rdev->sb_size | bmask) + 1;
 
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 41ced0cbe823..4ee31aa13c40 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -303,7 +303,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			 * merge_bvec_fn will be involved in multipath.)
 			 */
 			if (q->merge_bvec_fn &&
-			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+			    queue_max_sectors(q) > (PAGE_SIZE>>9))
 				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 			conf->working_disks++;
@@ -467,7 +467,7 @@ static int multipath_run (mddev_t *mddev)
 		 * violating it, not that we ever expect a device with
 		 * a merge_bvec_fn to be involved in multipath */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		if (!test_bit(Faulty, &rdev->flags))
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c08d7559be55..925507e7d673 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -144,7 +144,7 @@ static int create_strip_zones (mddev_t *mddev)
 		 */
 
 		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
-		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		if (!smallest || (rdev1->sectors < smallest->sectors))
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 36df9109cde1..e23758b4a34e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1130,7 +1130,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			 * a one page request is never in violation.
 			 */
 			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+			    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 			p->head_position = 0;
@@ -1996,7 +1996,7 @@ static int run(mddev_t *mddev)
 		 * a one page request is never in violation.
 		 */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		disk->head_position = 0;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 499620afb44b..750550c1166f 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1158,8 +1158,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			 * a one page request is never in violation.
 			 */
 			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-				mddev->queue->max_sectors = (PAGE_SIZE>>9);
+			    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 			p->head_position = 0;
 			rdev->raid_disk = mirror;
@@ -2145,8 +2145,8 @@ static int run(mddev_t *mddev)
 		 * a one page request is never in violation.
 		 */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-			mddev->queue->max_sectors = (PAGE_SIZE>>9);
+		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		disk->head_position = 0;
 	}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 5d400aef8d9b..bef876698232 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -362,7 +362,7 @@ static void raid5_unplug_device(struct request_queue *q);
 
 static struct stripe_head *
 get_active_stripe(raid5_conf_t *conf, sector_t sector,
-		  int previous, int noblock)
+		  int previous, int noblock, int noquiesce)
 {
 	struct stripe_head *sh;
 
@@ -372,7 +372,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector,
 
 	do {
 		wait_event_lock_irq(conf->wait_for_stripe,
-				    conf->quiesce == 0,
+				    conf->quiesce == 0 || noquiesce,
 				    conf->device_lock, /* nothing */);
 		sh = __find_stripe(conf, sector, conf->generation - previous);
 		if (!sh) {
@@ -2671,7 +2671,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
 			sector_t bn = compute_blocknr(sh, i, 1);
 			sector_t s = raid5_compute_sector(conf, bn, 0,
 							  &dd_idx, NULL);
-			sh2 = get_active_stripe(conf, s, 0, 1);
+			sh2 = get_active_stripe(conf, s, 0, 1, 1);
 			if (sh2 == NULL)
 				/* so far only the early blocks of this stripe
 				 * have been requested. When later blocks
@@ -2944,7 +2944,7 @@ static bool handle_stripe5(struct stripe_head *sh)
 	/* Finish reconstruct operations initiated by the expansion process */
 	if (sh->reconstruct_state == reconstruct_state_result) {
 		struct stripe_head *sh2
-			= get_active_stripe(conf, sh->sector, 1, 1);
+			= get_active_stripe(conf, sh->sector, 1, 1, 1);
 		if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
 			/* sh cannot be written until sh2 has been read.
 			 * so arrange for sh to be delayed a little
@@ -3189,7 +3189,7 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
 
 	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
 		struct stripe_head *sh2
-			= get_active_stripe(conf, sh->sector, 1, 1);
+			= get_active_stripe(conf, sh->sector, 1, 1, 1);
 		if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
 			/* sh cannot be written until sh2 has been read.
 			 * so arrange for sh to be delayed a little
@@ -3288,7 +3288,7 @@ static void unplug_slaves(mddev_t *mddev)
 	int i;
 
 	rcu_read_lock();
-	for (i=0; i<mddev->raid_disks; i++) {
+	for (i = 0; i < conf->raid_disks; i++) {
 		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
 			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
@@ -3463,10 +3463,10 @@ static int bio_fits_rdev(struct bio *bi)
 {
 	struct request_queue *q = bdev_get_queue(bi->bi_bdev);
 
-	if ((bi->bi_size>>9) > q->max_sectors)
+	if ((bi->bi_size>>9) > queue_max_sectors(q))
 		return 0;
 	blk_recount_segments(q, bi);
-	if (bi->bi_phys_segments > q->max_phys_segments)
+	if (bi->bi_phys_segments > queue_max_phys_segments(q))
 		return 0;
 
 	if (q->merge_bvec_fn)
@@ -3675,7 +3675,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
 			(unsigned long long)logical_sector);
 
 		sh = get_active_stripe(conf, new_sector, previous,
-				       (bi->bi_rw&RWA_MASK));
+				       (bi->bi_rw&RWA_MASK), 0);
 		if (sh) {
 			if (unlikely(previous)) {
 				/* expansion might have moved on while waiting for a
@@ -3873,7 +3873,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
 	for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
 		int j;
 		int skipped = 0;
-		sh = get_active_stripe(conf, stripe_addr+i, 0, 0);
+		sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
 		set_bit(STRIPE_EXPANDING, &sh->state);
 		atomic_inc(&conf->reshape_stripes);
 		/* If any of this stripe is beyond the end of the old
@@ -3916,13 +3916,13 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
 		raid5_compute_sector(conf, stripe_addr*(new_data_disks),
 				     1, &dd_idx, NULL);
 	last_sector =
-		raid5_compute_sector(conf, ((stripe_addr+conf->chunk_size/512)
+		raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
 					    *(new_data_disks) - 1),
 				     1, &dd_idx, NULL);
 	if (last_sector >= mddev->dev_sectors)
 		last_sector = mddev->dev_sectors - 1;
 	while (first_sector <= last_sector) {
-		sh = get_active_stripe(conf, first_sector, 1, 0);
+		sh = get_active_stripe(conf, first_sector, 1, 0, 1);
 		set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
 		set_bit(STRIPE_HANDLE, &sh->state);
 		release_stripe(sh);
@@ -4022,9 +4022,9 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
 
 	bitmap_cond_end_sync(mddev->bitmap, sector_nr);
 
-	sh = get_active_stripe(conf, sector_nr, 0, 1);
+	sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
 	if (sh == NULL) {
-		sh = get_active_stripe(conf, sector_nr, 0, 0);
+		sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
 		/* make sure we don't swamp the stripe cache if someone else
 		 * is trying to get access
 		 */
@@ -4034,7 +4034,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
 	 * We don't need to check the 'failed' flag as when that gets set,
 	 * recovery aborts.
 	 */
-	for (i=0; i<mddev->raid_disks; i++)
+	for (i = 0; i < conf->raid_disks; i++)
 		if (conf->disks[i].rdev == NULL)
 			still_degraded = 1;
 
@@ -4086,7 +4086,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
 			/* already done this stripe */
 			continue;
 
-		sh = get_active_stripe(conf, sector, 0, 1);
+		sh = get_active_stripe(conf, sector, 0, 1, 0);
 
 		if (!sh) {
 			/* failed to get a stripe - must wait */
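The raid5.c changes thread a fifth argument, noquiesce, through get_active_stripe() so that internal callers (the reshape and expansion paths) can obtain stripes while the array is quiesced, while normal I/O and resync still wait. A toy pthreads model of that gate, with illustrative names and a condition variable standing in for the kernel's wait_event_lock_irq():

#include <pthread.h>
#include <stdbool.h>

/* Toy model of the new gating in get_active_stripe(): normal callers
 * block while quiesce is set; callers passing noquiesce skip the wait.
 * All names here are illustrative, not the kernel's. */
struct stripe_gate {
	pthread_mutex_t lock;
	pthread_cond_t  wake;
	int             quiesce;
};

static void gate_enter(struct stripe_gate *g, bool noquiesce)
{
	pthread_mutex_lock(&g->lock);
	while (g->quiesce != 0 && !noquiesce)
		pthread_cond_wait(&g->wake, &g->lock);
	/* ... find or allocate the stripe while holding the lock ... */
	pthread_mutex_unlock(&g->lock);
}

static void gate_set_quiesce(struct stripe_gate *g, int on)
{
	pthread_mutex_lock(&g->lock);
	g->quiesce = on;
	pthread_mutex_unlock(&g->lock);
	if (!on)
		pthread_cond_broadcast(&g->wake); /* release any waiters */
}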