author    Shaohua Li <shli@fb.com>    2015-08-13 17:31:57 -0400
committer NeilBrown <neilb@suse.com>  2015-10-24 02:16:18 -0400
commit    6d036f7d52e5a9c3b2ff77883db4c34620681804 (patch)
tree      ddfd13c18855035a9e97423142da8df6f70ddc66
parent    3069aa8def32b0c2b83cd27d1c37ed30b47ce879 (diff)
raid5: export some functions
The next several patches use some raid5 functions, so rename them with a raid5_ prefix and export them.

Signed-off-by: Shaohua Li <shli@fb.com>
Signed-off-by: NeilBrown <neilb@suse.com>
-rw-r--r--  drivers/md/raid5.c | 100
-rw-r--r--  drivers/md/raid5.h |   8
2 files changed, 57 insertions(+), 51 deletions(-)
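For context, here is a minimal sketch of how a follow-up patch elsewhere in drivers/md might use the newly exported helpers. The caller below, example_touch_stripe(), is hypothetical and not part of this commit; it only assumes the declarations this patch adds to raid5.h.

/*
 * Hypothetical caller sketch (not part of this commit): a file in
 * drivers/md/ that includes raid5.h after this patch is applied.
 */
#include "raid5.h"

static void example_touch_stripe(struct r5conf *conf, sector_t sector)
{
	struct stripe_head *sh;

	/* previous=0, noblock=0 (wait for a free stripe), noquiesce=0 */
	sh = raid5_get_active_stripe(conf, sector, 0, 0, 0);
	if (!sh)
		return;

	/* queue the stripe for handling, then drop our reference */
	set_bit(STRIPE_HANDLE, &sh->state);
	raid5_release_stripe(sh);
}

The diff below is the corresponding mechanical rename: internal call sites of get_active_stripe(), release_stripe(), compute_blocknr() and raid5_compute_sector() switch to the raid5_-prefixed, exported names.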
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 5b79770c4f08..b200c195160c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -353,7 +353,7 @@ static void release_inactive_stripe_list(struct r5conf *conf,
 struct list_head *list = &temp_inactive_list[size - 1];

 /*
- * We don't hold any lock here yet, get_active_stripe() might
+ * We don't hold any lock here yet, raid5_get_active_stripe() might
 * remove stripes from the list
 */
 if (!list_empty_careful(list)) {
@@ -413,7 +413,7 @@ static int release_stripe_list(struct r5conf *conf,
 return count;
 }

-static void release_stripe(struct stripe_head *sh)
+void raid5_release_stripe(struct stripe_head *sh)
 {
 struct r5conf *conf = sh->raid_conf;
 unsigned long flags;
@@ -658,9 +658,9 @@ static int has_failed(struct r5conf *conf)
 return 0;
 }

-static struct stripe_head *
-get_active_stripe(struct r5conf *conf, sector_t sector,
+struct stripe_head *
+raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
 int previous, int noblock, int noquiesce)
 {
 struct stripe_head *sh;
 int hash = stripe_hash_locks_hash(sector);
@@ -858,7 +858,7 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
 unlock_out:
 unlock_two_stripes(head, sh);
 out:
-release_stripe(head);
+raid5_release_stripe(head);
 }

 /* Determine if 'data_offset' or 'new_data_offset' should be used
@@ -1208,7 +1208,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
 return_io(&return_bi);

 set_bit(STRIPE_HANDLE, &sh->state);
-release_stripe(sh);
+raid5_release_stripe(sh);
 }

 static void ops_run_biofill(struct stripe_head *sh)
@@ -1271,7 +1271,7 @@ static void ops_complete_compute(void *stripe_head_ref)
 if (sh->check_state == check_state_compute_run)
 sh->check_state = check_state_compute_result;
 set_bit(STRIPE_HANDLE, &sh->state);
-release_stripe(sh);
+raid5_release_stripe(sh);
 }

 /* return a pointer to the address conversion region of the scribble buffer */
@@ -1697,7 +1697,7 @@ static void ops_complete_reconstruct(void *stripe_head_ref)
 }

 set_bit(STRIPE_HANDLE, &sh->state);
-release_stripe(sh);
+raid5_release_stripe(sh);
 }

 static void
@@ -1855,7 +1855,7 @@ static void ops_complete_check(void *stripe_head_ref)
 
 sh->check_state = check_state_check_result;
 set_bit(STRIPE_HANDLE, &sh->state);
-release_stripe(sh);
+raid5_release_stripe(sh);
 }

 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
@@ -2017,7 +2017,7 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
 /* we just created an active stripe so... */
 atomic_inc(&conf->active_stripes);

-release_stripe(sh);
+raid5_release_stripe(sh);
 conf->max_nr_stripes++;
 return 1;
 }
@@ -2236,7 +2236,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 if (!p)
 err = -ENOMEM;
 }
-release_stripe(nsh);
+raid5_release_stripe(nsh);
 }
 /* critical section pass, GFP_NOIO no longer needed */
 
@@ -2394,7 +2394,7 @@ static void raid5_end_read_request(struct bio * bi)
 rdev_dec_pending(rdev, conf->mddev);
 clear_bit(R5_LOCKED, &sh->dev[i].flags);
 set_bit(STRIPE_HANDLE, &sh->state);
-release_stripe(sh);
+raid5_release_stripe(sh);
 }

 static void raid5_end_write_request(struct bio *bi)
@@ -2468,14 +2468,12 @@ static void raid5_end_write_request(struct bio *bi)
 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
 clear_bit(R5_LOCKED, &sh->dev[i].flags);
 set_bit(STRIPE_HANDLE, &sh->state);
-release_stripe(sh);
+raid5_release_stripe(sh);

 if (sh->batch_head && sh != sh->batch_head)
-release_stripe(sh->batch_head);
+raid5_release_stripe(sh->batch_head);
 }

-static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
-
 static void raid5_build_block(struct stripe_head *sh, int i, int previous)
 {
 struct r5dev *dev = &sh->dev[i];
@@ -2491,7 +2489,7 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous)
 dev->rreq.bi_private = sh;

 dev->flags = 0;
-dev->sector = compute_blocknr(sh, i, previous);
+dev->sector = raid5_compute_blocknr(sh, i, previous);
 }

 static void error(struct mddev *mddev, struct md_rdev *rdev)
@@ -2524,9 +2522,9 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
 * Input: a 'big' sector number,
 * Output: index of the data and parity disk, and the sector # in them.
 */
-static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
+sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
 int previous, int *dd_idx,
 struct stripe_head *sh)
 {
 sector_t stripe, stripe2;
 sector_t chunk_number;
@@ -2726,7 +2724,7 @@ static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
 return new_sector;
 }

-static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
+sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous)
 {
 struct r5conf *conf = sh->raid_conf;
 int raid_disks = sh->disks;
@@ -3937,10 +3935,10 @@ static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
 struct stripe_head *sh2;
 struct async_submit_ctl submit;

-sector_t bn = compute_blocknr(sh, i, 1);
+sector_t bn = raid5_compute_blocknr(sh, i, 1);
 sector_t s = raid5_compute_sector(conf, bn, 0,
 &dd_idx, NULL);
-sh2 = get_active_stripe(conf, s, 0, 1, 1);
+sh2 = raid5_get_active_stripe(conf, s, 0, 1, 1);
 if (sh2 == NULL)
 /* so far only the early blocks of this stripe
 * have been requested. When later blocks
@@ -3950,7 +3948,7 @@ static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
 if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
 /* must have already done this block */
-release_stripe(sh2);
+raid5_release_stripe(sh2);
 continue;
 }
 
@@ -3971,7 +3969,7 @@ static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
 set_bit(STRIPE_EXPAND_READY, &sh2->state);
 set_bit(STRIPE_HANDLE, &sh2->state);
 }
-release_stripe(sh2);
+raid5_release_stripe(sh2);

 }
 /* done submitting copies, wait for them to complete */
@@ -4257,7 +4255,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
 if (handle_flags == 0 ||
 sh->state & handle_flags)
 set_bit(STRIPE_HANDLE, &sh->state);
-release_stripe(sh);
+raid5_release_stripe(sh);
 }
 spin_lock_irq(&head_sh->stripe_lock);
 head_sh->batch_head = NULL;
@@ -4504,7 +4502,7 @@ static void handle_stripe(struct stripe_head *sh)
 /* Finish reconstruct operations initiated by the expansion process */
 if (sh->reconstruct_state == reconstruct_state_result) {
 struct stripe_head *sh_src
-= get_active_stripe(conf, sh->sector, 1, 1, 1);
+= raid5_get_active_stripe(conf, sh->sector, 1, 1, 1);
 if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
 /* sh cannot be written until sh_src has been read.
 * so arrange for sh to be delayed a little
@@ -4514,11 +4512,11 @@ static void handle_stripe(struct stripe_head *sh)
 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
 &sh_src->state))
 atomic_inc(&conf->preread_active_stripes);
-release_stripe(sh_src);
+raid5_release_stripe(sh_src);
 goto finish;
 }
 if (sh_src)
-release_stripe(sh_src);
+raid5_release_stripe(sh_src);

 sh->reconstruct_state = reconstruct_state_idle;
 clear_bit(STRIPE_EXPANDING, &sh->state);
@@ -5010,7 +5008,7 @@ static void release_stripe_plug(struct mddev *mddev,
 struct raid5_plug_cb *cb;

 if (!blk_cb) {
-release_stripe(sh);
+raid5_release_stripe(sh);
 return;
 }
 
@@ -5026,7 +5024,7 @@ static void release_stripe_plug(struct mddev *mddev,
 if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
 list_add_tail(&sh->lru, &cb->list);
 else
-release_stripe(sh);
+raid5_release_stripe(sh);
 }

 static void make_discard_request(struct mddev *mddev, struct bio *bi)
@@ -5061,12 +5059,12 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
 DEFINE_WAIT(w);
 int d;
 again:
-sh = get_active_stripe(conf, logical_sector, 0, 0, 0);
+sh = raid5_get_active_stripe(conf, logical_sector, 0, 0, 0);
 prepare_to_wait(&conf->wait_for_overlap, &w,
 TASK_UNINTERRUPTIBLE);
 set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
 if (test_bit(STRIPE_SYNCING, &sh->state)) {
-release_stripe(sh);
+raid5_release_stripe(sh);
 schedule();
 goto again;
 }
@@ -5078,7 +5076,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
 if (sh->dev[d].towrite || sh->dev[d].toread) {
 set_bit(R5_Overlap, &sh->dev[d].flags);
 spin_unlock_irq(&sh->stripe_lock);
-release_stripe(sh);
+raid5_release_stripe(sh);
 schedule();
 goto again;
 }
@@ -5208,7 +5206,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 (unsigned long long)new_sector,
 (unsigned long long)logical_sector);

-sh = get_active_stripe(conf, new_sector, previous,
+sh = raid5_get_active_stripe(conf, new_sector, previous,
 (bi->bi_rw&RWA_MASK), 0);
 if (sh) {
 if (unlikely(previous)) {
@@ -5229,7 +5227,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 must_retry = 1;
 spin_unlock_irq(&conf->device_lock);
 if (must_retry) {
-release_stripe(sh);
+raid5_release_stripe(sh);
 schedule();
 do_prepare = true;
 goto retry;
@@ -5239,14 +5237,14 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 /* Might have got the wrong stripe_head
 * by accident
 */
-release_stripe(sh);
+raid5_release_stripe(sh);
 goto retry;
 }

 if (rw == WRITE &&
 logical_sector >= mddev->suspend_lo &&
 logical_sector < mddev->suspend_hi) {
-release_stripe(sh);
+raid5_release_stripe(sh);
 /* As the suspend_* range is controlled by
 * userspace, we want an interruptible
 * wait.
@@ -5269,7 +5267,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 * and wait a while
 */
 md_wakeup_thread(mddev->thread);
-release_stripe(sh);
+raid5_release_stripe(sh);
 schedule();
 do_prepare = true;
 goto retry;
@@ -5456,7 +5454,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
 for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
 int j;
 int skipped_disk = 0;
-sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
+sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
 set_bit(STRIPE_EXPANDING, &sh->state);
 atomic_inc(&conf->reshape_stripes);
 /* If any of this stripe is beyond the end of the old
@@ -5469,7 +5467,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
 if (conf->level == 6 &&
 j == sh->qd_idx)
 continue;
-s = compute_blocknr(sh, j, 0);
+s = raid5_compute_blocknr(sh, j, 0);
 if (s < raid5_size(mddev, 0, 0)) {
 skipped_disk = 1;
 continue;
@@ -5505,10 +5503,10 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
 if (last_sector >= mddev->dev_sectors)
 last_sector = mddev->dev_sectors - 1;
 while (first_sector <= last_sector) {
-sh = get_active_stripe(conf, first_sector, 1, 0, 1);
+sh = raid5_get_active_stripe(conf, first_sector, 1, 0, 1);
 set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
 set_bit(STRIPE_HANDLE, &sh->state);
-release_stripe(sh);
+raid5_release_stripe(sh);
 first_sector += STRIPE_SECTORS;
 }
 /* Now that the sources are clearly marked, we can release
@@ -5517,7 +5515,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
 while (!list_empty(&stripes)) {
 sh = list_entry(stripes.next, struct stripe_head, lru);
 list_del_init(&sh->lru);
-release_stripe(sh);
+raid5_release_stripe(sh);
 }
 /* If this takes us to the resync_max point where we have to pause,
 * then we need to write out the superblock.
@@ -5615,9 +5613,9 @@ static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int
 
 bitmap_cond_end_sync(mddev->bitmap, sector_nr, false);

-sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
+sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0);
 if (sh == NULL) {
-sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
+sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0);
 /* make sure we don't swamp the stripe cache if someone else
 * is trying to get access
 */
@@ -5641,7 +5639,7 @@ static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int
 set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
 set_bit(STRIPE_HANDLE, &sh->state);

-release_stripe(sh);
+raid5_release_stripe(sh);

 return STRIPE_SECTORS;
 }
@@ -5680,7 +5678,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
 /* already done this stripe */
 continue;

-sh = get_active_stripe(conf, sector, 0, 1, 1);
+sh = raid5_get_active_stripe(conf, sector, 0, 1, 1);

 if (!sh) {
 /* failed to get a stripe - must wait */
@@ -5690,7 +5688,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
 }

 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) {
-release_stripe(sh);
+raid5_release_stripe(sh);
 raid5_set_bi_processed_stripes(raid_bio, scnt);
 conf->retry_read_aligned = raid_bio;
 return handled;
@@ -5698,7 +5696,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
 
 set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags);
 handle_stripe(sh);
-release_stripe(sh);
+raid5_release_stripe(sh);
 handled++;
 }
 remaining = raid5_dec_bi_active_stripes(raid_bio);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 828c2925e68f..7686fcb62157 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -609,4 +609,12 @@ static inline int algorithm_is_DDF(int layout)
 
 extern void md_raid5_kick_device(struct r5conf *conf);
 extern int raid5_set_cache_size(struct mddev *mddev, int size);
+extern sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous);
+extern void raid5_release_stripe(struct stripe_head *sh);
+extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
+int previous, int *dd_idx,
+struct stripe_head *sh);
+extern struct stripe_head *
+raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
+int previous, int noblock, int noquiesce);
 #endif