author		NeilBrown <neilb@suse.com>	2017-03-14 23:05:13 -0400
committer	Shaohua Li <shli@fb.com>	2017-03-22 22:16:56 -0400
commit		0472a42ba1f89ec85f070c731f4440d7cc38c44c (patch)
tree		abdcc017ea2f4fc66e80b2d5f99921894702b687 /drivers/md/raid5.c
parent		016c76ac76e4c678b01a75a602dc6be0282f5b29 (diff)
md/raid5: remove over-loading of ->bi_phys_segments.
When a read request, which bypassed the cache, fails, we need to retry
it through the cache. This involves attaching it to a sequence of
stripe_heads, and it may not be possible to get all the stripe_heads we
need at once. We do what we can, and record how far we got in
->bi_phys_segments so we can pick up again later.

There is only ever one bio which may have a non-zero offset stored in
->bi_phys_segments, the one that is either active in the single thread
which calls retry_aligned_read(), or is in conf->retry_read_aligned
waiting for retry_aligned_read() to be called again. So we only need to
store one offset value. This can be in a local variable passed between
remove_bio_from_retry() and retry_aligned_read(), or in the r5conf
structure next to the ->retry_read_aligned pointer.

Storing it there allows the last usage of ->bi_phys_segments to be
removed from md/raid5.c.

Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Shaohua Li <shli@fb.com>
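For context, the companion change that adds the new field lives in
drivers/md/raid5.h, which is outside this diffstat. A minimal sketch of
the relevant r5conf members as that change would leave them; the field
comments below are assumptions for illustration, not quotes from the
header:

	struct r5conf {
		/* ... */
		struct bio	*retry_read_aligned;	/* bio currently being retried, if any */
		unsigned int	retry_read_offset;	/* stripes of that bio already handled */
		struct bio	*retry_read_aligned_list; /* aligned bios queued for retry */
		/* ... */
	};

Because only one bio can ever be part-way through a retry, a single
offset stored beside the bio pointer is sufficient.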
Diffstat (limited to 'drivers/md/raid5.c')
 drivers/md/raid5.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 0ec9e0212158..1c8be667e9a9 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5082,12 +5082,14 @@ static void add_bio_to_retry(struct bio *bi,struct r5conf *conf)
 	md_wakeup_thread(conf->mddev->thread);
 }
 
-static struct bio *remove_bio_from_retry(struct r5conf *conf)
+static struct bio *remove_bio_from_retry(struct r5conf *conf,
+					 unsigned int *offset)
 {
 	struct bio *bi;
 
 	bi = conf->retry_read_aligned;
 	if (bi) {
+		*offset = conf->retry_read_offset;
 		conf->retry_read_aligned = NULL;
 		return bi;
 	}
@@ -5095,11 +5097,7 @@ static struct bio *remove_bio_from_retry(struct r5conf *conf)
 	if(bi) {
 		conf->retry_read_aligned_list = bi->bi_next;
 		bi->bi_next = NULL;
-		/*
-		 * this sets the active strip count to 1 and the processed
-		 * strip count to zero (upper 8 bits)
-		 */
-		raid5_set_bi_processed_stripes(bi, 0);
+		*offset = 0;
 	}
 
 	return bi;
@@ -6055,7 +6053,8 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
 	return STRIPE_SECTORS;
 }
 
-static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
+static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio,
+			      unsigned int offset)
 {
 	/* We may not be able to submit a whole bio at once as there
 	 * may not be enough stripe_heads available.
@@ -6084,7 +6083,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
 	     sector += STRIPE_SECTORS,
 	     scnt++) {
 
-		if (scnt < raid5_bi_processed_stripes(raid_bio))
+		if (scnt < offset)
 			/* already done this stripe */
 			continue;
 
@@ -6092,15 +6091,15 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
 
 		if (!sh) {
 			/* failed to get a stripe - must wait */
-			raid5_set_bi_processed_stripes(raid_bio, scnt);
 			conf->retry_read_aligned = raid_bio;
+			conf->retry_read_offset = scnt;
 			return handled;
 		}
 
 		if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) {
 			raid5_release_stripe(sh);
-			raid5_set_bi_processed_stripes(raid_bio, scnt);
 			conf->retry_read_aligned = raid_bio;
+			conf->retry_read_offset = scnt;
 			return handled;
 		}
 
@@ -6228,6 +6227,7 @@ static void raid5d(struct md_thread *thread)
 	while (1) {
 		struct bio *bio;
 		int batch_size, released;
+		unsigned int offset;
 
 		released = release_stripe_list(conf, conf->temp_inactive_list);
 		if (released)
@@ -6245,10 +6245,10 @@ static void raid5d(struct md_thread *thread)
 		}
 		raid5_activate_delayed(conf);
 
-		while ((bio = remove_bio_from_retry(conf))) {
+		while ((bio = remove_bio_from_retry(conf, &offset))) {
 			int ok;
 			spin_unlock_irq(&conf->device_lock);
-			ok = retry_aligned_read(conf, bio);
+			ok = retry_aligned_read(conf, bio, offset);
 			spin_lock_irq(&conf->device_lock);
 			if (!ok)
 				break;