author		Dan Williams <dan.j.williams@intel.com>	2007-09-24 13:06:13 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2007-09-24 16:23:35 -0400
commit		e4d84909dd48b5e5806a5d18b881e1ca1610ba9b
tree		85aa712ebc034a31b9a2a5db2f0e7ce4951cb440	/drivers/md/raid5.c
parent		6247cdc2cd334dad0ea5428245a7d8f4b075f21e
raid5: fix 2 bugs in ops_complete_biofill
1/ ops_complete_biofill tried to avoid calling handle_stripe, since all
the state necessary to return read completions is already available.
However, determining whether more read requests are pending requires
locking the stripe (to block add_stripe_bio from updating dev->toread).
ops_complete_biofill can run in tasklet context, so rather than upgrading
all the stripe locks from spin_lock to spin_lock_bh, this patch simply
reschedules handle_stripe unconditionally after completing the read
request; see the first sketch below.
2/ ops_complete_biofill needlessly qualified its handling of R5_Wantfill
with dev->toread, with the result that the 'biofill' pending bit was
cleared before the pending read completions on dev->read had been
handled. R5_Wantfill can be handled unconditionally because the 'biofill'
pending bit prevents new R5_Wantfill requests from being seen by
ops_run_biofill and ops_complete_biofill; see the second sketch below.
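To illustrate fix 1, a before/after sketch (condensed from the diff
below, not compilable on its own):

	/* before: dev->toread may be updated concurrently by
	 * add_stripe_bio under sh->lock; checking it safely from
	 * tasklet context would require converting the stripe
	 * locks from spin_lock() to spin_lock_bh()
	 */
	if (dev->toread)
		more_to_read++;
	...
	if (more_to_read)
		set_bit(STRIPE_HANDLE, &sh->state);

	/* after: skip the racy check and reschedule unconditionally;
	 * handle_stripe will pick up any reads that arrived meanwhile
	 */
	set_bit(STRIPE_HANDLE, &sh->state);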
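For fix 2, the guard that makes the unconditional clear safe sits on the
producer side in handle_stripe (paraphrased sketch, not part of this
diff):

	/* new R5_Wantfill requests are only raised while no biofill
	 * operation is pending, so every bit observed by
	 * ops_complete_biofill belongs to the completed operation
	 */
	if (dev->toread && !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending))
		set_bit(R5_Wantfill, &dev->flags);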
Found-by: Yuri Tikhonov <yur@emcraft.com>
[neilb@suse.de: simpler fix for bug 1 than moving code]
Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/md/raid5.c')
 drivers/md/raid5.c | 17 +++++++----------
 1 file changed, 7 insertions(+), 10 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4d63773ee73a..f96dea975fa5 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -514,7 +514,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
 	struct stripe_head *sh = stripe_head_ref;
 	struct bio *return_bi = NULL;
 	raid5_conf_t *conf = sh->raid_conf;
-	int i, more_to_read = 0;
+	int i;
 
 	pr_debug("%s: stripe %llu\n", __FUNCTION__,
 		(unsigned long long)sh->sector);
@@ -522,16 +522,14 @@ static void ops_complete_biofill(void *stripe_head_ref)
 	/* clear completed biofills */
 	for (i = sh->disks; i--; ) {
 		struct r5dev *dev = &sh->dev[i];
-		/* check if this stripe has new incoming reads */
-		if (dev->toread)
-			more_to_read++;
 
 		/* acknowledge completion of a biofill operation */
-		/* and check if we need to reply to a read request
-		 */
-		if (test_bit(R5_Wantfill, &dev->flags) && !dev->toread) {
+		/* and check if we need to reply to a read request,
+		 * new R5_Wantfill requests are held off until
+		 * !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)
+		 */
+		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
 			struct bio *rbi, *rbi2;
-			clear_bit(R5_Wantfill, &dev->flags);
 
 			/* The access to dev->read is outside of the
 			 * spin_lock_irq(&conf->device_lock), but is protected
@@ -558,8 +556,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
 
 	return_io(return_bi);
 
-	if (more_to_read)
-		set_bit(STRIPE_HANDLE, &sh->state);
+	set_bit(STRIPE_HANDLE, &sh->state);
 	release_stripe(sh);
 }
 