author     Dan Williams <dan.j.williams@intel.com>    2008-06-27 18:31:58 -0400
committer  Neil Brown <neilb@notabene.brown>          2008-06-27 18:31:58 -0400
commit     83de75cc92be599850e5ef3928e07cd840833499 (patch)
tree       19995f66767debc27bf207ad1ec73280c31ca8fc /drivers/md/raid5.c
parent     ecc65c9b3f9b9d740a5deade3d85b39be56401b6 (diff)
md: replace STRIPE_OP_BIOFILL with STRIPE_BIOFILL_RUN
From: Dan Williams <dan.j.williams@intel.com>
Track the state of read operations (copying data from the stripe cache to bio
buffers outside the lock) with a state flag. Reduce the scope of the
STRIPE_OP_BIOFILL flag to only tracking whether a biofill operation has been
requested via the ops_request field of struct stripe_head_state.
This is another step towards the removal of ops.{pending,ack,complete,count},
i.e. STRIPE_OP_BIOFILL only requests an operation and does not track the state
of the operation.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Neil Brown <neilb@suse.de>
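
The idea being introduced can be summarized outside the kernel: an operation is *requested* for a single pass through handle_stripe() via a caller-local request mask, while one persistent per-stripe flag records only whether the asynchronous operation is still in flight. The sketch below is a minimal userspace illustration of that pattern, not the raid5 code itself; all names (plan_ops, run_ops, struct stripe, data_ready) are hypothetical stand-ins.

/*
 * Minimal userspace sketch of the request-mask + run-flag pattern.
 * Hypothetical names; not the kernel implementation.
 */
#include <stdbool.h>
#include <stdio.h>

enum { OP_BIOFILL = 1 << 0 };          /* request bit, lives in a per-pass mask */
enum { STATE_BIOFILL_RUN = 1 << 0 };   /* state bit, lives on the stripe        */

struct stripe {
        unsigned long state;           /* persistent flags (STATE_*)            */
        bool data_ready;               /* stand-in for a pending read           */
};

/* Decide what to request on this pass (analogue of handle_stripe). */
static unsigned long plan_ops(struct stripe *sh)
{
        unsigned long ops_request = 0;

        /* Only request a new biofill if one is not already running. */
        if (sh->data_ready && !(sh->state & STATE_BIOFILL_RUN)) {
                ops_request |= OP_BIOFILL;
                sh->state |= STATE_BIOFILL_RUN;
        }
        return ops_request;
}

/* Execute only what this pass requested (analogue of raid5_run_ops). */
static void run_ops(struct stripe *sh, unsigned long ops_request)
{
        if (ops_request & OP_BIOFILL) {
                printf("copy stripe cache to bio buffers\n");
                sh->data_ready = false;
                /* The completion callback clears the run flag. */
                sh->state &= ~STATE_BIOFILL_RUN;
        }
}

int main(void)
{
        struct stripe sh = { .state = 0, .data_ready = true };

        run_ops(&sh, plan_ops(&sh));   /* pass 1: biofill requested and run */
        run_ops(&sh, plan_ops(&sh));   /* pass 2: nothing left to request   */
        return 0;
}

Because the request mask only lives for one pass, nothing like ops.{pending,ack,complete,count} has to be maintained across passes; the only state that survives is the single RUN bit, cleared by the completion path.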
Diffstat (limited to 'drivers/md/raid5.c')
 drivers/md/raid5.c | 33 ++++++++++++---------------------
 1 file changed, 12 insertions(+), 21 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 544e1600f208..b9c0a32a4f95 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -523,38 +523,34 @@ static void ops_complete_biofill(void *stripe_head_ref)
                 (unsigned long long)sh->sector);
 
         /* clear completed biofills */
+        spin_lock_irq(&conf->device_lock);
         for (i = sh->disks; i--; ) {
                 struct r5dev *dev = &sh->dev[i];
 
                 /* acknowledge completion of a biofill operation */
                 /* and check if we need to reply to a read request,
                  * new R5_Wantfill requests are held off until
-                 * !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)
+                 * !STRIPE_BIOFILL_RUN
                  */
                 if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
                         struct bio *rbi, *rbi2;
 
-                        /* The access to dev->read is outside of the
-                         * spin_lock_irq(&conf->device_lock), but is protected
-                         * by the STRIPE_OP_BIOFILL pending bit
-                         */
                         BUG_ON(!dev->read);
                         rbi = dev->read;
                         dev->read = NULL;
                         while (rbi && rbi->bi_sector <
                                 dev->sector + STRIPE_SECTORS) {
                                 rbi2 = r5_next_bio(rbi, dev->sector);
-                                spin_lock_irq(&conf->device_lock);
                                 if (--rbi->bi_phys_segments == 0) {
                                         rbi->bi_next = return_bi;
                                         return_bi = rbi;
                                 }
-                                spin_unlock_irq(&conf->device_lock);
                                 rbi = rbi2;
                         }
                 }
         }
-        set_bit(STRIPE_OP_BIOFILL, &sh->ops.complete);
+        spin_unlock_irq(&conf->device_lock);
+        clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
 
         return_io(return_bi);
 
@@ -880,7 +876,7 @@ static void raid5_run_ops(struct stripe_head *sh, unsigned long pending,
         int overlap_clear = 0, i, disks = sh->disks;
         struct dma_async_tx_descriptor *tx = NULL;
 
-        if (test_bit(STRIPE_OP_BIOFILL, &pending)) {
+        if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
                 ops_run_biofill(sh);
                 overlap_clear++;
         }
@@ -2630,15 +2626,8 @@ static void handle_stripe5(struct stripe_head *sh)
         s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
         s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
         s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
-        /* Now to look around and see what can be done */
-
-        /* clean-up completed biofill operations */
-        if (test_bit(STRIPE_OP_BIOFILL, &sh->ops.complete)) {
-                clear_bit(STRIPE_OP_BIOFILL, &sh->ops.pending);
-                clear_bit(STRIPE_OP_BIOFILL, &sh->ops.ack);
-                clear_bit(STRIPE_OP_BIOFILL, &sh->ops.complete);
-        }
 
+        /* Now to look around and see what can be done */
         rcu_read_lock();
         for (i=disks; i--; ) {
                 mdk_rdev_t *rdev;
@@ -2652,10 +2641,10 @@ static void handle_stripe5(struct stripe_head *sh)
                 /* maybe we can request a biofill operation
                  *
                  * new wantfill requests are only permitted while
-                 * STRIPE_OP_BIOFILL is clear
+                 * ops_complete_biofill is guaranteed to be inactive
                  */
                 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
-                    !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending))
+                    !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
                         set_bit(R5_Wantfill, &dev->flags);
 
                 /* now count some things */
@@ -2699,8 +2688,10 @@ static void handle_stripe5(struct stripe_head *sh)
                 goto unlock;
         }
 
-        if (s.to_fill && !test_and_set_bit(STRIPE_OP_BIOFILL, &sh->ops.pending))
-                sh->ops.count++;
+        if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
+                set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
+                set_bit(STRIPE_BIOFILL_RUN, &sh->state);
+        }
 
         pr_debug("locked=%d uptodate=%d to_read=%d"
                 " to_write=%d failed=%d failed_num=%d\n",