author	Dan Williams <dan.j.williams@intel.com>	2008-06-27 18:32:06 -0400
committer	Neil Brown <neilb@notabene.brown>	2008-06-27 18:32:06 -0400
commit	d8ee0728b5b30d7a6f62c399a95e953616d31f23 (patch)
tree	7f9fcf2b0f6222b2a2b8ee44d69af1fd6990064d /drivers/md/raid5.c
parent	600aa10993012ff2dd5617720dac081e4f992017 (diff)
md: replace R5_WantPrexor with R5_WantDrain, add 'prexor' reconstruct_states
From: Dan Williams <dan.j.williams@intel.com>

Currently ops_run_biodrain and other locations have extra logic to
determine which blocks are processed in the prexor and non-prexor
cases.  This can be eliminated if handle_write_operations5 flags the
blocks to be processed in all cases via R5_Wantdrain.  The presence of
the prexor operation is tracked in sh->reconstruct_state.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Neil Brown <neilb@suse.de>
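The core of the change is easy to see in isolation: instead of re-deriving at
drain time which blocks participate (the rmw/rcw branches deleted below), the
scheduling step marks each block once and records whether a prexor pass is
involved; the drain loop then only tests and clears that mark.  A minimal
standalone sketch of the pattern, using simplified stand-in types and flag
fields rather than the kernel's stripe_head/r5dev structures:

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for illustration only -- not the
	 * kernel's r5dev/stripe_head or its atomic flag bits. */
	enum recon_state { RECON_RUN, RECON_DRAIN_RUN, RECON_PREXOR_DRAIN_RUN };

	struct dev {
		bool towrite;
		bool want_drain;	/* plays the role of R5_Wantdrain */
	};

	struct stripe {
		enum recon_state reconstruct_state;
		struct dev dev[4];
	};

	/* Scheduling step: decide *once* which blocks will be drained and
	 * record whether this pass is a read-modify-write (prexor). */
	static void schedule_write(struct stripe *sh, bool rmw)
	{
		sh->reconstruct_state = rmw ? RECON_PREXOR_DRAIN_RUN
					    : RECON_DRAIN_RUN;
		for (int i = 0; i < 4; i++)
			if (sh->dev[i].towrite)
				sh->dev[i].want_drain = true;
	}

	/* Drain step: no rmw/rcw case analysis left -- just test and clear
	 * the mark, as test_and_clear_bit(R5_Wantdrain, ...) does below. */
	static void run_drain(struct stripe *sh)
	{
		for (int i = 0; i < 4; i++) {
			if (sh->dev[i].want_drain) {
				sh->dev[i].want_drain = false;
				printf("draining block %d\n", i);
			}
		}
	}

	int main(void)
	{
		struct stripe sh = { .dev = { [1] = { .towrite = true },
					      [3] = { .towrite = true } } };
		schedule_write(&sh, true);	/* rmw write */
		run_drain(&sh);			/* drains blocks 1 and 3 */
		printf("prexor recorded: %d\n",
		       sh.reconstruct_state == RECON_PREXOR_DRAIN_RUN);
		return 0;
	}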
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--	drivers/md/raid5.c | 89
1 file changed, 29 insertions(+), 60 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b9159367491a..c71246061c0e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -637,7 +637,7 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 	for (i = disks; i--; ) {
 		struct r5dev *dev = &sh->dev[i];
 		/* Only process blocks that are known to be uptodate */
-		if (dev->towrite && test_bit(R5_Wantprexor, &dev->flags))
+		if (test_bit(R5_Wantdrain, &dev->flags))
 			xor_srcs[count++] = dev->page;
 	}
 
@@ -649,16 +649,10 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 }
 
 static struct dma_async_tx_descriptor *
-ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
-		 unsigned long ops_request)
+ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 {
 	int disks = sh->disks;
-	int pd_idx = sh->pd_idx, i;
-
-	/* check if prexor is active which means only process blocks
-	 * that are part of a read-modify-write (Wantprexor)
-	 */
-	int prexor = test_bit(STRIPE_OP_PREXOR, &ops_request);
+	int i;
 
 	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
@@ -666,20 +660,8 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
 	for (i = disks; i--; ) {
 		struct r5dev *dev = &sh->dev[i];
 		struct bio *chosen;
-		int towrite;
 
-		towrite = 0;
-		if (prexor) { /* rmw */
-			if (dev->towrite &&
-			    test_bit(R5_Wantprexor, &dev->flags))
-				towrite = 1;
-		} else { /* rcw */
-			if (i != pd_idx && dev->towrite &&
-			    test_bit(R5_LOCKED, &dev->flags))
-				towrite = 1;
-		}
-
-		if (towrite) {
+		if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
 			struct bio *wbi;
 
 			spin_lock(&sh->lock);
@@ -704,18 +686,6 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
 static void ops_complete_postxor(void *stripe_head_ref)
 {
 	struct stripe_head *sh = stripe_head_ref;
-
-	pr_debug("%s: stripe %llu\n", __func__,
-		(unsigned long long)sh->sector);
-
-	sh->reconstruct_state = reconstruct_state_result;
-	set_bit(STRIPE_HANDLE, &sh->state);
-	release_stripe(sh);
-}
-
-static void ops_complete_write(void *stripe_head_ref)
-{
-	struct stripe_head *sh = stripe_head_ref;
 	int disks = sh->disks, i, pd_idx = sh->pd_idx;
 
 	pr_debug("%s: stripe %llu\n", __func__,
@@ -727,14 +697,21 @@ static void ops_complete_write(void *stripe_head_ref)
 			set_bit(R5_UPTODATE, &dev->flags);
 	}
 
-	sh->reconstruct_state = reconstruct_state_drain_result;
+	if (sh->reconstruct_state == reconstruct_state_drain_run)
+		sh->reconstruct_state = reconstruct_state_drain_result;
+	else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
+		sh->reconstruct_state = reconstruct_state_prexor_drain_result;
+	else {
+		BUG_ON(sh->reconstruct_state != reconstruct_state_run);
+		sh->reconstruct_state = reconstruct_state_result;
+	}
+
 	set_bit(STRIPE_HANDLE, &sh->state);
 	release_stripe(sh);
 }
 
 static void
-ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
-		unsigned long ops_request)
+ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 {
 	/* kernel stack size limits the total number of disks */
 	int disks = sh->disks;
@@ -742,9 +719,8 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
 
 	int count = 0, pd_idx = sh->pd_idx, i;
 	struct page *xor_dest;
-	int prexor = test_bit(STRIPE_OP_PREXOR, &ops_request);
+	int prexor = 0;
 	unsigned long flags;
-	dma_async_tx_callback callback;
 
 	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
@@ -752,7 +728,8 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
 	/* check if prexor is active which means only process blocks
 	 * that are part of a read-modify-write (written)
 	 */
-	if (prexor) {
+	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
+		prexor = 1;
 		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
 		for (i = disks; i--; ) {
 			struct r5dev *dev = &sh->dev[i];
@@ -768,10 +745,6 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
 		}
 	}
 
-	/* check whether this postxor is part of a write */
-	callback = test_bit(STRIPE_OP_BIODRAIN, &ops_request) ?
-		ops_complete_write : ops_complete_postxor;
-
 	/* 1/ if we prexor'd then the dest is reused as a source
 	 * 2/ if we did not prexor then we are redoing the parity
 	 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
@@ -785,10 +758,10 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
 	if (unlikely(count == 1)) {
 		flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST);
 		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
-			flags, tx, callback, sh);
+			flags, tx, ops_complete_postxor, sh);
 	} else
 		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
-			flags, tx, callback, sh);
+			flags, tx, ops_complete_postxor, sh);
 }
 
 static void ops_complete_check(void *stripe_head_ref)
@@ -847,12 +820,12 @@ static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request)
 		tx = ops_run_prexor(sh, tx);
 
 	if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
-		tx = ops_run_biodrain(sh, tx, ops_request);
+		tx = ops_run_biodrain(sh, tx);
 		overlap_clear++;
 	}
 
 	if (test_bit(STRIPE_OP_POSTXOR, &ops_request))
-		ops_run_postxor(sh, tx, ops_request);
+		ops_run_postxor(sh, tx);
 
 	if (test_bit(STRIPE_OP_CHECK, &ops_request))
 		ops_run_check(sh);
@@ -1669,6 +1642,7 @@ handle_write_operations5(struct stripe_head *sh, struct stripe_head_state *s,
 
 		if (dev->towrite) {
 			set_bit(R5_LOCKED, &dev->flags);
+			set_bit(R5_Wantdrain, &dev->flags);
 			if (!expand)
 				clear_bit(R5_UPTODATE, &dev->flags);
 			s->locked++;
@@ -1681,7 +1655,7 @@ handle_write_operations5(struct stripe_head *sh, struct stripe_head_state *s,
 		BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
 			test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
 
-		sh->reconstruct_state = reconstruct_state_drain_run;
+		sh->reconstruct_state = reconstruct_state_prexor_drain_run;
 		set_bit(STRIPE_OP_PREXOR, &s->ops_request);
 		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
 		set_bit(STRIPE_OP_POSTXOR, &s->ops_request);
@@ -1691,15 +1665,10 @@ handle_write_operations5(struct stripe_head *sh, struct stripe_head_state *s,
 		if (i == pd_idx)
 			continue;
 
-		/* For a read-modify write there may be blocks that are
-		 * locked for reading while others are ready to be
-		 * written so we distinguish these blocks by the
-		 * R5_Wantprexor bit
-		 */
 		if (dev->towrite &&
 		    (test_bit(R5_UPTODATE, &dev->flags) ||
 		     test_bit(R5_Wantcompute, &dev->flags))) {
-			set_bit(R5_Wantprexor, &dev->flags);
+			set_bit(R5_Wantdrain, &dev->flags);
 			set_bit(R5_LOCKED, &dev->flags);
 			clear_bit(R5_UPTODATE, &dev->flags);
 			s->locked++;
@@ -2660,11 +2629,11 @@ static void handle_stripe5(struct stripe_head *sh)
 	 * completed
 	 */
 	prexor = 0;
-	if (sh->reconstruct_state == reconstruct_state_drain_result) {
+	if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
+		prexor = 1;
+	if (sh->reconstruct_state == reconstruct_state_drain_result ||
+	    sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
 		sh->reconstruct_state = reconstruct_state_idle;
-		for (i = disks; i--; )
-			prexor += test_and_clear_bit(R5_Wantprexor,
-				&sh->dev[i].flags);
 
 		/* All the 'written' buffers and the parity block are ready to
 		 * be written back to disk