 drivers/md/Kconfig |   2
 drivers/md/raid5.c | 322
 drivers/md/raid5.h |   8
 3 files changed, 299 insertions(+), 33 deletions(-)
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 41b3ae25b813..abb8636bfde2 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -124,6 +124,8 @@ config MD_RAID456
         select MD_RAID6_PQ
         select ASYNC_MEMCPY
         select ASYNC_XOR
+        select ASYNC_PQ
+        select ASYNC_RAID6_RECOV
         ---help---
           A RAID-5 set of N drives with a capacity of C MB per drive provides
           the capacity of C * (N - 1) MB, and protects against a failure
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e3a2990bdc7c..e68616ed3e78 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -636,15 +636,16 @@ static void mark_target_uptodate(struct stripe_head *sh, int target)
         clear_bit(R5_Wantcompute, &tgt->flags);
 }
 
-static void ops_complete_compute5(void *stripe_head_ref)
+static void ops_complete_compute(void *stripe_head_ref)
 {
         struct stripe_head *sh = stripe_head_ref;
 
         pr_debug("%s: stripe %llu\n", __func__,
                 (unsigned long long)sh->sector);
 
-        /* mark the computed target as uptodate */
+        /* mark the computed target(s) as uptodate */
         mark_target_uptodate(sh, sh->ops.target);
+        mark_target_uptodate(sh, sh->ops.target2);
 
         clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
         if (sh->check_state == check_state_compute_run)
@@ -684,7 +685,7 @@ ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
         atomic_inc(&sh->count);
 
         init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
-                          ops_complete_compute5, sh, to_addr_conv(sh, percpu));
+                          ops_complete_compute, sh, to_addr_conv(sh, percpu));
         if (unlikely(count == 1))
                 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
         else
@@ -693,6 +694,197 @@ ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
         return tx;
 }
 
+/* set_syndrome_sources - populate source buffers for gen_syndrome
+ * @srcs - (struct page *) array of size sh->disks
+ * @sh - stripe_head to parse
+ *
+ * Populates srcs in proper layout order for the stripe and returns the
+ * 'count' of sources to be used in a call to async_gen_syndrome.  The P
+ * destination buffer is recorded in srcs[count] and the Q destination
+ * is recorded in srcs[count+1].
+ */
+static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
+{
+        int disks = sh->disks;
+        int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
+        int d0_idx = raid6_d0(sh);
+        int count;
+        int i;
+
+        for (i = 0; i < disks; i++)
+                srcs[i] = (void *)raid6_empty_zero_page;
+
+        count = 0;
+        i = d0_idx;
+        do {
+                int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
+
+                srcs[slot] = sh->dev[i].page;
+                i = raid6_next_disk(i, disks);
+        } while (i != d0_idx);
+        BUG_ON(count != syndrome_disks);
+
+        return count;
+}
+
+static struct dma_async_tx_descriptor *
+ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
+{
+        int disks = sh->disks;
+        struct page **blocks = percpu->scribble;
+        int target;
+        int qd_idx = sh->qd_idx;
+        struct dma_async_tx_descriptor *tx;
+        struct async_submit_ctl submit;
+        struct r5dev *tgt;
+        struct page *dest;
+        int i;
+        int count;
+
+        if (sh->ops.target < 0)
+                target = sh->ops.target2;
+        else if (sh->ops.target2 < 0)
+                target = sh->ops.target;
+        else
+                /* we should only have one valid target */
+                BUG();
+        BUG_ON(target < 0);
+        pr_debug("%s: stripe %llu block: %d\n",
+                __func__, (unsigned long long)sh->sector, target);
+
+        tgt = &sh->dev[target];
+        BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
+        dest = tgt->page;
+
+        atomic_inc(&sh->count);
+
+        if (target == qd_idx) {
+                count = set_syndrome_sources(blocks, sh);
+                blocks[count] = NULL; /* regenerating p is not necessary */
+                BUG_ON(blocks[count+1] != dest); /* q should already be set */
+                init_async_submit(&submit, 0, NULL, ops_complete_compute, sh,
+                                  to_addr_conv(sh, percpu));
+                tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
+        } else {
+                /* Compute any data- or p-drive using XOR */
+                count = 0;
+                for (i = disks; i-- ; ) {
+                        if (i == target || i == qd_idx)
+                                continue;
+                        blocks[count++] = sh->dev[i].page;
+                }
+
+                init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
+                                  ops_complete_compute, sh,
+                                  to_addr_conv(sh, percpu));
+                tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
+        }
 
+        return tx;
+}
+
+static struct dma_async_tx_descriptor *
+ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
+{
+        int i, count, disks = sh->disks;
+        int syndrome_disks = sh->ddf_layout ? disks : disks-2;
+        int d0_idx = raid6_d0(sh);
+        int faila = -1, failb = -1;
+        int target = sh->ops.target;
+        int target2 = sh->ops.target2;
+        struct r5dev *tgt = &sh->dev[target];
+        struct r5dev *tgt2 = &sh->dev[target2];
+        struct dma_async_tx_descriptor *tx;
+        struct page **blocks = percpu->scribble;
+        struct async_submit_ctl submit;
+
+        pr_debug("%s: stripe %llu block1: %d block2: %d\n",
+                 __func__, (unsigned long long)sh->sector, target, target2);
+        BUG_ON(target < 0 || target2 < 0);
+        BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
+        BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));
+
+        /* we need to open-code set_syndrome_sources to handle the
+         * slot number conversion for 'faila' and 'failb'
+         */
+        for (i = 0; i < disks ; i++)
+                blocks[i] = (void *)raid6_empty_zero_page;
+        count = 0;
+        i = d0_idx;
+        do {
+                int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
+
+                blocks[slot] = sh->dev[i].page;
+
+                if (i == target)
+                        faila = slot;
+                if (i == target2)
+                        failb = slot;
+                i = raid6_next_disk(i, disks);
+        } while (i != d0_idx);
+        BUG_ON(count != syndrome_disks);
+
+        BUG_ON(faila == failb);
+        if (failb < faila)
+                swap(faila, failb);
+        pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
+                 __func__, (unsigned long long)sh->sector, faila, failb);
+
+        atomic_inc(&sh->count);
+
+        if (failb == syndrome_disks+1) {
+                /* Q disk is one of the missing disks */
+                if (faila == syndrome_disks) {
+                        /* Missing P+Q, just recompute */
+                        init_async_submit(&submit, 0, NULL, ops_complete_compute,
+                                          sh, to_addr_conv(sh, percpu));
+                        return async_gen_syndrome(blocks, 0, count+2,
+                                                  STRIPE_SIZE, &submit);
+                } else {
+                        struct page *dest;
+                        int data_target;
+                        int qd_idx = sh->qd_idx;
+
+                        /* Missing D+Q: recompute D from P, then recompute Q */
+                        if (target == qd_idx)
+                                data_target = target2;
+                        else
+                                data_target = target;
+
+                        count = 0;
+                        for (i = disks; i-- ; ) {
+                                if (i == data_target || i == qd_idx)
+                                        continue;
+                                blocks[count++] = sh->dev[i].page;
+                        }
+                        dest = sh->dev[data_target].page;
+                        init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
+                                          NULL, NULL, to_addr_conv(sh, percpu));
+                        tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
+                                       &submit);
+
+                        count = set_syndrome_sources(blocks, sh);
+                        init_async_submit(&submit, 0, tx, ops_complete_compute,
+                                          sh, to_addr_conv(sh, percpu));
+                        return async_gen_syndrome(blocks, 0, count+2,
+                                                  STRIPE_SIZE, &submit);
+                }
+        }
+
+        init_async_submit(&submit, 0, NULL, ops_complete_compute, sh,
+                          to_addr_conv(sh, percpu));
+        if (failb == syndrome_disks) {
+                /* We're missing D+P. */
+                return async_raid6_datap_recov(syndrome_disks+2, STRIPE_SIZE,
+                                               faila, blocks, &submit);
+        } else {
+                /* We're missing D+D. */
+                return async_raid6_2data_recov(syndrome_disks+2, STRIPE_SIZE,
+                                               faila, failb, blocks, &submit);
+        }
+}
+
+
 static void ops_complete_prexor(void *stripe_head_ref)
 {
         struct stripe_head *sh = stripe_head_ref;
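
Which recovery path ops_run_compute6_2() takes is fully determined by the faila/failb slots computed above: slots 0..syndrome_disks-1 hold data, slot syndrome_disks holds P, slot syndrome_disks+1 holds Q, and faila < failb is guaranteed by the swap. A minimal sketch of that dispatch (hypothetical helper, not part of the patch):

/* Illustration only: names the strategy chosen for each failure pair. */
static const char *raid6_recovery_case(int faila, int failb, int syndrome_disks)
{
        if (failb == syndrome_disks + 1) {
                if (faila == syndrome_disks)
                        return "P+Q: regenerate both via async_gen_syndrome()";
                return "D+Q: rebuild D by XOR over P, then regenerate Q";
        }
        if (failb == syndrome_disks)
                return "D+P: async_raid6_datap_recov()";
        return "D+D: async_raid6_2data_recov()";
}

The D+Q case cannot use the generic recovery routines because Q itself is stale; hence the two-stage XOR-then-gen_syndrome chain in the hunk above.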
@@ -765,17 +957,21 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
         return tx;
 }
 
-static void ops_complete_postxor(void *stripe_head_ref)
+static void ops_complete_reconstruct(void *stripe_head_ref)
 {
         struct stripe_head *sh = stripe_head_ref;
-        int disks = sh->disks, i, pd_idx = sh->pd_idx;
+        int disks = sh->disks;
+        int pd_idx = sh->pd_idx;
+        int qd_idx = sh->qd_idx;
+        int i;
 
         pr_debug("%s: stripe %llu\n", __func__,
                 (unsigned long long)sh->sector);
 
         for (i = disks; i--; ) {
                 struct r5dev *dev = &sh->dev[i];
-                if (dev->written || i == pd_idx)
+
+                if (dev->written || i == pd_idx || i == qd_idx)
                         set_bit(R5_UPTODATE, &dev->flags);
         }
 
@@ -793,8 +989,8 @@ static void ops_complete_postxor(void *stripe_head_ref)
 }
 
 static void
-ops_run_postxor(struct stripe_head *sh, struct raid5_percpu *percpu,
+ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
                 struct dma_async_tx_descriptor *tx)
 {
         int disks = sh->disks;
         struct page **xor_srcs = percpu->scribble;
@@ -837,7 +1033,7 @@ ops_run_postxor(struct stripe_head *sh, struct raid5_percpu *percpu,
 
         atomic_inc(&sh->count);
 
-        init_async_submit(&submit, flags, tx, ops_complete_postxor, sh,
+        init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
                           to_addr_conv(sh, percpu));
         if (unlikely(count == 1))
                 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
@@ -845,6 +1041,25 @@ ops_run_postxor(struct stripe_head *sh, struct raid5_percpu *percpu,
                 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
 }
 
+static void
+ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
+                     struct dma_async_tx_descriptor *tx)
+{
+        struct async_submit_ctl submit;
+        struct page **blocks = percpu->scribble;
+        int count;
+
+        pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
+
+        count = set_syndrome_sources(blocks, sh);
+
+        atomic_inc(&sh->count);
+
+        init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
+                          sh, to_addr_conv(sh, percpu));
+        async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
+}
+
 static void ops_complete_check(void *stripe_head_ref)
 {
         struct stripe_head *sh = stripe_head_ref;
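
Both reconstruct6 and the compute paths lean on the layout contract of set_syndrome_sources(): data sources first, then the P and Q destination pages. A hypothetical 6-disk (4 data + P + Q) stripe, for illustration, together with the usual RAID-6 syndrome pair that async_gen_syndrome() computes over GF(2^8):

/*
 * set_syndrome_sources() returns count == 4 in this sketch:
 *
 *        blocks[0..3] = D0..D3        data sources, in raid6 slot order
 *        blocks[4]    = P page        srcs[count]
 *        blocks[5]    = Q page        srcs[count+1]
 *
 * async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit) then
 * (re)computes both parities in place:
 *
 *        P = D0 ^ D1 ^ ... ^ Dn-1
 *        Q = g^0*D0 ^ g^1*D1 ^ ... ^ g^(n-1)*Dn-1    (GF(2^8), g = {02})
 */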
@@ -857,23 +1072,28 @@ static void ops_complete_check(void *stripe_head_ref)
         release_stripe(sh);
 }
 
-static void ops_run_check(struct stripe_head *sh, struct raid5_percpu *percpu)
+static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
 {
         int disks = sh->disks;
+        int pd_idx = sh->pd_idx;
+        int qd_idx = sh->qd_idx;
+        struct page *xor_dest;
         struct page **xor_srcs = percpu->scribble;
         struct dma_async_tx_descriptor *tx;
         struct async_submit_ctl submit;
-
-        int count = 0, pd_idx = sh->pd_idx, i;
-        struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
+        int count;
+        int i;
 
         pr_debug("%s: stripe %llu\n", __func__,
                 (unsigned long long)sh->sector);
 
+        count = 0;
+        xor_dest = sh->dev[pd_idx].page;
+        xor_srcs[count++] = xor_dest;
         for (i = disks; i--; ) {
-                struct r5dev *dev = &sh->dev[i];
-                if (i != pd_idx)
-                        xor_srcs[count++] = dev->page;
+                if (i == pd_idx || i == qd_idx)
+                        continue;
+                xor_srcs[count++] = sh->dev[i].page;
         }
 
         init_async_submit(&submit, 0, NULL, NULL, NULL,
@@ -886,11 +1106,32 @@ static void ops_run_check(struct stripe_head *sh, struct raid5_percpu *percpu)
         tx = async_trigger_callback(&submit);
 }
 
-static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request)
+static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
+{
+        struct page **srcs = percpu->scribble;
+        struct async_submit_ctl submit;
+        int count;
+
+        pr_debug("%s: stripe %llu checkp: %d\n", __func__,
+                (unsigned long long)sh->sector, checkp);
+
+        count = set_syndrome_sources(srcs, sh);
+        if (!checkp)
+                srcs[count] = NULL;
+
+        atomic_inc(&sh->count);
+        init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
+                          sh, to_addr_conv(sh, percpu));
+        async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
+                           &sh->ops.zero_sum_result, percpu->spare_page, &submit);
+}
+
+static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
 {
         int overlap_clear = 0, i, disks = sh->disks;
         struct dma_async_tx_descriptor *tx = NULL;
         raid5_conf_t *conf = sh->raid_conf;
+        int level = conf->level;
         struct raid5_percpu *percpu;
         unsigned long cpu;
 
@@ -902,9 +1143,16 @@ static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request)
         }
 
         if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
-                tx = ops_run_compute5(sh, percpu);
-                /* terminate the chain if postxor is not set to be run */
-                if (tx && !test_bit(STRIPE_OP_POSTXOR, &ops_request))
+                if (level < 6)
+                        tx = ops_run_compute5(sh, percpu);
+                else {
+                        if (sh->ops.target2 < 0 || sh->ops.target < 0)
+                                tx = ops_run_compute6_1(sh, percpu);
+                        else
+                                tx = ops_run_compute6_2(sh, percpu);
+                }
+                /* terminate the chain if reconstruct is not set to be run */
+                if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
                         async_tx_ack(tx);
         }
 
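
The new target/target2 pair encodes how many blocks a compute operation must produce; -1 marks an unused slot, and the raid5 paths (fetch_block5, handle_parity_checks5) always set target2 = -1. A sketch of the convention the dispatch above relies on (hypothetical helper, not in the patch):

static int compute_targets(struct stripe_head *sh)
{
        if (sh->ops.target < 0 && sh->ops.target2 < 0)
                return 0;        /* nothing to compute */
        if (sh->ops.target < 0 || sh->ops.target2 < 0)
                return 1;        /* one block missing: ops_run_compute6_1() */
        return 2;                /* two blocks missing: ops_run_compute6_2() */
}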
@@ -916,11 +1164,23 @@ static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request)
                 overlap_clear++;
         }
 
-        if (test_bit(STRIPE_OP_POSTXOR, &ops_request))
-                ops_run_postxor(sh, percpu, tx);
+        if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
+                if (level < 6)
+                        ops_run_reconstruct5(sh, percpu, tx);
+                else
+                        ops_run_reconstruct6(sh, percpu, tx);
+        }
 
-        if (test_bit(STRIPE_OP_CHECK, &ops_request))
-                ops_run_check(sh, percpu);
+        if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
+                if (sh->check_state == check_state_run)
+                        ops_run_check_p(sh, percpu);
+                else if (sh->check_state == check_state_run_q)
+                        ops_run_check_pq(sh, percpu, 0);
+                else if (sh->check_state == check_state_run_pq)
+                        ops_run_check_pq(sh, percpu, 1);
+                else
+                        BUG();
+        }
 
         if (overlap_clear)
                 for (i = disks; i--; ) {
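
For the check path, ops_run_check_pq() distinguishes a Q-only check (checkp == 0, which NULLs the P slot so async_syndrome_val() skips it) from a full P+Q check. Either way the verdict lands in sh->ops.zero_sum_result; a sketch of how it might be decoded, assuming the SUM_CHECK_P_RESULT/SUM_CHECK_Q_RESULT bits of the async_tx sum_check_flags API introduced alongside this series (the real handling belongs to handle_parity_checks6()):

/* Hypothetical decode of the check result, illustration only. */
static void report_parity_result(struct stripe_head *sh)
{
        if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT)
                pr_debug("stripe %llu: P mismatch\n",
                         (unsigned long long)sh->sector);
        if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT)
                pr_debug("stripe %llu: Q mismatch\n",
                         (unsigned long long)sh->sector);
}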
@@ -1931,7 +2191,7 @@ schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s,
         } else
                 sh->reconstruct_state = reconstruct_state_run;
 
-        set_bit(STRIPE_OP_POSTXOR, &s->ops_request);
+        set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
 
         for (i = disks; i--; ) {
                 struct r5dev *dev = &sh->dev[i];
@@ -1954,7 +2214,7 @@ schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s,
                 sh->reconstruct_state = reconstruct_state_prexor_drain_run;
                 set_bit(STRIPE_OP_PREXOR, &s->ops_request);
                 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
-                set_bit(STRIPE_OP_POSTXOR, &s->ops_request);
+                set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
 
                 for (i = disks; i--; ) {
                         struct r5dev *dev = &sh->dev[i];
@@ -2206,9 +2466,10 @@ static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s,
                 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
                 set_bit(R5_Wantcompute, &dev->flags);
                 sh->ops.target = disk_idx;
+                sh->ops.target2 = -1;
                 s->req_compute = 1;
                 /* Careful: from this point on 'uptodate' is in the eye
-                 * of raid5_run_ops which services 'compute' operations
+                 * of raid_run_ops which services 'compute' operations
                  * before writes. R5_Wantcompute flags a block that will
                  * be R5_UPTODATE by the time it is needed for a
                  * subsequent operation.
@@ -2435,8 +2696,8 @@ static void handle_stripe_dirtying5(raid5_conf_t *conf,
          */
         /* since handle_stripe can be called at any time we need to handle the
          * case where a compute block operation has been submitted and then a
-         * subsequent call wants to start a write request.  raid5_run_ops only
-         * handles the case where compute block and postxor are requested
+         * subsequent call wants to start a write request.  raid_run_ops only
+         * handles the case where compute block and reconstruct are requested
          * simultaneously.  If this is not the case then new writes need to be
          * held off until the compute completes.
          */
@@ -2618,6 +2879,7 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
                                 set_bit(R5_Wantcompute,
                                         &sh->dev[sh->pd_idx].flags);
                                 sh->ops.target = sh->pd_idx;
+                                sh->ops.target2 = -1;
                                 s->uptodate++;
                         }
                 }
@@ -3067,7 +3329,7 @@ static bool handle_stripe5(struct stripe_head *sh)
                 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
 
         if (s.ops_request)
-                raid5_run_ops(sh, s.ops_request);
+                raid_run_ops(sh, s.ops_request);
 
         ops_run_io(sh, &s);
 
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 75f2c6c4cf90..116d0b44b2a9 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -176,7 +176,9 @@
  */
 enum check_states {
         check_state_idle = 0,
-        check_state_run, /* parity check */
+        check_state_run, /* xor parity check */
+        check_state_run_q, /* q-parity check */
+        check_state_run_pq, /* pq dual parity check */
         check_state_check_result,
         check_state_compute_run, /* parity repair */
         check_state_compute_result,
@@ -216,7 +218,7 @@ struct stripe_head {
          * @target - STRIPE_OP_COMPUTE_BLK target
          */
         struct stripe_operations {
-                int target;
+                int target, target2;
                 enum sum_check_flags zero_sum_result;
         } ops;
         struct r5dev {
@@ -299,7 +301,7 @@ struct r6_state {
 #define STRIPE_OP_COMPUTE_BLK        1
 #define STRIPE_OP_PREXOR        2
 #define STRIPE_OP_BIODRAIN        3
-#define STRIPE_OP_POSTXOR        4
+#define STRIPE_OP_RECONSTRUCT        4
 #define STRIPE_OP_CHECK        5
 
 /*