author     Dan Williams <dan.j.williams@intel.com>   2009-09-08 20:42:50 -0400
committer  Dan Williams <dan.j.williams@intel.com>   2009-09-08 20:42:50 -0400
commit     0403e3827788d878163f9ef0541b748b0f88ca5d (patch)
tree       2dc73744bd92c268a1310f24668167f130877278 /drivers/md
parent     f9dd2134374c8de6b911e2b8652c6c9622eaa658 (diff)
dmaengine: add fence support
Some engines optimize operation by reading ahead in the descriptor chain such that descriptor2 may start execution before descriptor1 completes. If descriptor2 depends on the result from descriptor1 then a fence is required (on descriptor2) to disable this optimization.

The async_tx api could implicitly identify dependencies via the 'depend_tx' parameter, but that would constrain cases where the dependency chain only specifies a completion order rather than a data dependency. So, provide an ASYNC_TX_FENCE to explicitly identify data dependencies.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
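A minimal sketch of how a submitter might use the new flag (the helper name, its arguments and the operation pairing below are hypothetical, not part of this patch): a descriptor that consumes the result of an earlier descriptor is chained through the depend_tx parameter and additionally fenced, whereas a descriptor that merely needs to run afterwards can stay unfenced.

#include <linux/async_tx.h>

/* Hypothetical helper for illustration only: XOR 'src_cnt' source pages
 * into 'dest', then copy the freshly computed result into 'copy'.  The
 * copy reads data produced by the XOR, i.e. a data dependency rather
 * than a bare ordering dependency, so it requests a fence. */
static struct dma_async_tx_descriptor *
xor_then_copy(struct page *dest, struct page **srcs, int src_cnt,
              struct page *copy, size_t len, addr_conv_t *scribble)
{
        struct dma_async_tx_descriptor *tx;
        struct async_submit_ctl submit;

        /* op1: produce the XOR result in 'dest'; nothing upstream, no fence */
        init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, NULL, NULL,
                          scribble);
        tx = async_xor(dest, srcs, 0, src_cnt, len, &submit);

        /* op2: consumes 'dest', so chain on 'tx' via depend_tx and set
         * ASYNC_TX_FENCE to keep a read-ahead engine from starting the
         * copy before op1's result is visible */
        init_async_submit(&submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
        return async_memcpy(copy, dest, 0, 0, len, &submit);
}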
Diffstat (limited to 'drivers/md')
-rw-r--r--   drivers/md/raid5.c   37
1 file changed, 23 insertions(+), 14 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 0a5cf2171214..54ef8d75541d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -502,13 +502,17 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
         int i;
         int page_offset;
         struct async_submit_ctl submit;
+        enum async_tx_flags flags = 0;

         if (bio->bi_sector >= sector)
                 page_offset = (signed)(bio->bi_sector - sector) * 512;
         else
                 page_offset = (signed)(sector - bio->bi_sector) * -512;

-        init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
+        if (frombio)
+                flags |= ASYNC_TX_FENCE;
+        init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
+
         bio_for_each_segment(bvl, bio, i) {
                 int len = bio_iovec_idx(bio, i)->bv_len;
                 int clen;
@@ -685,7 +689,7 @@ ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)

         atomic_inc(&sh->count);

-        init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
+        init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
                           ops_complete_compute, sh, to_addr_conv(sh, percpu));
         if (unlikely(count == 1))
                 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
@@ -763,7 +767,8 @@ ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
                 count = set_syndrome_sources(blocks, sh);
                 blocks[count] = NULL; /* regenerating p is not necessary */
                 BUG_ON(blocks[count+1] != dest); /* q should already be set */
-                init_async_submit(&submit, 0, NULL, ops_complete_compute, sh,
+                init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
+                                  ops_complete_compute, sh,
                                   to_addr_conv(sh, percpu));
                 tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
         } else {
@@ -775,8 +780,8 @@ ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
                         blocks[count++] = sh->dev[i].page;
                 }

-                init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
-                                  ops_complete_compute, sh,
+                init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
+                                  NULL, ops_complete_compute, sh,
                                   to_addr_conv(sh, percpu));
                 tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
         }
@@ -837,8 +842,9 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
                 /* Q disk is one of the missing disks */
                 if (faila == syndrome_disks) {
                         /* Missing P+Q, just recompute */
-                        init_async_submit(&submit, 0, NULL, ops_complete_compute,
-                                          sh, to_addr_conv(sh, percpu));
+                        init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
+                                          ops_complete_compute, sh,
+                                          to_addr_conv(sh, percpu));
                         return async_gen_syndrome(blocks, 0, count+2,
                                                   STRIPE_SIZE, &submit);
                 } else {
@@ -859,21 +865,24 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
                                 blocks[count++] = sh->dev[i].page;
                         }
                         dest = sh->dev[data_target].page;
-                        init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
-                                          NULL, NULL, to_addr_conv(sh, percpu));
+                        init_async_submit(&submit,
+                                          ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
+                                          NULL, NULL, NULL,
+                                          to_addr_conv(sh, percpu));
                         tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
                                        &submit);

                         count = set_syndrome_sources(blocks, sh);
-                        init_async_submit(&submit, 0, tx, ops_complete_compute,
-                                          sh, to_addr_conv(sh, percpu));
+                        init_async_submit(&submit, ASYNC_TX_FENCE, tx,
+                                          ops_complete_compute, sh,
+                                          to_addr_conv(sh, percpu));
                         return async_gen_syndrome(blocks, 0, count+2,
                                                   STRIPE_SIZE, &submit);
                 }
         }

-        init_async_submit(&submit, 0, NULL, ops_complete_compute, sh,
-                          to_addr_conv(sh, percpu));
+        init_async_submit(&submit, ASYNC_TX_FENCE, NULL, ops_complete_compute,
+                          sh, to_addr_conv(sh, percpu));
         if (failb == syndrome_disks) {
                 /* We're missing D+P. */
                 return async_raid6_datap_recov(syndrome_disks+2, STRIPE_SIZE,
@@ -916,7 +925,7 @@ ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
                         xor_srcs[count++] = dev->page;
         }

-        init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST, tx,
+        init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
                           ops_complete_prexor, sh, to_addr_conv(sh, percpu));
         tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
