author     Dan Williams <dan.j.williams@intel.com>   2009-06-03 14:43:59 -0400
committer  Dan Williams <dan.j.williams@intel.com>   2009-06-03 17:07:35 -0400
commit     a08abd8ca890a377521d65d493d174bebcaf694b (patch)
tree       987c149a2d7d6ab345f426ac28191627b4a02a3e /drivers
parent     88ba2aa586c874681c072101287e15d40de7e6e2 (diff)
async_tx: structify submission arguments, add scribble
Prepare the api for the arrival of a new parameter, 'scribble'. This
will allow callers to identify scratchpad memory for dma address or page
address conversions. As this adds yet another parameter, take this
opportunity to convert the common submission parameters (flags,
dependency, callback, and callback argument) into an object that is
passed by reference.
Also, take this opportunity to fix up the kerneldoc and add notes about
the relevant ASYNC_TX_* flags for each routine.
[ Impact: moves api pass-by-value parameters to a pass-by-reference struct ]
Signed-off-by: Andre Noll <maan@systemlinux.org>
Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/md/raid5.c   59
1 file changed, 34 insertions, 25 deletions
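
To make the conversion easier to follow before the patch body, here is a minimal before/after sketch of one raid5.c submission site, assembled from the call sites in the diff below. It is illustrative only: the struct async_submit_ctl definition and the init_async_submit() prototype live in the async_tx headers, which fall outside this drivers-only diffstat.

/* Before: flags, dependency, callback, and callback argument are all
 * passed by value on every call.
 */
tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
               ASYNC_TX_XOR_DROP_DST, tx,
               ops_complete_prexor, sh);

/* After: the common submission parameters are gathered once into a
 * struct async_submit_ctl and passed by reference.  The trailing NULL
 * is the new 'scribble' scratchpad argument, which raid5 does not use
 * yet in this patch.
 */
struct async_submit_ctl submit;

init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST, tx,
                  ops_complete_prexor, sh, NULL);
tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

/* The dependency can also be updated in place between submissions, as
 * async_copy_data() now does when chaining copies in a loop:
 */
submit.depend_tx = tx;
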
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 0ef5362c8d02..e1920f23579f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -499,11 +499,14 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
         struct page *bio_page;
         int i;
         int page_offset;
+        struct async_submit_ctl submit;
 
         if (bio->bi_sector >= sector)
                 page_offset = (signed)(bio->bi_sector - sector) * 512;
         else
                 page_offset = (signed)(sector - bio->bi_sector) * -512;
+
+        init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
         bio_for_each_segment(bvl, bio, i) {
                 int len = bio_iovec_idx(bio, i)->bv_len;
                 int clen;
@@ -525,13 +528,14 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
                         bio_page = bio_iovec_idx(bio, i)->bv_page;
                         if (frombio)
                                 tx = async_memcpy(page, bio_page, page_offset,
-                                                  b_offset, clen, 0,
-                                                  tx, NULL, NULL);
+                                                  b_offset, clen, &submit);
                         else
                                 tx = async_memcpy(bio_page, page, b_offset,
-                                                  page_offset, clen, 0,
-                                                  tx, NULL, NULL);
+                                                  page_offset, clen, &submit);
                 }
+                /* chain the operations */
+                submit.depend_tx = tx;
+
                 if (clen < len) /* hit end of page */
                         break;
                 page_offset += len;
@@ -590,6 +594,7 @@ static void ops_run_biofill(struct stripe_head *sh)
 {
         struct dma_async_tx_descriptor *tx = NULL;
         raid5_conf_t *conf = sh->raid_conf;
+        struct async_submit_ctl submit;
         int i;
 
         pr_debug("%s: stripe %llu\n", __func__,
@@ -613,7 +618,8 @@ static void ops_run_biofill(struct stripe_head *sh)
         }
 
         atomic_inc(&sh->count);
-        async_trigger_callback(ASYNC_TX_ACK, tx, ops_complete_biofill, sh);
+        init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
+        async_trigger_callback(&submit);
 }
 
 static void ops_complete_compute5(void *stripe_head_ref)
@@ -645,6 +651,7 @@ static struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh)
         struct page *xor_dest = tgt->page;
         int count = 0;
         struct dma_async_tx_descriptor *tx;
+        struct async_submit_ctl submit;
         int i;
 
         pr_debug("%s: stripe %llu block: %d\n",
@@ -657,13 +664,12 @@ static struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh)
 
         atomic_inc(&sh->count);
 
+        init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
+                          ops_complete_compute5, sh, NULL);
         if (unlikely(count == 1))
-                tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
-                                  0, NULL, ops_complete_compute5, sh);
+                tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
         else
-                tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
-                               ASYNC_TX_XOR_ZERO_DST, NULL,
-                               ops_complete_compute5, sh);
+                tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
 
         return tx;
 }
@@ -683,6 +689,7 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
         int disks = sh->disks;
         struct page *xor_srcs[disks];
         int count = 0, pd_idx = sh->pd_idx, i;
+        struct async_submit_ctl submit;
 
         /* existing parity data subtracted */
         struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
@@ -697,9 +704,9 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
                         xor_srcs[count++] = dev->page;
         }
 
-        tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
-                       ASYNC_TX_XOR_DROP_DST, tx,
-                       ops_complete_prexor, sh);
+        init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST, tx,
+                          ops_complete_prexor, sh, NULL);
+        tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
 
         return tx;
 }
@@ -772,7 +779,7 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
         /* kernel stack size limits the total number of disks */
         int disks = sh->disks;
         struct page *xor_srcs[disks];
-
+        struct async_submit_ctl submit;
         int count = 0, pd_idx = sh->pd_idx, i;
         struct page *xor_dest;
         int prexor = 0;
@@ -811,13 +818,11 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 
         atomic_inc(&sh->count);
 
-        if (unlikely(count == 1)) {
-                flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST);
-                tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
-                                  flags, tx, ops_complete_postxor, sh);
-        } else
-                tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
-                               flags, tx, ops_complete_postxor, sh);
+        init_async_submit(&submit, flags, tx, ops_complete_postxor, sh, NULL);
+        if (unlikely(count == 1))
+                tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
+        else
+                tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
 }
 
 static void ops_complete_check(void *stripe_head_ref)
@@ -838,6 +843,7 @@ static void ops_run_check(struct stripe_head *sh)
         int disks = sh->disks;
         struct page *xor_srcs[disks];
         struct dma_async_tx_descriptor *tx;
+        struct async_submit_ctl submit;
 
         int count = 0, pd_idx = sh->pd_idx, i;
         struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
@@ -851,12 +857,13 @@ static void ops_run_check(struct stripe_head *sh)
                         xor_srcs[count++] = dev->page;
         }
 
+        init_async_submit(&submit, 0, NULL, NULL, NULL, NULL);
         tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
-                           &sh->ops.zero_sum_result, 0, NULL, NULL, NULL);
+                           &sh->ops.zero_sum_result, &submit);
 
         atomic_inc(&sh->count);
-        tx = async_trigger_callback(ASYNC_TX_ACK, tx,
-                ops_complete_check, sh);
+        init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
+        tx = async_trigger_callback(&submit);
 }
 
 static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request)
@@ -2664,6 +2671,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
                 if (i != sh->pd_idx && i != sh->qd_idx) {
                         int dd_idx, j;
                         struct stripe_head *sh2;
+                        struct async_submit_ctl submit;
 
                         sector_t bn = compute_blocknr(sh, i, 1);
                         sector_t s = raid5_compute_sector(conf, bn, 0,
@@ -2683,9 +2691,10 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
                         }
 
                         /* place all the copies on one channel */
+                        init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
                         tx = async_memcpy(sh2->dev[dd_idx].page,
                                           sh->dev[i].page, 0, 0, STRIPE_SIZE,
-                                          0, tx, NULL, NULL);
+                                          &submit);
 
                         set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
                         set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);