author    Ezequiel Garcia <ezequiel.garcia@free-electrons.com>  2013-12-10 07:32:36 -0500
committer Dan Williams <dan.j.williams@intel.com>               2013-12-13 01:48:53 -0500
commit    d16695a75019ac4baad7a117dc86d1d292e09115 (patch)
tree      be847b67118164debe7748c8b40b55465053b2dc
parent    3cc377b9ae4bd3133bf8ba388d2b2b66b2b973c1 (diff)
dma: mv_xor: Use dmaengine_unmap_data for the self-tests
The driver-specific unmap code was removed in:

    commit 54f8d501e842879143e867e70996574a54d1e130
    Author: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
    Date:   Fri Oct 18 19:35:32 2013 +0200

        dmaengine: remove DMA unmap from drivers

which had the side-effect of not unmapping the self-test mappings.
Fix this by using dmaengine_unmap_data in the self-test routines.

In addition, since dmaengine_unmap() assumes that all mappings were
created with dma_map_page, this commit changes the single mappings to
page mappings to avoid an incorrect unmapping of the memcpy self-test.
The allocation could be changed to be alloc_page(), but sticking to
kmalloc results in a less intrusive patch.

The size of the test buffer is increased, since dma_map_page() seems
to fail when the source and destination pages are the same page.

Signed-off-by: Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
 drivers/dma/mv_xor.c | 71
 1 file changed, 50 insertions(+), 21 deletions(-)
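For context, a minimal sketch of the dmaengine_unmap_data pattern this patch applies in the memcpy self-test below. This is a condensed illustration, not the driver code verbatim; dev, src and dest stand in for the channel's device and the two kmalloc'd test buffers:

	struct dmaengine_unmap_data *unmap;

	/* Reserve slots for two mappings: one source, one destination. */
	unmap = dmaengine_get_unmap_data(dev, 2, GFP_KERNEL);
	if (!unmap)
		return -ENOMEM;

	/*
	 * dmaengine_unmap() undoes page mappings only, so even kmalloc'd
	 * buffers are mapped with dma_map_page() via virt_to_page().
	 */
	unmap->addr[0] = dma_map_page(dev, virt_to_page(src), 0,
				      PAGE_SIZE, DMA_TO_DEVICE);
	unmap->to_cnt = 1;
	unmap->addr[1] = dma_map_page(dev, virt_to_page(dest), 0,
				      PAGE_SIZE, DMA_FROM_DEVICE);
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	/* ... submit the transaction using unmap->addr[0..1] ... */

	/* Dropping the last reference unmaps every address recorded above. */
	dmaengine_unmap_put(unmap);

The same pattern, extended to src_count source pages plus one destination page, appears in the XOR self-test further down.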
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 23bcc9158cbc..79620887d637 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -781,7 +781,6 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
 /*
  * Perform a transaction to verify the HW works.
  */
-#define MV_XOR_TEST_SIZE 2000
 
 static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 {
@@ -791,20 +790,21 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 	struct dma_chan *dma_chan;
 	dma_cookie_t cookie;
 	struct dma_async_tx_descriptor *tx;
+	struct dmaengine_unmap_data *unmap;
 	int err = 0;
 
-	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
 	if (!src)
 		return -ENOMEM;
 
-	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
 	if (!dest) {
 		kfree(src);
 		return -ENOMEM;
 	}
 
 	/* Fill in src buffer */
-	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
+	for (i = 0; i < PAGE_SIZE; i++)
 		((u8 *) src)[i] = (u8)i;
 
 	dma_chan = &mv_chan->dmachan;
@@ -813,14 +813,26 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 		goto out;
 	}
 
-	dest_dma = dma_map_single(dma_chan->device->dev, dest,
-				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
+	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
+	if (!unmap) {
+		err = -ENOMEM;
+		goto free_resources;
+	}
+
+	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
+			       PAGE_SIZE, DMA_TO_DEVICE);
+	unmap->to_cnt = 1;
+	unmap->addr[0] = src_dma;
 
-	src_dma = dma_map_single(dma_chan->device->dev, src,
-				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
+	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
+				PAGE_SIZE, DMA_FROM_DEVICE);
+	unmap->from_cnt = 1;
+	unmap->addr[1] = dest_dma;
+
+	unmap->len = PAGE_SIZE;
 
 	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
-				    MV_XOR_TEST_SIZE, 0);
+				    PAGE_SIZE, 0);
 	cookie = mv_xor_tx_submit(tx);
 	mv_xor_issue_pending(dma_chan);
 	async_tx_ack(tx);
@@ -835,8 +847,8 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 	}
 
 	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
-				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
-	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
+				PAGE_SIZE, DMA_FROM_DEVICE);
+	if (memcmp(src, dest, PAGE_SIZE)) {
 		dev_err(dma_chan->device->dev,
 			"Self-test copy failed compare, disabling\n");
 		err = -ENODEV;
@@ -844,6 +856,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 	}
 
 free_resources:
+	dmaengine_unmap_put(unmap);
 	mv_xor_free_chan_resources(dma_chan);
 out:
 	kfree(src);
@@ -861,13 +874,15 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
 	dma_addr_t dest_dma;
 	struct dma_async_tx_descriptor *tx;
+	struct dmaengine_unmap_data *unmap;
 	struct dma_chan *dma_chan;
 	dma_cookie_t cookie;
 	u8 cmp_byte = 0;
 	u32 cmp_word;
 	int err = 0;
+	int src_count = MV_XOR_NUM_SRC_TEST;
 
-	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+	for (src_idx = 0; src_idx < src_count; src_idx++) {
 		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
 		if (!xor_srcs[src_idx]) {
 			while (src_idx--)
@@ -884,13 +899,13 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 	}
 
 	/* Fill in src buffers */
-	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+	for (src_idx = 0; src_idx < src_count; src_idx++) {
 		u8 *ptr = page_address(xor_srcs[src_idx]);
 		for (i = 0; i < PAGE_SIZE; i++)
 			ptr[i] = (1 << src_idx);
 	}
 
-	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
+	for (src_idx = 0; src_idx < src_count; src_idx++)
 		cmp_byte ^= (u8) (1 << src_idx);
 
 	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
@@ -904,16 +919,29 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 		goto out;
 	}
 
+	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
+					 GFP_KERNEL);
+	if (!unmap) {
+		err = -ENOMEM;
+		goto free_resources;
+	}
+
 	/* test xor */
-	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
-				DMA_FROM_DEVICE);
+	for (i = 0; i < src_count; i++) {
+		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
+					      0, PAGE_SIZE, DMA_TO_DEVICE);
+		dma_srcs[i] = unmap->addr[i];
+		unmap->to_cnt++;
+	}
 
-	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
-		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
-					   0, PAGE_SIZE, DMA_TO_DEVICE);
+	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
+					      DMA_FROM_DEVICE);
+	dest_dma = unmap->addr[src_count];
+	unmap->from_cnt = 1;
+	unmap->len = PAGE_SIZE;
 
 	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
-				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
+				 src_count, PAGE_SIZE, 0);
 
 	cookie = mv_xor_tx_submit(tx);
 	mv_xor_issue_pending(dma_chan);
@@ -942,9 +970,10 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 	}
 
 free_resources:
+	dmaengine_unmap_put(unmap);
 	mv_xor_free_chan_resources(dma_chan);
 out:
-	src_idx = MV_XOR_NUM_SRC_TEST;
+	src_idx = src_count;
 	while (src_idx--)
 		__free_page(xor_srcs[src_idx]);
 	__free_page(dest);