path: root/drivers/dma/mv_xor.c
author		Ezequiel Garcia <ezequiel.garcia@free-electrons.com>	2013-12-10 07:32:37 -0500
committer	Dan Williams <dan.j.williams@intel.com>	2014-04-10 15:17:34 -0400
commit		b8c01d259a08d75c5049b2bd5f579648262c30a4 (patch)
tree		f4d3edd27f2af1c4ea74bde4f46cb8aaad7cf257 /drivers/dma/mv_xor.c
parent		1fde2548460c1ded9fe71220b506473f0b7de768 (diff)
dma: mv_xor: Add DMA API error checks
This commit adds proper error checking for various DMA API calls, as reported by DMA_API_DEBUG=y.

Signed-off-by: Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/dma/mv_xor.c')
-rw-r--r--	drivers/dma/mv_xor.c	56
1 file changed, 52 insertions(+), 4 deletions(-)
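For reference, both self-tests adopt the same error-checking sequence that DMA_API_DEBUG=y expects: validate every dma_map_page() with dma_mapping_error(), check that descriptor preparation did not return NULL, and check the submit cookie with dma_submit_error(). The sketch below only illustrates that sequence; the sketch_memcpy_check() helper, its error labels, and the direct use of device_prep_dma_memcpy()/tx_submit() are not taken from the driver, and the issue_pending/completion-polling path of the real self-tests is omitted.

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>

/* Illustrative helper, not part of mv_xor.c. */
static int sketch_memcpy_check(struct dma_chan *chan,
			       struct page *src, struct page *dst)
{
	struct device *dev = chan->device->dev;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	dma_addr_t src_dma, dst_dma;
	int err = 0;

	/* Every dma_map_page() must be validated with dma_mapping_error(). */
	src_dma = dma_map_page(dev, src, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, src_dma))
		return -ENOMEM;

	dst_dma = dma_map_page(dev, dst, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dst_dma)) {
		err = -ENOMEM;
		goto unmap_src;
	}

	/* Descriptor preparation may fail and return NULL. */
	tx = chan->device->device_prep_dma_memcpy(chan, dst_dma, src_dma,
						  PAGE_SIZE, 0);
	if (!tx) {
		err = -ENODEV;
		goto unmap_dst;
	}

	/* The submit cookie must be checked with dma_submit_error(). */
	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie))
		err = -ENODEV;

unmap_dst:
	dma_unmap_page(dev, dst_dma, PAGE_SIZE, DMA_FROM_DEVICE);
unmap_src:
	dma_unmap_page(dev, src_dma, PAGE_SIZE, DMA_TO_DEVICE);
	return err;
}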
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 766b68ed505c..e70f271c99fa 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -784,7 +784,7 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
 
 static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 {
-	int i;
+	int i, ret;
 	void *src, *dest;
 	dma_addr_t src_dma, dest_dma;
 	struct dma_chan *dma_chan;
@@ -821,19 +821,44 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 
 	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
 				 PAGE_SIZE, DMA_TO_DEVICE);
-	unmap->to_cnt = 1;
 	unmap->addr[0] = src_dma;
 
+	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
+	if (ret) {
+		err = -ENOMEM;
+		goto free_resources;
+	}
+	unmap->to_cnt = 1;
+
 	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
 				  PAGE_SIZE, DMA_FROM_DEVICE);
-	unmap->from_cnt = 1;
 	unmap->addr[1] = dest_dma;
 
+	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
+	if (ret) {
+		err = -ENOMEM;
+		goto free_resources;
+	}
+	unmap->from_cnt = 1;
 	unmap->len = PAGE_SIZE;
 
 	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
 				    PAGE_SIZE, 0);
+	if (!tx) {
+		dev_err(dma_chan->device->dev,
+			"Self-test cannot prepare operation, disabling\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
+
 	cookie = mv_xor_tx_submit(tx);
+	if (dma_submit_error(cookie)) {
+		dev_err(dma_chan->device->dev,
+			"Self-test submit error, disabling\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
+
 	mv_xor_issue_pending(dma_chan);
 	async_tx_ack(tx);
 	msleep(1);
@@ -868,7 +893,7 @@ out:
 static int
 mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 {
-	int i, src_idx;
+	int i, src_idx, ret;
 	struct page *dest;
 	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
 	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
@@ -931,19 +956,42 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
 					      0, PAGE_SIZE, DMA_TO_DEVICE);
 		dma_srcs[i] = unmap->addr[i];
+		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
+		if (ret) {
+			err = -ENOMEM;
+			goto free_resources;
+		}
 		unmap->to_cnt++;
 	}
 
 	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
 				      DMA_FROM_DEVICE);
 	dest_dma = unmap->addr[src_count];
+	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
+	if (ret) {
+		err = -ENOMEM;
+		goto free_resources;
+	}
 	unmap->from_cnt = 1;
 	unmap->len = PAGE_SIZE;
 
 	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
 				 src_count, PAGE_SIZE, 0);
+	if (!tx) {
+		dev_err(dma_chan->device->dev,
+			"Self-test cannot prepare operation, disabling\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
 
 	cookie = mv_xor_tx_submit(tx);
+	if (dma_submit_error(cookie)) {
+		dev_err(dma_chan->device->dev,
+			"Self-test submit error, disabling\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
+
 	mv_xor_issue_pending(dma_chan);
 	async_tx_ack(tx);
 	msleep(8);