author     Ralph Campbell <ralph.campbell@qlogic.com>	2008-12-05 14:13:18 -0500
committer  Roland Dreier <rolandd@cisco.com>	2008-12-05 14:13:18 -0500
commit     890fccb2427d53b48ab9d009fd87d55bcb173f62 (patch)
tree       52d8f1bbb2dad19bb79c147beac3119fe0ae4ed8 /drivers/infiniband/hw/ipath
parent     fab01fc56063dafcc083f481ac0f9e6b5a576dd6 (diff)
IB/ipath: Check return value of dma_map_single()
This fixes an obvious oversight where the return value is not checked for error.

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
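For context, the fix follows the standard rule for streaming DMA mappings: an address returned by dma_map_single() must be checked with dma_mapping_error() before it is handed to hardware, since the mapping itself can fail (for example when an IOMMU runs out of space). Below is a minimal sketch of that pattern, assuming a generic caller; the helper name, device pointer, and buffer are illustrative placeholders, not the ipath driver's own code.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/*
 * Illustrative sketch only: map a caller-owned buffer for a
 * CPU-to-device transfer and verify the mapping before using
 * the bus address.  "dev", "buf" and "len" are placeholders
 * for whatever the real caller owns.
 */
static int map_tx_buffer(struct device *dev, void *buf, size_t len,
			 dma_addr_t *addr_out)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -EIO;	/* never pass a failed mapping to the HW */

	*addr_out = addr;
	return 0;
}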
Diffstat (limited to 'drivers/infiniband/hw/ipath')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sdma.c | 21 ++++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c
index 284c9bca517e..8e255adf5d9b 100644
--- a/drivers/infiniband/hw/ipath/ipath_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_sdma.c
@@ -698,10 +698,8 @@ retry:
 
 	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
 			      tx->map_len, DMA_TO_DEVICE);
-	if (dma_mapping_error(&dd->pcidev->dev, addr)) {
-		ret = -EIO;
-		goto unlock;
-	}
+	if (dma_mapping_error(&dd->pcidev->dev, addr))
+		goto ioerr;
 
 	dwoffset = tx->map_len >> 2;
 	make_sdma_desc(dd, sdmadesc, (u64) addr, dwoffset, 0);
@@ -741,6 +739,8 @@ retry:
 		dw = (len + 3) >> 2;
 		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
 				      DMA_TO_DEVICE);
+		if (dma_mapping_error(&dd->pcidev->dev, addr))
+			goto unmap;
 		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
 		/* SDmaUseLargeBuf has to be set in every descriptor */
 		if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
@@ -798,7 +798,18 @@ retry:
 	list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist);
 	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_VL15)
 		vl15_watchdog_enq(dd);
-
+	goto unlock;
+
+unmap:
+	while (tail != dd->ipath_sdma_descq_tail) {
+		if (!tail)
+			tail = dd->ipath_sdma_descq_cnt - 1;
+		else
+			tail--;
+		unmap_desc(dd, tail);
+	}
+ioerr:
+	ret = -EIO;
 unlock:
 	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
 fail:
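The interesting part of the patch is the new unwind path: when a mapping fails part-way through building the request, it walks the descriptor ring backwards from the current tail to the saved tail, unmapping each descriptor that had already been written, then falls through to ioerr to report -EIO. The following sketch shows that unwind-on-failure idiom in isolation; the ring size and the do_map()/undo_map() helpers are hypothetical stand-ins, not the ipath SDMA code.

#include <errno.h>
#include <stdbool.h>

#define RING_SIZE 16			/* hypothetical descriptor ring size */

/* Hypothetical per-slot setup/teardown helpers. */
static bool do_map(unsigned int idx) { (void)idx; return true; }
static void undo_map(unsigned int idx) { (void)idx; }

/*
 * Post "count" descriptors into a circular ring starting at *tailp
 * (assumes count < RING_SIZE; a real driver checks for free slots
 * first).  If a mapping fails part-way through, walk backwards from
 * the current position to the starting tail and undo every slot that
 * was already set up -- the same shape as the patch's "unmap:" loop.
 */
static int post_descs(unsigned int *tailp, unsigned int count)
{
	unsigned int start = *tailp;
	unsigned int tail = start;
	unsigned int i;

	for (i = 0; i < count; i++) {
		if (!do_map(tail))
			goto unwind;
		if (++tail == RING_SIZE)	/* wrap the circular index */
			tail = 0;
	}
	*tailp = tail;			/* commit the new tail on success */
	return 0;

unwind:
	while (tail != start) {
		if (!tail)
			tail = RING_SIZE - 1;
		else
			tail--;
		undo_map(tail);
	}
	return -EIO;			/* mirrors the ioerr: path */
}

Keeping the cleanup behind goto labels at the end of the function is the usual kernel style: the success path stays linear, and every failure point jumps to exactly the amount of unwinding it needs.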