author    FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>    2010-06-03 01:37:50 -0400
committer Roland Dreier <rolandd@cisco.com>                  2010-07-06 17:01:42 -0400
commit    f38926aa1dc5fbf7dfc5f97a53377b2e796dedc3
tree      fe3e2be8d12a6aca94890955e4164981b5891867
parent    67a3e12b05e055c0415c556a315a3d3eb637e29e
RDMA/cxgb4: Use the DMA state API instead of the pci equivalents
This replaces the PCI DMA state API (include/linux/pci-dma.h) with the DMA
equivalents, since the PCI DMA state API will be obsolete.

No functional change.

For further information about the background:
http://marc.info/?l=linux-netdev&m=127037540020276&w=2

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c        |  6 +++---
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h  |  2 +-
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c       |  4 ++--
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c        | 12 ++++++------
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h        |  6 +++---
5 files changed, 15 insertions(+), 15 deletions(-)
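
For reference, a minimal sketch of the before/after conversion pattern used throughout this patch. The structure and function names below (struct example_queue, setup_queue, teardown_queue) are hypothetical and for illustration only; only the DEFINE_DMA_UNMAP_ADDR()/dma_unmap_addr_set()/dma_unmap_addr() macros, and the DECLARE_PCI_UNMAP_ADDR()/pci_unmap_addr_set()/pci_unmap_addr() macros they replace, are real kernel interfaces.

/*
 * Sketch of the old vs. new DMA state API, assuming a driver-private
 * structure that must remember a DMA address for a later free/unmap.
 */
#include <linux/dma-mapping.h>

struct example_queue {
        void *queue;
        dma_addr_t dma_addr;
        DEFINE_DMA_UNMAP_ADDR(mapping); /* was: DECLARE_PCI_UNMAP_ADDR(mapping) */
};

static int setup_queue(struct device *dev, struct example_queue *q, size_t size)
{
        q->queue = dma_alloc_coherent(dev, size, &q->dma_addr, GFP_KERNEL);
        if (!q->queue)
                return -ENOMEM;

        /* was: pci_unmap_addr_set(q, mapping, q->dma_addr); */
        dma_unmap_addr_set(q, mapping, q->dma_addr);
        return 0;
}

static void teardown_queue(struct device *dev, struct example_queue *q, size_t size)
{
        /* was: pci_unmap_addr(q, mapping) */
        dma_free_coherent(dev, size, q->queue, dma_unmap_addr(q, mapping));
}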
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 2447f5295482..e1317f581168 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -77,7 +77,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
         kfree(cq->sw_queue);
         dma_free_coherent(&(rdev->lldi.pdev->dev),
                           cq->memsize, cq->queue,
-                          pci_unmap_addr(cq, mapping));
+                          dma_unmap_addr(cq, mapping));
         c4iw_put_cqid(rdev, cq->cqid, uctx);
         return ret;
 }
@@ -112,7 +112,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                 ret = -ENOMEM;
                 goto err3;
         }
-        pci_unmap_addr_set(cq, mapping, cq->dma_addr);
+        dma_unmap_addr_set(cq, mapping, cq->dma_addr);
         memset(cq->queue, 0, cq->memsize);
 
         /* build fw_ri_res_wr */
@@ -179,7 +179,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
         return 0;
 err4:
         dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
-                          pci_unmap_addr(cq, mapping));
+                          dma_unmap_addr(cq, mapping));
 err3:
         kfree(cq->sw_queue);
 err2:
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 277ab589b44d..d33e1a668811 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -261,7 +261,7 @@ static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
 
 struct c4iw_fr_page_list {
         struct ib_fast_reg_page_list ibpl;
-        DECLARE_PCI_UNMAP_ADDR(mapping);
+        DEFINE_DMA_UNMAP_ADDR(mapping);
         dma_addr_t dma_addr;
         struct c4iw_dev *dev;
         int size;
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 7f94da1a2437..82b5703b8947 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -764,7 +764,7 @@ struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
         if (!c4pl)
                 return ERR_PTR(-ENOMEM);
 
-        pci_unmap_addr_set(c4pl, mapping, dma_addr);
+        dma_unmap_addr_set(c4pl, mapping, dma_addr);
         c4pl->dma_addr = dma_addr;
         c4pl->dev = dev;
         c4pl->size = size;
@@ -779,7 +779,7 @@ void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
         struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);
 
         dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->size,
-                          c4pl, pci_unmap_addr(c4pl, mapping));
+                          c4pl, dma_unmap_addr(c4pl, mapping));
 }
 
 int c4iw_dereg_mr(struct ib_mr *ib_mr)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 0c28ed1eafa6..7065cb310553 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -40,10 +40,10 @@ static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
          */
         dma_free_coherent(&(rdev->lldi.pdev->dev),
                           wq->rq.memsize, wq->rq.queue,
-                          pci_unmap_addr(&wq->rq, mapping));
+                          dma_unmap_addr(&wq->rq, mapping));
         dma_free_coherent(&(rdev->lldi.pdev->dev),
                           wq->sq.memsize, wq->sq.queue,
-                          pci_unmap_addr(&wq->sq, mapping));
+                          dma_unmap_addr(&wq->sq, mapping));
         c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
         kfree(wq->rq.sw_rq);
         kfree(wq->sq.sw_sq);
@@ -99,7 +99,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
         if (!wq->sq.queue)
                 goto err5;
         memset(wq->sq.queue, 0, wq->sq.memsize);
-        pci_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
+        dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
 
         wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
                                           wq->rq.memsize, &(wq->rq.dma_addr),
@@ -112,7 +112,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
                  wq->rq.queue,
                  (unsigned long long)virt_to_phys(wq->rq.queue));
         memset(wq->rq.queue, 0, wq->rq.memsize);
-        pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
+        dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
 
         wq->db = rdev->lldi.db_reg;
         wq->gts = rdev->lldi.gts_reg;
@@ -217,11 +217,11 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 err7:
         dma_free_coherent(&(rdev->lldi.pdev->dev),
                           wq->rq.memsize, wq->rq.queue,
-                          pci_unmap_addr(&wq->rq, mapping));
+                          dma_unmap_addr(&wq->rq, mapping));
 err6:
         dma_free_coherent(&(rdev->lldi.pdev->dev),
                           wq->sq.memsize, wq->sq.queue,
-                          pci_unmap_addr(&wq->sq, mapping));
+                          dma_unmap_addr(&wq->sq, mapping));
 err5:
         c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
 err4:
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 1057cb96302e..9cf8d85bfcff 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -279,7 +279,7 @@ struct t4_swsqe {
 struct t4_sq {
         union t4_wr *queue;
         dma_addr_t dma_addr;
-        DECLARE_PCI_UNMAP_ADDR(mapping);
+        DEFINE_DMA_UNMAP_ADDR(mapping);
         struct t4_swsqe *sw_sq;
         struct t4_swsqe *oldest_read;
         u64 udb;
@@ -298,7 +298,7 @@ struct t4_swrqe {
 struct t4_rq {
         union t4_recv_wr *queue;
         dma_addr_t dma_addr;
-        DECLARE_PCI_UNMAP_ADDR(mapping);
+        DEFINE_DMA_UNMAP_ADDR(mapping);
         struct t4_swrqe *sw_rq;
         u64 udb;
         size_t memsize;
@@ -429,7 +429,7 @@ static inline int t4_wq_db_enabled(struct t4_wq *wq)
 struct t4_cq {
         struct t4_cqe *queue;
         dma_addr_t dma_addr;
-        DECLARE_PCI_UNMAP_ADDR(mapping);
+        DEFINE_DMA_UNMAP_ADDR(mapping);
         struct t4_cqe *sw_queue;
         void __iomem *gts;
         struct c4iw_rdev *rdev;