author    Linus Torvalds <torvalds@linux-foundation.org>    2010-07-08 15:20:54 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2010-07-08 15:20:54 -0400
commit    e467e104bb7482170b79f516d2025e7cfcaaa733 (patch)
tree      d9de9b008b2cec2e5f46e7bbc83cef50d3d5d288 /drivers/infiniband/hw/cxgb4
parent    b9f399594d12e353dcb609c25219bdaa76c2a050 (diff)
parent    9e770044a0f08a6dcf245152ec1575f7cb0b9631 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IPoIB: Fix world-writable child interface control sysfs attributes
  IB/qib: Clean up properly if qib_init() fails
  IB/qib: Completion queue callback needs to be single threaded
  IB/qib: Update 7322 serdes tables
  IB/qib: Clear 6120 hardware error register
  IB/qib: Clear eager buffer memory for each new process
  IB/qib: Mask hardware error during link reset
  IB/qib: Don't mark VL15 bufs as WC to avoid a rare 7322 chip problem
  RDMA/cxgb4: Derive smac_idx from port viid
  RDMA/cxgb4: Avoid false GTS CIDX_INC overflows
  RDMA/cxgb4: Don't call abort_connection() for active connect failures
  RDMA/cxgb4: Use the DMA state API instead of the pci equivalents
Diffstat (limited to 'drivers/infiniband/hw/cxgb4')
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c        | 12
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c        | 31
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h  |  2
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c       |  4
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c        | 12
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h        |  6
6 files changed, 42 insertions(+), 25 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 30ce0a8eca09..855ee44fdb52 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -969,7 +969,8 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 		goto err;
 	goto out;
 err:
-	abort_connection(ep, skb, GFP_KERNEL);
+	state_set(&ep->com, ABORTING);
+	send_abort(ep, skb, GFP_KERNEL);
 out:
 	connect_reply_upcall(ep, err);
 	return;
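In the error path above, abort_connection() is replaced by its two constituent steps, presumably so that a failed active connect is reported to the IWCM only through connect_reply_upcall() (per the merge log: "Don't call abort_connection() for active connect failures"). state_set() is the driver's locked endpoint state transition; roughly (a sketch from memory of cm.c in this era, debug output omitted):

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	unsigned long flags;

	/* the endpoint state machine is protected by epc->lock */
	spin_lock_irqsave(&epc->lock, flags);
	epc->state = new;
	spin_unlock_irqrestore(&epc->lock, flags);
}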
@@ -1372,7 +1373,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 			       pdev, 0);
 		mtu = pdev->mtu;
 		tx_chan = cxgb4_port_chan(pdev);
-		smac_idx = tx_chan << 1;
+		smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
 		step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
 		txq_idx = cxgb4_port_idx(pdev) * step;
 		step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
@@ -1383,7 +1384,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 			       dst->neighbour->dev, 0);
 		mtu = dst_mtu(dst);
 		tx_chan = cxgb4_port_chan(dst->neighbour->dev);
-		smac_idx = tx_chan << 1;
+		smac_idx = (cxgb4_port_viid(dst->neighbour->dev) & 0x7F) << 1;
 		step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
 		txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
 		step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
@@ -1950,7 +1951,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 			       pdev, 0);
 		ep->mtu = pdev->mtu;
 		ep->tx_chan = cxgb4_port_chan(pdev);
-		ep->smac_idx = ep->tx_chan << 1;
+		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
 		step = ep->com.dev->rdev.lldi.ntxq /
 		       ep->com.dev->rdev.lldi.nchan;
 		ep->txq_idx = cxgb4_port_idx(pdev) * step;
@@ -1965,7 +1966,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 			       ep->dst->neighbour->dev, 0);
 		ep->mtu = dst_mtu(ep->dst);
 		ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev);
-		ep->smac_idx = ep->tx_chan << 1;
+		ep->smac_idx = (cxgb4_port_viid(ep->dst->neighbour->dev) &
+				0x7F) << 1;
 		step = ep->com.dev->rdev.lldi.ntxq /
 		       ep->com.dev->rdev.lldi.nchan;
 		ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step;
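All four call sites above stop deriving the source-MAC-table index from the TX channel and instead take it from the port's virtual interface ID: the low 7 bits of the VIID identify the VI, and the << 1 suggests each VI owns a pair of SMT entries (the pair layout is an inference from the shift; the commit subject only says "Derive smac_idx from port viid"). A hypothetical helper capturing the repeated expression, not present in the driver:

static unsigned int viid_smac_idx(const struct net_device *dev)
{
	/* low 7 bits of the VIID select the VI; shift for its 2-entry SMT slot */
	return (cxgb4_port_viid(dev) & 0x7F) << 1;
}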
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 2447f5295482..fac5c6e68011 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -77,7 +77,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	kfree(cq->sw_queue);
 	dma_free_coherent(&(rdev->lldi.pdev->dev),
 			  cq->memsize, cq->queue,
-			  pci_unmap_addr(cq, mapping));
+			  dma_unmap_addr(cq, mapping));
 	c4iw_put_cqid(rdev, cq->cqid, uctx);
 	return ret;
 }
@@ -112,7 +112,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 		ret = -ENOMEM;
 		goto err3;
 	}
-	pci_unmap_addr_set(cq, mapping, cq->dma_addr);
+	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
 	memset(cq->queue, 0, cq->memsize);
 
 	/* build fw_ri_res_wr */
@@ -179,7 +179,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	return 0;
 err4:
 	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
-			  pci_unmap_addr(cq, mapping));
+			  dma_unmap_addr(cq, mapping));
 err3:
 	kfree(cq->sw_queue);
 err2:
@@ -764,7 +764,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 	struct c4iw_create_cq_resp uresp;
 	struct c4iw_ucontext *ucontext = NULL;
 	int ret;
-	size_t memsize;
+	size_t memsize, hwentries;
 	struct c4iw_mm_entry *mm, *mm2;
 
 	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
@@ -788,14 +788,29 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 	 * entries must be multiple of 16 for HW.
 	 */
 	entries = roundup(entries, 16);
-	memsize = entries * sizeof *chp->cq.queue;
+
+	/*
+	 * Make actual HW queue 2x to avoid cidx_inc overflows.
+	 */
+	hwentries = entries * 2;
+
+	/*
+	 * Make HW queue at least 64 entries so GTS updates aren't too
+	 * frequent.
+	 */
+	if (hwentries < 64)
+		hwentries = 64;
+
+	memsize = hwentries * sizeof *chp->cq.queue;
 
 	/*
 	 * memsize must be a multiple of the page size if it's a user cq.
 	 */
-	if (ucontext)
+	if (ucontext) {
 		memsize = roundup(memsize, PAGE_SIZE);
-	chp->cq.size = entries;
+		hwentries = memsize / sizeof *chp->cq.queue;
+	}
+	chp->cq.size = hwentries;
 	chp->cq.memsize = memsize;
 
 	ret = create_cq(&rhp->rdev, &chp->cq,
@@ -805,7 +820,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 
 	chp->rhp = rhp;
 	chp->cq.size--;			/* status page */
-	chp->ibcq.cqe = chp->cq.size - 1;
+	chp->ibcq.cqe = entries - 2;
 	spin_lock_init(&chp->lock);
 	atomic_set(&chp->refcnt, 1);
 	init_waitqueue_head(&chp->wait);
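The sizing change above allocates a hardware queue twice the requested depth (minimum 64) so that deferred CIDX_INC updates cannot overflow the GTS accounting, while the depth visible to the consumer stays at entries - 2. A standalone restatement of that policy (cq_hwentries() is a hypothetical helper; the driver open-codes this in c4iw_create_cq()):

static size_t cq_hwentries(size_t entries, size_t cqe_size,
			   size_t page_size, bool is_user, size_t *memsize)
{
	size_t hwentries;

	entries = roundup(entries, 16);		/* HW wants a multiple of 16 */
	hwentries = entries * 2;		/* 2x to absorb cidx_inc */
	if (hwentries < 64)			/* keep GTS updates infrequent */
		hwentries = 64;
	*memsize = hwentries * cqe_size;
	if (is_user) {				/* user CQs map whole pages */
		*memsize = roundup(*memsize, page_size);
		hwentries = *memsize / cqe_size;
	}
	return hwentries;
}

For example, entries = 40 rounds up to 48 and doubles to 96 CQEs; assuming 64-byte CQEs and 4 KiB pages, a user CQ then rounds 6144 bytes up to 8192 and grows to 128 hardware entries, yet ibcq.cqe still reports only 46 (48 - 2) to the consumer.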
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 277ab589b44d..d33e1a668811 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -261,7 +261,7 @@ static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
 
 struct c4iw_fr_page_list {
 	struct ib_fast_reg_page_list ibpl;
-	DECLARE_PCI_UNMAP_ADDR(mapping);
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 	dma_addr_t dma_addr;
 	struct c4iw_dev *dev;
 	int size;
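DEFINE_DMA_UNMAP_ADDR() replaces the PCI-specific DECLARE_PCI_UNMAP_ADDR() throughout this merge; both exist so the stashed bus address compiles away on platforms that need no unmap state. The generic versions live in include/linux/dma-mapping.h and look approximately like this (reproduced from memory of the 2.6.35-era header):

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)          ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)          (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#endif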
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 7f94da1a2437..82b5703b8947 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -764,7 +764,7 @@ struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
 	if (!c4pl)
 		return ERR_PTR(-ENOMEM);
 
-	pci_unmap_addr_set(c4pl, mapping, dma_addr);
+	dma_unmap_addr_set(c4pl, mapping, dma_addr);
 	c4pl->dma_addr = dma_addr;
 	c4pl->dev = dev;
 	c4pl->size = size;
@@ -779,7 +779,7 @@ void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
 	struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);
 
 	dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->size,
-			  c4pl, pci_unmap_addr(c4pl, mapping));
+			  c4pl, dma_unmap_addr(c4pl, mapping));
 }
 
 int c4iw_dereg_mr(struct ib_mr *ib_mr)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 0c28ed1eafa6..7065cb310553 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -40,10 +40,10 @@ static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 	 */
 	dma_free_coherent(&(rdev->lldi.pdev->dev),
 			  wq->rq.memsize, wq->rq.queue,
-			  pci_unmap_addr(&wq->rq, mapping));
+			  dma_unmap_addr(&wq->rq, mapping));
 	dma_free_coherent(&(rdev->lldi.pdev->dev),
 			  wq->sq.memsize, wq->sq.queue,
-			  pci_unmap_addr(&wq->sq, mapping));
+			  dma_unmap_addr(&wq->sq, mapping));
 	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
 	kfree(wq->rq.sw_rq);
 	kfree(wq->sq.sw_sq);
@@ -99,7 +99,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 	if (!wq->sq.queue)
 		goto err5;
 	memset(wq->sq.queue, 0, wq->sq.memsize);
-	pci_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
+	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
 
 	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
 					  wq->rq.memsize, &(wq->rq.dma_addr),
@@ -112,7 +112,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 		wq->rq.queue,
 		(unsigned long long)virt_to_phys(wq->rq.queue));
 	memset(wq->rq.queue, 0, wq->rq.memsize);
-	pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
+	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
 
 	wq->db = rdev->lldi.db_reg;
 	wq->gts = rdev->lldi.gts_reg;
@@ -217,11 +217,11 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 err7:
 	dma_free_coherent(&(rdev->lldi.pdev->dev),
 			  wq->rq.memsize, wq->rq.queue,
-			  pci_unmap_addr(&wq->rq, mapping));
+			  dma_unmap_addr(&wq->rq, mapping));
 err6:
 	dma_free_coherent(&(rdev->lldi.pdev->dev),
 			  wq->sq.memsize, wq->sq.queue,
-			  pci_unmap_addr(&wq->sq, mapping));
+			  dma_unmap_addr(&wq->sq, mapping));
 err5:
 	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
 err4:
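Every allocation/teardown pair in create_qp()/destroy_qp() follows the same idiom after the rename: stash the bus address at allocation time, read it back at free time. A minimal self-contained sketch (alloc_sq()/free_sq() are hypothetical wrappers; "dev" stands for &rdev->lldi.pdev->dev in the driver):

static int alloc_sq(struct device *dev, struct t4_sq *sq)
{
	sq->queue = dma_alloc_coherent(dev, sq->memsize,
				       &sq->dma_addr, GFP_KERNEL);
	if (!sq->queue)
		return -ENOMEM;
	memset(sq->queue, 0, sq->memsize);
	dma_unmap_addr_set(sq, mapping, sq->dma_addr);	/* stash for unmap */
	return 0;
}

static void free_sq(struct device *dev, struct t4_sq *sq)
{
	dma_free_coherent(dev, sq->memsize, sq->queue,
			  dma_unmap_addr(sq, mapping));	/* read it back */
}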
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 1057cb96302e..9cf8d85bfcff 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -279,7 +279,7 @@ struct t4_swsqe {
 struct t4_sq {
 	union t4_wr *queue;
 	dma_addr_t dma_addr;
-	DECLARE_PCI_UNMAP_ADDR(mapping);
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 	struct t4_swsqe *sw_sq;
 	struct t4_swsqe *oldest_read;
 	u64 udb;
@@ -298,7 +298,7 @@ struct t4_swrqe {
 struct t4_rq {
 	union t4_recv_wr *queue;
 	dma_addr_t dma_addr;
-	DECLARE_PCI_UNMAP_ADDR(mapping);
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 	struct t4_swrqe *sw_rq;
 	u64 udb;
 	size_t memsize;
@@ -429,7 +429,7 @@ static inline int t4_wq_db_enabled(struct t4_wq *wq)
 struct t4_cq {
 	struct t4_cqe *queue;
 	dma_addr_t dma_addr;
-	DECLARE_PCI_UNMAP_ADDR(mapping);
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 	struct t4_cqe *sw_queue;
 	void __iomem *gts;
 	struct c4iw_rdev *rdev;