author    | Linus Torvalds <torvalds@linux-foundation.org> | 2010-07-08 15:20:54 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-07-08 15:20:54 -0400
commit    | e467e104bb7482170b79f516d2025e7cfcaaa733 (patch)
tree      | d9de9b008b2cec2e5f46e7bbc83cef50d3d5d288
parent    | b9f399594d12e353dcb609c25219bdaa76c2a050 (diff)
parent    | 9e770044a0f08a6dcf245152ec1575f7cb0b9631 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
IPoIB: Fix world-writable child interface control sysfs attributes
IB/qib: Clean up properly if qib_init() fails
IB/qib: Completion queue callback needs to be single threaded
IB/qib: Update 7322 serdes tables
IB/qib: Clear 6120 hardware error register
IB/qib: Clear eager buffer memory for each new process
IB/qib: Mask hardware error during link reset
IB/qib: Don't mark VL15 bufs as WC to avoid a rare 7322 chip problem
RDMA/cxgb4: Derive smac_idx from port viid
RDMA/cxgb4: Avoid false GTS CIDX_INC overflows
RDMA/cxgb4: Don't call abort_connection() for active connect failures
RDMA/cxgb4: Use the DMA state API instead of the pci equivalents
-rw-r--r-- | drivers/infiniband/hw/cxgb4/cm.c          | 12
-rw-r--r-- | drivers/infiniband/hw/cxgb4/cq.c          | 31
-rw-r--r-- | drivers/infiniband/hw/cxgb4/iw_cxgb4.h    |  2
-rw-r--r-- | drivers/infiniband/hw/cxgb4/mem.c         |  4
-rw-r--r-- | drivers/infiniband/hw/cxgb4/qp.c          | 12
-rw-r--r-- | drivers/infiniband/hw/cxgb4/t4.h          |  6
-rw-r--r-- | drivers/infiniband/hw/qib/qib.h           |  1
-rw-r--r-- | drivers/infiniband/hw/qib/qib_7322_regs.h | 48
-rw-r--r-- | drivers/infiniband/hw/qib/qib_diag.c      | 19
-rw-r--r-- | drivers/infiniband/hw/qib/qib_iba6120.c   |  3
-rw-r--r-- | drivers/infiniband/hw/qib/qib_iba7322.c   | 43
-rw-r--r-- | drivers/infiniband/hw/qib/qib_init.c      | 21
-rw-r--r-- | drivers/infiniband/hw/qib/qib_pcie.c      |  2
-rw-r--r-- | drivers/infiniband/hw/qib/qib_tx.c        |  6
-rw-r--r-- | drivers/infiniband/ulp/ipoib/ipoib_main.c |  4
15 files changed, 148 insertions(+), 66 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 30ce0a8eca09..855ee44fdb52 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -969,7 +969,8 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
969 | goto err; | 969 | goto err; |
970 | goto out; | 970 | goto out; |
971 | err: | 971 | err: |
972 | abort_connection(ep, skb, GFP_KERNEL); | 972 | state_set(&ep->com, ABORTING); |
973 | send_abort(ep, skb, GFP_KERNEL); | ||
973 | out: | 974 | out: |
974 | connect_reply_upcall(ep, err); | 975 | connect_reply_upcall(ep, err); |
975 | return; | 976 | return; |
@@ -1372,7 +1373,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
1372 | pdev, 0); | 1373 | pdev, 0); |
1373 | mtu = pdev->mtu; | 1374 | mtu = pdev->mtu; |
1374 | tx_chan = cxgb4_port_chan(pdev); | 1375 | tx_chan = cxgb4_port_chan(pdev); |
1375 | smac_idx = tx_chan << 1; | 1376 | smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; |
1376 | step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan; | 1377 | step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan; |
1377 | txq_idx = cxgb4_port_idx(pdev) * step; | 1378 | txq_idx = cxgb4_port_idx(pdev) * step; |
1378 | step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; | 1379 | step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; |
@@ -1383,7 +1384,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
1383 | dst->neighbour->dev, 0); | 1384 | dst->neighbour->dev, 0); |
1384 | mtu = dst_mtu(dst); | 1385 | mtu = dst_mtu(dst); |
1385 | tx_chan = cxgb4_port_chan(dst->neighbour->dev); | 1386 | tx_chan = cxgb4_port_chan(dst->neighbour->dev); |
1386 | smac_idx = tx_chan << 1; | 1387 | smac_idx = (cxgb4_port_viid(dst->neighbour->dev) & 0x7F) << 1; |
1387 | step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan; | 1388 | step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan; |
1388 | txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step; | 1389 | txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step; |
1389 | step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; | 1390 | step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; |
@@ -1950,7 +1951,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1950 | pdev, 0); | 1951 | pdev, 0); |
1951 | ep->mtu = pdev->mtu; | 1952 | ep->mtu = pdev->mtu; |
1952 | ep->tx_chan = cxgb4_port_chan(pdev); | 1953 | ep->tx_chan = cxgb4_port_chan(pdev); |
1953 | ep->smac_idx = ep->tx_chan << 1; | 1954 | ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; |
1954 | step = ep->com.dev->rdev.lldi.ntxq / | 1955 | step = ep->com.dev->rdev.lldi.ntxq / |
1955 | ep->com.dev->rdev.lldi.nchan; | 1956 | ep->com.dev->rdev.lldi.nchan; |
1956 | ep->txq_idx = cxgb4_port_idx(pdev) * step; | 1957 | ep->txq_idx = cxgb4_port_idx(pdev) * step; |
@@ -1965,7 +1966,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1965 | ep->dst->neighbour->dev, 0); | 1966 | ep->dst->neighbour->dev, 0); |
1966 | ep->mtu = dst_mtu(ep->dst); | 1967 | ep->mtu = dst_mtu(ep->dst); |
1967 | ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev); | 1968 | ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev); |
1968 | ep->smac_idx = ep->tx_chan << 1; | 1969 | ep->smac_idx = (cxgb4_port_viid(ep->dst->neighbour->dev) & |
1970 | 0x7F) << 1; | ||
1969 | step = ep->com.dev->rdev.lldi.ntxq / | 1971 | step = ep->com.dev->rdev.lldi.ntxq / |
1970 | ep->com.dev->rdev.lldi.nchan; | 1972 | ep->com.dev->rdev.lldi.nchan; |
1971 | ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step; | 1973 | ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step; |
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 2447f5295482..fac5c6e68011 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -77,7 +77,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
77 | kfree(cq->sw_queue); | 77 | kfree(cq->sw_queue); |
78 | dma_free_coherent(&(rdev->lldi.pdev->dev), | 78 | dma_free_coherent(&(rdev->lldi.pdev->dev), |
79 | cq->memsize, cq->queue, | 79 | cq->memsize, cq->queue, |
80 | pci_unmap_addr(cq, mapping)); | 80 | dma_unmap_addr(cq, mapping)); |
81 | c4iw_put_cqid(rdev, cq->cqid, uctx); | 81 | c4iw_put_cqid(rdev, cq->cqid, uctx); |
82 | return ret; | 82 | return ret; |
83 | } | 83 | } |
@@ -112,7 +112,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
112 | ret = -ENOMEM; | 112 | ret = -ENOMEM; |
113 | goto err3; | 113 | goto err3; |
114 | } | 114 | } |
115 | pci_unmap_addr_set(cq, mapping, cq->dma_addr); | 115 | dma_unmap_addr_set(cq, mapping, cq->dma_addr); |
116 | memset(cq->queue, 0, cq->memsize); | 116 | memset(cq->queue, 0, cq->memsize); |
117 | 117 | ||
118 | /* build fw_ri_res_wr */ | 118 | /* build fw_ri_res_wr */ |
@@ -179,7 +179,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
179 | return 0; | 179 | return 0; |
180 | err4: | 180 | err4: |
181 | dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue, | 181 | dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue, |
182 | pci_unmap_addr(cq, mapping)); | 182 | dma_unmap_addr(cq, mapping)); |
183 | err3: | 183 | err3: |
184 | kfree(cq->sw_queue); | 184 | kfree(cq->sw_queue); |
185 | err2: | 185 | err2: |
@@ -764,7 +764,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
764 | struct c4iw_create_cq_resp uresp; | 764 | struct c4iw_create_cq_resp uresp; |
765 | struct c4iw_ucontext *ucontext = NULL; | 765 | struct c4iw_ucontext *ucontext = NULL; |
766 | int ret; | 766 | int ret; |
767 | size_t memsize; | 767 | size_t memsize, hwentries; |
768 | struct c4iw_mm_entry *mm, *mm2; | 768 | struct c4iw_mm_entry *mm, *mm2; |
769 | 769 | ||
770 | PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries); | 770 | PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries); |
@@ -788,14 +788,29 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
788 | * entries must be multiple of 16 for HW. | 788 | * entries must be multiple of 16 for HW. |
789 | */ | 789 | */ |
790 | entries = roundup(entries, 16); | 790 | entries = roundup(entries, 16); |
791 | memsize = entries * sizeof *chp->cq.queue; | 791 | |
792 | /* | ||
793 | * Make actual HW queue 2x to avoid cdix_inc overflows. | ||
794 | */ | ||
795 | hwentries = entries * 2; | ||
796 | |||
797 | /* | ||
798 | * Make HW queue at least 64 entries so GTS updates aren't too | ||
799 | * frequent. | ||
800 | */ | ||
801 | if (hwentries < 64) | ||
802 | hwentries = 64; | ||
803 | |||
804 | memsize = hwentries * sizeof *chp->cq.queue; | ||
792 | 805 | ||
793 | /* | 806 | /* |
794 | * memsize must be a multiple of the page size if its a user cq. | 807 | * memsize must be a multiple of the page size if its a user cq. |
795 | */ | 808 | */ |
796 | if (ucontext) | 809 | if (ucontext) { |
797 | memsize = roundup(memsize, PAGE_SIZE); | 810 | memsize = roundup(memsize, PAGE_SIZE); |
798 | chp->cq.size = entries; | 811 | hwentries = memsize / sizeof *chp->cq.queue; |
812 | } | ||
813 | chp->cq.size = hwentries; | ||
799 | chp->cq.memsize = memsize; | 814 | chp->cq.memsize = memsize; |
800 | 815 | ||
801 | ret = create_cq(&rhp->rdev, &chp->cq, | 816 | ret = create_cq(&rhp->rdev, &chp->cq, |
@@ -805,7 +820,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
805 | 820 | ||
806 | chp->rhp = rhp; | 821 | chp->rhp = rhp; |
807 | chp->cq.size--; /* status page */ | 822 | chp->cq.size--; /* status page */ |
808 | chp->ibcq.cqe = chp->cq.size - 1; | 823 | chp->ibcq.cqe = entries - 2; |
809 | spin_lock_init(&chp->lock); | 824 | spin_lock_init(&chp->lock); |
810 | atomic_set(&chp->refcnt, 1); | 825 | atomic_set(&chp->refcnt, 1); |
811 | init_waitqueue_head(&chp->wait); | 826 | init_waitqueue_head(&chp->wait); |
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 277ab589b44d..d33e1a668811 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -261,7 +261,7 @@ static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
261 | 261 | ||
262 | struct c4iw_fr_page_list { | 262 | struct c4iw_fr_page_list { |
263 | struct ib_fast_reg_page_list ibpl; | 263 | struct ib_fast_reg_page_list ibpl; |
264 | DECLARE_PCI_UNMAP_ADDR(mapping); | 264 | DEFINE_DMA_UNMAP_ADDR(mapping); |
265 | dma_addr_t dma_addr; | 265 | dma_addr_t dma_addr; |
266 | struct c4iw_dev *dev; | 266 | struct c4iw_dev *dev; |
267 | int size; | 267 | int size; |
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 7f94da1a2437..82b5703b8947 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -764,7 +764,7 @@ struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
764 | if (!c4pl) | 764 | if (!c4pl) |
765 | return ERR_PTR(-ENOMEM); | 765 | return ERR_PTR(-ENOMEM); |
766 | 766 | ||
767 | pci_unmap_addr_set(c4pl, mapping, dma_addr); | 767 | dma_unmap_addr_set(c4pl, mapping, dma_addr); |
768 | c4pl->dma_addr = dma_addr; | 768 | c4pl->dma_addr = dma_addr; |
769 | c4pl->dev = dev; | 769 | c4pl->dev = dev; |
770 | c4pl->size = size; | 770 | c4pl->size = size; |
@@ -779,7 +779,7 @@ void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
779 | struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl); | 779 | struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl); |
780 | 780 | ||
781 | dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->size, | 781 | dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->size, |
782 | c4pl, pci_unmap_addr(c4pl, mapping)); | 782 | c4pl, dma_unmap_addr(c4pl, mapping)); |
783 | } | 783 | } |
784 | 784 | ||
785 | int c4iw_dereg_mr(struct ib_mr *ib_mr) | 785 | int c4iw_dereg_mr(struct ib_mr *ib_mr) |
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 0c28ed1eafa6..7065cb310553 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -40,10 +40,10 @@ static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
40 | */ | 40 | */ |
41 | dma_free_coherent(&(rdev->lldi.pdev->dev), | 41 | dma_free_coherent(&(rdev->lldi.pdev->dev), |
42 | wq->rq.memsize, wq->rq.queue, | 42 | wq->rq.memsize, wq->rq.queue, |
43 | pci_unmap_addr(&wq->rq, mapping)); | 43 | dma_unmap_addr(&wq->rq, mapping)); |
44 | dma_free_coherent(&(rdev->lldi.pdev->dev), | 44 | dma_free_coherent(&(rdev->lldi.pdev->dev), |
45 | wq->sq.memsize, wq->sq.queue, | 45 | wq->sq.memsize, wq->sq.queue, |
46 | pci_unmap_addr(&wq->sq, mapping)); | 46 | dma_unmap_addr(&wq->sq, mapping)); |
47 | c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); | 47 | c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); |
48 | kfree(wq->rq.sw_rq); | 48 | kfree(wq->rq.sw_rq); |
49 | kfree(wq->sq.sw_sq); | 49 | kfree(wq->sq.sw_sq); |
@@ -99,7 +99,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
99 | if (!wq->sq.queue) | 99 | if (!wq->sq.queue) |
100 | goto err5; | 100 | goto err5; |
101 | memset(wq->sq.queue, 0, wq->sq.memsize); | 101 | memset(wq->sq.queue, 0, wq->sq.memsize); |
102 | pci_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr); | 102 | dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr); |
103 | 103 | ||
104 | wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), | 104 | wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), |
105 | wq->rq.memsize, &(wq->rq.dma_addr), | 105 | wq->rq.memsize, &(wq->rq.dma_addr), |
@@ -112,7 +112,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
112 | wq->rq.queue, | 112 | wq->rq.queue, |
113 | (unsigned long long)virt_to_phys(wq->rq.queue)); | 113 | (unsigned long long)virt_to_phys(wq->rq.queue)); |
114 | memset(wq->rq.queue, 0, wq->rq.memsize); | 114 | memset(wq->rq.queue, 0, wq->rq.memsize); |
115 | pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr); | 115 | dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr); |
116 | 116 | ||
117 | wq->db = rdev->lldi.db_reg; | 117 | wq->db = rdev->lldi.db_reg; |
118 | wq->gts = rdev->lldi.gts_reg; | 118 | wq->gts = rdev->lldi.gts_reg; |
@@ -217,11 +217,11 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
217 | err7: | 217 | err7: |
218 | dma_free_coherent(&(rdev->lldi.pdev->dev), | 218 | dma_free_coherent(&(rdev->lldi.pdev->dev), |
219 | wq->rq.memsize, wq->rq.queue, | 219 | wq->rq.memsize, wq->rq.queue, |
220 | pci_unmap_addr(&wq->rq, mapping)); | 220 | dma_unmap_addr(&wq->rq, mapping)); |
221 | err6: | 221 | err6: |
222 | dma_free_coherent(&(rdev->lldi.pdev->dev), | 222 | dma_free_coherent(&(rdev->lldi.pdev->dev), |
223 | wq->sq.memsize, wq->sq.queue, | 223 | wq->sq.memsize, wq->sq.queue, |
224 | pci_unmap_addr(&wq->sq, mapping)); | 224 | dma_unmap_addr(&wq->sq, mapping)); |
225 | err5: | 225 | err5: |
226 | c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); | 226 | c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); |
227 | err4: | 227 | err4: |
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 1057cb96302e..9cf8d85bfcff 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -279,7 +279,7 @@ struct t4_swsqe {
279 | struct t4_sq { | 279 | struct t4_sq { |
280 | union t4_wr *queue; | 280 | union t4_wr *queue; |
281 | dma_addr_t dma_addr; | 281 | dma_addr_t dma_addr; |
282 | DECLARE_PCI_UNMAP_ADDR(mapping); | 282 | DEFINE_DMA_UNMAP_ADDR(mapping); |
283 | struct t4_swsqe *sw_sq; | 283 | struct t4_swsqe *sw_sq; |
284 | struct t4_swsqe *oldest_read; | 284 | struct t4_swsqe *oldest_read; |
285 | u64 udb; | 285 | u64 udb; |
@@ -298,7 +298,7 @@ struct t4_swrqe {
298 | struct t4_rq { | 298 | struct t4_rq { |
299 | union t4_recv_wr *queue; | 299 | union t4_recv_wr *queue; |
300 | dma_addr_t dma_addr; | 300 | dma_addr_t dma_addr; |
301 | DECLARE_PCI_UNMAP_ADDR(mapping); | 301 | DEFINE_DMA_UNMAP_ADDR(mapping); |
302 | struct t4_swrqe *sw_rq; | 302 | struct t4_swrqe *sw_rq; |
303 | u64 udb; | 303 | u64 udb; |
304 | size_t memsize; | 304 | size_t memsize; |
@@ -429,7 +429,7 @@ static inline int t4_wq_db_enabled(struct t4_wq *wq)
429 | struct t4_cq { | 429 | struct t4_cq { |
430 | struct t4_cqe *queue; | 430 | struct t4_cqe *queue; |
431 | dma_addr_t dma_addr; | 431 | dma_addr_t dma_addr; |
432 | DECLARE_PCI_UNMAP_ADDR(mapping); | 432 | DEFINE_DMA_UNMAP_ADDR(mapping); |
433 | struct t4_cqe *sw_queue; | 433 | struct t4_cqe *sw_queue; |
434 | void __iomem *gts; | 434 | void __iomem *gts; |
435 | struct c4iw_rdev *rdev; | 435 | struct c4iw_rdev *rdev; |
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 32d9208efcff..3593983df7ba 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -686,6 +686,7 @@ struct qib_devdata {
686 | void __iomem *piobase; | 686 | void __iomem *piobase; |
687 | /* mem-mapped pointer to base of user chip regs (if using WC PAT) */ | 687 | /* mem-mapped pointer to base of user chip regs (if using WC PAT) */ |
688 | u64 __iomem *userbase; | 688 | u64 __iomem *userbase; |
689 | void __iomem *piovl15base; /* base of VL15 buffers, if not WC */ | ||
689 | /* | 690 | /* |
690 | * points to area where PIOavail registers will be DMA'ed. | 691 | * points to area where PIOavail registers will be DMA'ed. |
691 | * Has to be on a page of it's own, because the page will be | 692 | * Has to be on a page of it's own, because the page will be |
diff --git a/drivers/infiniband/hw/qib/qib_7322_regs.h b/drivers/infiniband/hw/qib/qib_7322_regs.h
index a97440ba924c..32dc81ff8d4a 100644
--- a/drivers/infiniband/hw/qib/qib_7322_regs.h
+++ b/drivers/infiniband/hw/qib/qib_7322_regs.h
@@ -742,15 +742,15 @@
742 | #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_LSB 0xF | 742 | #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_LSB 0xF |
743 | #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_MSB 0xF | 743 | #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_MSB 0xF |
744 | #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_RMASK 0x1 | 744 | #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_RMASK 0x1 |
745 | #define QIB_7322_HwErrMask_statusValidNoEopMask_1_LSB 0xE | 745 | #define QIB_7322_HwErrMask_IBCBusToSPCParityErrMask_1_LSB 0xE |
746 | #define QIB_7322_HwErrMask_statusValidNoEopMask_1_MSB 0xE | 746 | #define QIB_7322_HwErrMask_IBCBusToSPCParityErrMask_1_MSB 0xE |
747 | #define QIB_7322_HwErrMask_statusValidNoEopMask_1_RMASK 0x1 | 747 | #define QIB_7322_HwErrMask_IBCBusToSPCParityErrMask_1_RMASK 0x1 |
748 | #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_LSB 0xD | 748 | #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_LSB 0xD |
749 | #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_MSB 0xD | 749 | #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_MSB 0xD |
750 | #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_RMASK 0x1 | 750 | #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_RMASK 0x1 |
751 | #define QIB_7322_HwErrMask_statusValidNoEopMask_0_LSB 0xC | 751 | #define QIB_7322_HwErrMask_statusValidNoEopMask_LSB 0xC |
752 | #define QIB_7322_HwErrMask_statusValidNoEopMask_0_MSB 0xC | 752 | #define QIB_7322_HwErrMask_statusValidNoEopMask_MSB 0xC |
753 | #define QIB_7322_HwErrMask_statusValidNoEopMask_0_RMASK 0x1 | 753 | #define QIB_7322_HwErrMask_statusValidNoEopMask_RMASK 0x1 |
754 | #define QIB_7322_HwErrMask_LATriggeredMask_LSB 0xB | 754 | #define QIB_7322_HwErrMask_LATriggeredMask_LSB 0xB |
755 | #define QIB_7322_HwErrMask_LATriggeredMask_MSB 0xB | 755 | #define QIB_7322_HwErrMask_LATriggeredMask_MSB 0xB |
756 | #define QIB_7322_HwErrMask_LATriggeredMask_RMASK 0x1 | 756 | #define QIB_7322_HwErrMask_LATriggeredMask_RMASK 0x1 |
@@ -796,15 +796,15 @@
796 | #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_LSB 0xF | 796 | #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_LSB 0xF |
797 | #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_MSB 0xF | 797 | #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_MSB 0xF |
798 | #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_RMASK 0x1 | 798 | #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_RMASK 0x1 |
799 | #define QIB_7322_HwErrStatus_statusValidNoEop_1_LSB 0xE | 799 | #define QIB_7322_HwErrStatus_IBCBusToSPCParityErr_1_LSB 0xE |
800 | #define QIB_7322_HwErrStatus_statusValidNoEop_1_MSB 0xE | 800 | #define QIB_7322_HwErrStatus_IBCBusToSPCParityErr_1_MSB 0xE |
801 | #define QIB_7322_HwErrStatus_statusValidNoEop_1_RMASK 0x1 | 801 | #define QIB_7322_HwErrStatus_IBCBusToSPCParityErr_1_RMASK 0x1 |
802 | #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_LSB 0xD | 802 | #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_LSB 0xD |
803 | #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_MSB 0xD | 803 | #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_MSB 0xD |
804 | #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_RMASK 0x1 | 804 | #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_RMASK 0x1 |
805 | #define QIB_7322_HwErrStatus_statusValidNoEop_0_LSB 0xC | 805 | #define QIB_7322_HwErrStatus_statusValidNoEop_LSB 0xC |
806 | #define QIB_7322_HwErrStatus_statusValidNoEop_0_MSB 0xC | 806 | #define QIB_7322_HwErrStatus_statusValidNoEop_MSB 0xC |
807 | #define QIB_7322_HwErrStatus_statusValidNoEop_0_RMASK 0x1 | 807 | #define QIB_7322_HwErrStatus_statusValidNoEop_RMASK 0x1 |
808 | #define QIB_7322_HwErrStatus_LATriggered_LSB 0xB | 808 | #define QIB_7322_HwErrStatus_LATriggered_LSB 0xB |
809 | #define QIB_7322_HwErrStatus_LATriggered_MSB 0xB | 809 | #define QIB_7322_HwErrStatus_LATriggered_MSB 0xB |
810 | #define QIB_7322_HwErrStatus_LATriggered_RMASK 0x1 | 810 | #define QIB_7322_HwErrStatus_LATriggered_RMASK 0x1 |
@@ -850,15 +850,15 @@
850 | #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_LSB 0xF | 850 | #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_LSB 0xF |
851 | #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_MSB 0xF | 851 | #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_MSB 0xF |
852 | #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_RMASK 0x1 | 852 | #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_RMASK 0x1 |
853 | #define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_LSB 0xE | 853 | #define QIB_7322_HwErrClear_IBCBusToSPCParityErrClear_1_LSB 0xE |
854 | #define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_MSB 0xE | 854 | #define QIB_7322_HwErrClear_IBCBusToSPCParityErrClear_1_MSB 0xE |
855 | #define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_RMASK 0x1 | 855 | #define QIB_7322_HwErrClear_IBCBusToSPCParityErrClear_1_RMASK 0x1 |
856 | #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_LSB 0xD | 856 | #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_LSB 0xD |
857 | #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_MSB 0xD | 857 | #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_MSB 0xD |
858 | #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_RMASK 0x1 | 858 | #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_RMASK 0x1 |
859 | #define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_LSB 0xC | 859 | #define QIB_7322_HwErrClear_statusValidNoEopClear_LSB 0xC |
860 | #define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_MSB 0xC | 860 | #define QIB_7322_HwErrClear_statusValidNoEopClear_MSB 0xC |
861 | #define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_RMASK 0x1 | 861 | #define QIB_7322_HwErrClear_statusValidNoEopClear_RMASK 0x1 |
862 | #define QIB_7322_HwErrClear_LATriggeredClear_LSB 0xB | 862 | #define QIB_7322_HwErrClear_LATriggeredClear_LSB 0xB |
863 | #define QIB_7322_HwErrClear_LATriggeredClear_MSB 0xB | 863 | #define QIB_7322_HwErrClear_LATriggeredClear_MSB 0xB |
864 | #define QIB_7322_HwErrClear_LATriggeredClear_RMASK 0x1 | 864 | #define QIB_7322_HwErrClear_LATriggeredClear_RMASK 0x1 |
@@ -880,15 +880,15 @@
880 | #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_LSB 0xF | 880 | #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_LSB 0xF |
881 | #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_MSB 0xF | 881 | #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_MSB 0xF |
882 | #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_RMASK 0x1 | 882 | #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_RMASK 0x1 |
883 | #define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_LSB 0xE | 883 | #define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_1_LSB 0xE |
884 | #define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_MSB 0xE | 884 | #define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_1_MSB 0xE |
885 | #define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_RMASK 0x1 | 885 | #define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_1_RMASK 0x1 |
886 | #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_LSB 0xD | 886 | #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_LSB 0xD |
887 | #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_MSB 0xD | 887 | #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_MSB 0xD |
888 | #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_RMASK 0x1 | 888 | #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_RMASK 0x1 |
889 | #define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_LSB 0xC | 889 | #define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_0_LSB 0xC |
890 | #define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_MSB 0xC | 890 | #define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_0_MSB 0xC |
891 | #define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_RMASK 0x1 | 891 | #define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_0_RMASK 0x1 |
892 | 892 | ||
893 | #define QIB_7322_EXTStatus_OFFS 0xC0 | 893 | #define QIB_7322_EXTStatus_OFFS 0xC0 |
894 | #define QIB_7322_EXTStatus_DEF 0x000000000000X000 | 894 | #define QIB_7322_EXTStatus_DEF 0x000000000000X000 |
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c
index ca98dd523752..05dcf0d9a7d3 100644
--- a/drivers/infiniband/hw/qib/qib_diag.c
+++ b/drivers/infiniband/hw/qib/qib_diag.c
@@ -233,6 +233,7 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
233 | u32 __iomem *krb32 = (u32 __iomem *)dd->kregbase; | 233 | u32 __iomem *krb32 = (u32 __iomem *)dd->kregbase; |
234 | u32 __iomem *map = NULL; | 234 | u32 __iomem *map = NULL; |
235 | u32 cnt = 0; | 235 | u32 cnt = 0; |
236 | u32 tot4k, offs4k; | ||
236 | 237 | ||
237 | /* First, simplest case, offset is within the first map. */ | 238 | /* First, simplest case, offset is within the first map. */ |
238 | kreglen = (dd->kregend - dd->kregbase) * sizeof(u64); | 239 | kreglen = (dd->kregend - dd->kregbase) * sizeof(u64); |
@@ -250,7 +251,8 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
250 | if (dd->userbase) { | 251 | if (dd->userbase) { |
251 | /* If user regs mapped, they are after send, so set limit. */ | 252 | /* If user regs mapped, they are after send, so set limit. */ |
252 | u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase; | 253 | u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase; |
253 | snd_lim = dd->uregbase; | 254 | if (!dd->piovl15base) |
255 | snd_lim = dd->uregbase; | ||
254 | krb32 = (u32 __iomem *)dd->userbase; | 256 | krb32 = (u32 __iomem *)dd->userbase; |
255 | if (offset >= dd->uregbase && offset < ulim) { | 257 | if (offset >= dd->uregbase && offset < ulim) { |
256 | map = krb32 + (offset - dd->uregbase) / sizeof(u32); | 258 | map = krb32 + (offset - dd->uregbase) / sizeof(u32); |
@@ -277,14 +279,14 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
277 | /* If 4k buffers exist, account for them by bumping | 279 | /* If 4k buffers exist, account for them by bumping |
278 | * appropriate limit. | 280 | * appropriate limit. |
279 | */ | 281 | */ |
282 | tot4k = dd->piobcnt4k * dd->align4k; | ||
283 | offs4k = dd->piobufbase >> 32; | ||
280 | if (dd->piobcnt4k) { | 284 | if (dd->piobcnt4k) { |
281 | u32 tot4k = dd->piobcnt4k * dd->align4k; | ||
282 | u32 offs4k = dd->piobufbase >> 32; | ||
283 | if (snd_bottom > offs4k) | 285 | if (snd_bottom > offs4k) |
284 | snd_bottom = offs4k; | 286 | snd_bottom = offs4k; |
285 | else { | 287 | else { |
286 | /* 4k above 2k. Bump snd_lim, if needed*/ | 288 | /* 4k above 2k. Bump snd_lim, if needed*/ |
287 | if (!dd->userbase) | 289 | if (!dd->userbase || dd->piovl15base) |
288 | snd_lim = offs4k + tot4k; | 290 | snd_lim = offs4k + tot4k; |
289 | } | 291 | } |
290 | } | 292 | } |
@@ -298,6 +300,15 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
298 | cnt = snd_lim - offset; | 300 | cnt = snd_lim - offset; |
299 | } | 301 | } |
300 | 302 | ||
303 | if (!map && offs4k && dd->piovl15base) { | ||
304 | snd_lim = offs4k + tot4k + 2 * dd->align4k; | ||
305 | if (offset >= (offs4k + tot4k) && offset < snd_lim) { | ||
306 | map = (u32 __iomem *)dd->piovl15base + | ||
307 | ((offset - (offs4k + tot4k)) / sizeof(u32)); | ||
308 | cnt = snd_lim - offset; | ||
309 | } | ||
310 | } | ||
311 | |||
301 | mapped: | 312 | mapped: |
302 | if (cntp) | 313 | if (cntp) |
303 | *cntp = cnt; | 314 | *cntp = cnt; |
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 1eadadc13da8..a5e29dbb9537 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -1355,8 +1355,7 @@ static int qib_6120_bringup_serdes(struct qib_pportdata *ppd)
1355 | hwstat = qib_read_kreg64(dd, kr_hwerrstatus); | 1355 | hwstat = qib_read_kreg64(dd, kr_hwerrstatus); |
1356 | if (hwstat) { | 1356 | if (hwstat) { |
1357 | /* should just have PLL, clear all set, in an case */ | 1357 | /* should just have PLL, clear all set, in an case */ |
1358 | if (hwstat & ~QLOGIC_IB_HWE_SERDESPLLFAILED) | 1358 | qib_write_kreg(dd, kr_hwerrclear, hwstat); |
1359 | qib_write_kreg(dd, kr_hwerrclear, hwstat); | ||
1360 | qib_write_kreg(dd, kr_errclear, ERR_MASK(HardwareErr)); | 1359 | qib_write_kreg(dd, kr_errclear, ERR_MASK(HardwareErr)); |
1361 | } | 1360 | } |
1362 | 1361 | ||
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 503992d9c5ce..5eedf83e2c3b 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -543,7 +543,7 @@ struct vendor_txdds_ent {
543 | static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *); | 543 | static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *); |
544 | 544 | ||
545 | #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */ | 545 | #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */ |
546 | #define TXDDS_EXTRA_SZ 11 /* number of extra tx settings entries */ | 546 | #define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */ |
547 | #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */ | 547 | #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */ |
548 | 548 | ||
549 | #define H1_FORCE_VAL 8 | 549 | #define H1_FORCE_VAL 8 |
@@ -1100,9 +1100,9 @@ static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
1100 | HWE_AUTO_P(SDmaMemReadErr, 1), | 1100 | HWE_AUTO_P(SDmaMemReadErr, 1), |
1101 | HWE_AUTO_P(SDmaMemReadErr, 0), | 1101 | HWE_AUTO_P(SDmaMemReadErr, 0), |
1102 | HWE_AUTO_P(IBCBusFromSPCParityErr, 1), | 1102 | HWE_AUTO_P(IBCBusFromSPCParityErr, 1), |
1103 | HWE_AUTO_P(IBCBusToSPCParityErr, 1), | ||
1103 | HWE_AUTO_P(IBCBusFromSPCParityErr, 0), | 1104 | HWE_AUTO_P(IBCBusFromSPCParityErr, 0), |
1104 | HWE_AUTO_P(statusValidNoEop, 1), | 1105 | HWE_AUTO(statusValidNoEop), |
1105 | HWE_AUTO_P(statusValidNoEop, 0), | ||
1106 | HWE_AUTO(LATriggered), | 1106 | HWE_AUTO(LATriggered), |
1107 | { .mask = 0 } | 1107 | { .mask = 0 } |
1108 | }; | 1108 | }; |
@@ -4763,6 +4763,8 @@ static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
4763 | SYM_MASK(IBPCSConfig_0, tx_rx_reset); | 4763 | SYM_MASK(IBPCSConfig_0, tx_rx_reset); |
4764 | 4764 | ||
4765 | val = qib_read_kreg_port(ppd, krp_ib_pcsconfig); | 4765 | val = qib_read_kreg_port(ppd, krp_ib_pcsconfig); |
4766 | qib_write_kreg(dd, kr_hwerrmask, | ||
4767 | dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop)); | ||
4766 | qib_write_kreg_port(ppd, krp_ibcctrl_a, | 4768 | qib_write_kreg_port(ppd, krp_ibcctrl_a, |
4767 | ppd->cpspec->ibcctrl_a & | 4769 | ppd->cpspec->ibcctrl_a & |
4768 | ~SYM_MASK(IBCCtrlA_0, IBLinkEn)); | 4770 | ~SYM_MASK(IBCCtrlA_0, IBLinkEn)); |
@@ -4772,6 +4774,9 @@ static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
4772 | qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits); | 4774 | qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits); |
4773 | qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a); | 4775 | qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a); |
4774 | qib_write_kreg(dd, kr_scratch, 0ULL); | 4776 | qib_write_kreg(dd, kr_scratch, 0ULL); |
4777 | qib_write_kreg(dd, kr_hwerrclear, | ||
4778 | SYM_MASK(HwErrClear, statusValidNoEopClear)); | ||
4779 | qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); | ||
4775 | } | 4780 | } |
4776 | 4781 | ||
4777 | /* | 4782 | /* |
@@ -5624,6 +5629,8 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
5624 | if (ppd->port != port || !ppd->link_speed_supported) | 5629 | if (ppd->port != port || !ppd->link_speed_supported) |
5625 | continue; | 5630 | continue; |
5626 | ppd->cpspec->no_eep = val; | 5631 | ppd->cpspec->no_eep = val; |
5632 | if (seth1) | ||
5633 | ppd->cpspec->h1_val = h1; | ||
5627 | /* now change the IBC and serdes, overriding generic */ | 5634 | /* now change the IBC and serdes, overriding generic */ |
5628 | init_txdds_table(ppd, 1); | 5635 | init_txdds_table(ppd, 1); |
5629 | any++; | 5636 | any++; |
@@ -6064,9 +6071,9 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
6064 | * the "cable info" setup here. Can be overridden | 6071 | * the "cable info" setup here. Can be overridden |
6065 | * in adapter-specific routines. | 6072 | * in adapter-specific routines. |
6066 | */ | 6073 | */ |
6067 | if (!(ppd->dd->flags & QIB_HAS_QSFP)) { | 6074 | if (!(dd->flags & QIB_HAS_QSFP)) { |
6068 | if (!IS_QMH(ppd->dd) && !IS_QME(ppd->dd)) | 6075 | if (!IS_QMH(dd) && !IS_QME(dd)) |
6069 | qib_devinfo(ppd->dd->pcidev, "IB%u:%u: " | 6076 | qib_devinfo(dd->pcidev, "IB%u:%u: " |
6070 | "Unknown mezzanine card type\n", | 6077 | "Unknown mezzanine card type\n", |
6071 | dd->unit, ppd->port); | 6078 | dd->unit, ppd->port); |
6072 | cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME; | 6079 | cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME; |
@@ -6119,9 +6126,25 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
6119 | qib_set_ctxtcnt(dd); | 6126 | qib_set_ctxtcnt(dd); |
6120 | 6127 | ||
6121 | if (qib_wc_pat) { | 6128 | if (qib_wc_pat) { |
6122 | ret = init_chip_wc_pat(dd, NUM_VL15_BUFS * dd->align4k); | 6129 | resource_size_t vl15off; |
6130 | /* | ||
6131 | * We do not set WC on the VL15 buffers to avoid | ||
6132 | * a rare problem with unaligned writes from | ||
6133 | * interrupt-flushed store buffers, so we need | ||
6134 | * to map those separately here. We can't solve | ||
6135 | * this for the rarely used mtrr case. | ||
6136 | */ | ||
6137 | ret = init_chip_wc_pat(dd, 0); | ||
6123 | if (ret) | 6138 | if (ret) |
6124 | goto bail; | 6139 | goto bail; |
6140 | |||
6141 | /* vl15 buffers start just after the 4k buffers */ | ||
6142 | vl15off = dd->physaddr + (dd->piobufbase >> 32) + | ||
6143 | dd->piobcnt4k * dd->align4k; | ||
6144 | dd->piovl15base = ioremap_nocache(vl15off, | ||
6145 | NUM_VL15_BUFS * dd->align4k); | ||
6146 | if (!dd->piovl15base) | ||
6147 | goto bail; | ||
6125 | } | 6148 | } |
6126 | qib_7322_set_baseaddrs(dd); /* set chip access pointers now */ | 6149 | qib_7322_set_baseaddrs(dd); /* set chip access pointers now */ |
6127 | 6150 | ||
@@ -6932,6 +6955,8 @@ static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
6932 | { 0, 0, 0, 11 }, /* QME7342 backplane settings */ | 6955 | { 0, 0, 0, 11 }, /* QME7342 backplane settings */ |
6933 | { 0, 0, 0, 11 }, /* QME7342 backplane settings */ | 6956 | { 0, 0, 0, 11 }, /* QME7342 backplane settings */ |
6934 | { 0, 0, 0, 11 }, /* QME7342 backplane settings */ | 6957 | { 0, 0, 0, 11 }, /* QME7342 backplane settings */ |
6958 | { 0, 0, 0, 3 }, /* QMH7342 backplane settings */ | ||
6959 | { 0, 0, 0, 4 }, /* QMH7342 backplane settings */ | ||
6935 | }; | 6960 | }; |
6936 | 6961 | ||
6937 | static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = { | 6962 | static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = { |
@@ -6947,6 +6972,8 @@ static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
6947 | { 0, 0, 0, 13 }, /* QME7342 backplane settings */ | 6972 | { 0, 0, 0, 13 }, /* QME7342 backplane settings */ |
6948 | { 0, 0, 0, 13 }, /* QME7342 backplane settings */ | 6973 | { 0, 0, 0, 13 }, /* QME7342 backplane settings */ |
6949 | { 0, 0, 0, 13 }, /* QME7342 backplane settings */ | 6974 | { 0, 0, 0, 13 }, /* QME7342 backplane settings */ |
6975 | { 0, 0, 0, 9 }, /* QMH7342 backplane settings */ | ||
6976 | { 0, 0, 0, 10 }, /* QMH7342 backplane settings */ | ||
6950 | }; | 6977 | }; |
6951 | 6978 | ||
6952 | static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = { | 6979 | static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = { |
@@ -6962,6 +6989,8 @@ static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
6962 | { 0, 1, 12, 6 }, /* QME7342 backplane setting */ | 6989 | { 0, 1, 12, 6 }, /* QME7342 backplane setting */ |
6963 | { 0, 1, 12, 7 }, /* QME7342 backplane setting */ | 6990 | { 0, 1, 12, 7 }, /* QME7342 backplane setting */ |
6964 | { 0, 1, 12, 8 }, /* QME7342 backplane setting */ | 6991 | { 0, 1, 12, 8 }, /* QME7342 backplane setting */ |
6992 | { 0, 1, 0, 10 }, /* QMH7342 backplane settings */ | ||
6993 | { 0, 1, 0, 12 }, /* QMH7342 backplane settings */ | ||
6965 | }; | 6994 | }; |
6966 | 6995 | ||
6967 | static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds, | 6996 | static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds, |
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 9b40f345ac3f..a873dd596e81 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1059,7 +1059,7 @@ static int __init qlogic_ib_init(void)
1059 | goto bail_dev; | 1059 | goto bail_dev; |
1060 | } | 1060 | } |
1061 | 1061 | ||
1062 | qib_cq_wq = create_workqueue("qib_cq"); | 1062 | qib_cq_wq = create_singlethread_workqueue("qib_cq"); |
1063 | if (!qib_cq_wq) { | 1063 | if (!qib_cq_wq) { |
1064 | ret = -ENOMEM; | 1064 | ret = -ENOMEM; |
1065 | goto bail_wq; | 1065 | goto bail_wq; |
@@ -1289,8 +1289,18 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
1289 | 1289 | ||
1290 | if (qib_mini_init || initfail || ret) { | 1290 | if (qib_mini_init || initfail || ret) { |
1291 | qib_stop_timers(dd); | 1291 | qib_stop_timers(dd); |
1292 | flush_scheduled_work(); | ||
1292 | for (pidx = 0; pidx < dd->num_pports; ++pidx) | 1293 | for (pidx = 0; pidx < dd->num_pports; ++pidx) |
1293 | dd->f_quiet_serdes(dd->pport + pidx); | 1294 | dd->f_quiet_serdes(dd->pport + pidx); |
1295 | if (qib_mini_init) | ||
1296 | goto bail; | ||
1297 | if (!j) { | ||
1298 | (void) qibfs_remove(dd); | ||
1299 | qib_device_remove(dd); | ||
1300 | } | ||
1301 | if (!ret) | ||
1302 | qib_unregister_ib_device(dd); | ||
1303 | qib_postinit_cleanup(dd); | ||
1294 | if (initfail) | 1304 | if (initfail) |
1295 | ret = initfail; | 1305 | ret = initfail; |
1296 | goto bail; | 1306 | goto bail; |
@@ -1472,6 +1482,9 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
1472 | dma_addr_t pa = rcd->rcvegrbuf_phys[chunk]; | 1482 | dma_addr_t pa = rcd->rcvegrbuf_phys[chunk]; |
1473 | unsigned i; | 1483 | unsigned i; |
1474 | 1484 | ||
1485 | /* clear for security and sanity on each use */ | ||
1486 | memset(rcd->rcvegrbuf[chunk], 0, size); | ||
1487 | |||
1475 | for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) { | 1488 | for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) { |
1476 | dd->f_put_tid(dd, e + egroff + | 1489 | dd->f_put_tid(dd, e + egroff + |
1477 | (u64 __iomem *) | 1490 | (u64 __iomem *) |
@@ -1499,6 +1512,12 @@ bail:
1499 | return -ENOMEM; | 1512 | return -ENOMEM; |
1500 | } | 1513 | } |
1501 | 1514 | ||
1515 | /* | ||
1516 | * Note: Changes to this routine should be mirrored | ||
1517 | * for the diagnostics routine qib_remap_ioaddr32(). | ||
1518 | * There is also related code for VL15 buffers in qib_init_7322_variables(). | ||
1519 | * The teardown code that unmaps is in qib_pcie_ddcleanup() | ||
1520 | */ | ||
1502 | int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen) | 1521 | int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen) |
1503 | { | 1522 | { |
1504 | u64 __iomem *qib_kregbase = NULL; | 1523 | u64 __iomem *qib_kregbase = NULL; |
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index c926bf4541df..7fa6e5592630 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -179,6 +179,8 @@ void qib_pcie_ddcleanup(struct qib_devdata *dd)
179 | iounmap(dd->piobase); | 179 | iounmap(dd->piobase); |
180 | if (dd->userbase) | 180 | if (dd->userbase) |
181 | iounmap(dd->userbase); | 181 | iounmap(dd->userbase); |
182 | if (dd->piovl15base) | ||
183 | iounmap(dd->piovl15base); | ||
182 | 184 | ||
183 | pci_disable_device(dd->pcidev); | 185 | pci_disable_device(dd->pcidev); |
184 | pci_release_regions(dd->pcidev); | 186 | pci_release_regions(dd->pcidev); |
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
index f7eb1ddff5f3..af30232b6831 100644
--- a/drivers/infiniband/hw/qib/qib_tx.c
+++ b/drivers/infiniband/hw/qib/qib_tx.c
@@ -340,9 +340,13 @@ rescan:
340 | if (i < dd->piobcnt2k) | 340 | if (i < dd->piobcnt2k) |
341 | buf = (u32 __iomem *)(dd->pio2kbase + | 341 | buf = (u32 __iomem *)(dd->pio2kbase + |
342 | i * dd->palign); | 342 | i * dd->palign); |
343 | else | 343 | else if (i < dd->piobcnt2k + dd->piobcnt4k || !dd->piovl15base) |
344 | buf = (u32 __iomem *)(dd->pio4kbase + | 344 | buf = (u32 __iomem *)(dd->pio4kbase + |
345 | (i - dd->piobcnt2k) * dd->align4k); | 345 | (i - dd->piobcnt2k) * dd->align4k); |
346 | else | ||
347 | buf = (u32 __iomem *)(dd->piovl15base + | ||
348 | (i - (dd->piobcnt2k + dd->piobcnt4k)) * | ||
349 | dd->align4k); | ||
346 | if (pbufnum) | 350 | if (pbufnum) |
347 | *pbufnum = i; | 351 | *pbufnum = i; |
348 | dd->upd_pio_shadow = 0; | 352 | dd->upd_pio_shadow = 0; |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index df3eb8c9fd96..b4b22576f12a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1163,7 +1163,7 @@ static ssize_t create_child(struct device *dev,
1163 | 1163 | ||
1164 | return ret ? ret : count; | 1164 | return ret ? ret : count; |
1165 | } | 1165 | } |
1166 | static DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child); | 1166 | static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child); |
1167 | 1167 | ||
1168 | static ssize_t delete_child(struct device *dev, | 1168 | static ssize_t delete_child(struct device *dev, |
1169 | struct device_attribute *attr, | 1169 | struct device_attribute *attr, |
@@ -1183,7 +1183,7 @@ static ssize_t delete_child(struct device *dev,
1183 | return ret ? ret : count; | 1183 | return ret ? ret : count; |
1184 | 1184 | ||
1185 | } | 1185 | } |
1186 | static DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child); | 1186 | static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child); |
1187 | 1187 | ||
1188 | int ipoib_add_pkey_attr(struct net_device *dev) | 1188 | int ipoib_add_pkey_attr(struct net_device *dev) |
1189 | { | 1189 | { |