author     Glenn Elliott <gelliott@cs.unc.edu>   2012-03-04 19:47:13 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>   2012-03-04 19:47:13 -0500
commit     c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree       ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/infiniband/hw/cxgb3
parent     ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent     6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'drivers/infiniband/hw/cxgb3')
-rw-r--r--  drivers/infiniband/hw/cxgb3/Kconfig         |  2
-rw-r--r--  drivers/infiniband/hw/cxgb3/Makefile        |  6
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.c      |  3
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_wr.h       | 18
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c       | 53
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_ev.c       | 17
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c | 24
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.h |  4
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c       | 87
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_user.h     |  8
10 files changed, 106 insertions, 116 deletions
diff --git a/drivers/infiniband/hw/cxgb3/Kconfig b/drivers/infiniband/hw/cxgb3/Kconfig
index 2acec3fadf69..2b6352b85485 100644
--- a/drivers/infiniband/hw/cxgb3/Kconfig
+++ b/drivers/infiniband/hw/cxgb3/Kconfig
@@ -10,7 +10,7 @@ config INFINIBAND_CXGB3
 	  our website at <http://www.chelsio.com>.
 
 	  For customer support, please visit our customer support page at
-	  <http://www.chelsio.com/support.htm>.
+	  <http://www.chelsio.com/support.html>.
 
 	  Please send feedback to <linux-bugs@chelsio.com>.
 
diff --git a/drivers/infiniband/hw/cxgb3/Makefile b/drivers/infiniband/hw/cxgb3/Makefile
index 7e7b5a66f042..621619c794e5 100644
--- a/drivers/infiniband/hw/cxgb3/Makefile
+++ b/drivers/infiniband/hw/cxgb3/Makefile
@@ -1,10 +1,8 @@
-EXTRA_CFLAGS += -Idrivers/net/cxgb3
+ccflags-y := -Idrivers/net/cxgb3
 
 obj-$(CONFIG_INFINIBAND_CXGB3) += iw_cxgb3.o
 
 iw_cxgb3-y := iwch_cm.o iwch_ev.o iwch_cq.o iwch_qp.o iwch_mem.o \
 	      iwch_provider.o iwch.o cxio_hal.o cxio_resource.o
 
-ifdef CONFIG_INFINIBAND_CXGB3_DEBUG
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_INFINIBAND_CXGB3_DEBUG) += -DDEBUG
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 005b7b52bc1e..c3f5aca4ef00 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -160,6 +160,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
 	struct rdma_cq_setup setup;
 	int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);
 
+	size += 1; /* one extra page for storing cq-in-err state */
 	cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
 	if (!cq->cqid)
 		return -ENOMEM;
@@ -188,6 +189,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
 	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
 }
 
+#ifdef notyet
 int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
 {
 	struct rdma_cq_setup setup;
@@ -199,6 +201,7 @@ int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
 	setup.ovfl_mode = 1;
 	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
 }
+#endif
 
 static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
 {
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index e5ddb63e7d23..83d2e19d31ae 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -689,7 +689,7 @@ struct t3_swrq {
  * A T3 WQ implements both the SQ and RQ.
  */
 struct t3_wq {
-	union t3_wr *queue;		/* DMA accessable memory */
+	union t3_wr *queue;		/* DMA accessible memory */
 	dma_addr_t dma_addr;		/* DMA address for HW */
 	DEFINE_DMA_UNMAP_ADDR(mapping);	/* unmap kruft */
 	u32 error;			/* 1 once we go to ERROR */
@@ -728,6 +728,22 @@ struct t3_cq {
 #define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \
 					 CQE_GENBIT(*cqe))
 
+struct t3_cq_status_page {
+	u32 cq_err;
+};
+
+static inline int cxio_cq_in_error(struct t3_cq *cq)
+{
+	return ((struct t3_cq_status_page *)
+		&cq->queue[1 << cq->size_log2])->cq_err;
+}
+
+static inline void cxio_set_cq_in_error(struct t3_cq *cq)
+{
+	((struct t3_cq_status_page *)
+	 &cq->queue[1 << cq->size_log2])->cq_err = 1;
+}
+
 static inline void cxio_set_wq_in_error(struct t3_wq *wq)
 {
 	wq->queue->wq_in_err.err |= 1;
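
Note on the cxio_wr.h hunk above: the new accessors rely on a layout convention rather than a new struct member. The cq_err word lives in the slot one past the CQE ring (&cq->queue[1 << cq->size_log2]), which is why cxio_create_cq() grows the allocation by one entry and why the user ABI gains a memsize field further down. A rough, self-contained illustration of that "ring plus trailing status slot" layout (ordinary userspace C; names such as ring_alloc and ring_in_error are made up for the sketch and are not the driver's API):

#include <stdlib.h>

struct cqe { unsigned long long data[2]; };	/* stand-in for struct t3_cqe */
struct status_page { unsigned int err; };	/* mirrors t3_cq_status_page */

struct ring {
	unsigned int size_log2;
	struct cqe *queue;	/* (1 << size_log2) entries plus one status slot */
};

static int ring_alloc(struct ring *r, unsigned int size_log2)
{
	size_t n = ((size_t)1 << size_log2) + 1;	/* the +1 slot carries the status page */

	r->size_log2 = size_log2;
	r->queue = calloc(n, sizeof(*r->queue));
	return r->queue ? 0 : -1;
}

static struct status_page *ring_status(struct ring *r)
{
	/* the status page aliases the entry just past the ring, as in cxio_cq_in_error() */
	return (struct status_page *)&r->queue[1u << r->size_log2];
}

static void ring_set_in_error(struct ring *r) { ring_status(r)->err = 1; }
static int ring_in_error(struct ring *r) { return ring_status(r)->err; }

int main(void)
{
	struct ring r;

	if (ring_alloc(&r, 6))
		return 1;
	ring_set_in_error(&r);		/* analogous to what flush_qp() does for user CQs */
	return ring_in_error(&r) ? 0 : 1;
}

A downlevel libcxgb3 that only maps the old, smaller region simply never sees the flag, which is why the provider change below warns about it but carries on.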
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 13c88871dc3b..0a5008fbebac 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -338,23 +338,12 @@ static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
 				 __be16 peer_port, u8 tos)
 {
 	struct rtable *rt;
-	struct flowi fl = {
-		.oif = 0,
-		.nl_u = {
-			 .ip4_u = {
-				   .daddr = peer_ip,
-				   .saddr = local_ip,
-				   .tos = tos}
-		 },
-		.proto = IPPROTO_TCP,
-		.uli_u = {
-			  .ports = {
-				    .sport = local_port,
-				    .dport = peer_port}
-		 }
-	};
-
-	if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
+	struct flowi4 fl4;
+
+	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
+				   peer_port, local_port, IPPROTO_TCP,
+				   tos, 0);
+	if (IS_ERR(rt))
 		return NULL;
 	return rt;
 }
@@ -925,7 +914,7 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
 		goto err;
 
 	if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
-		iwch_post_zb_read(ep->com.qp);
+		iwch_post_zb_read(ep);
 	}
 
 	goto out;
@@ -1089,37 +1078,45 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	struct iwch_ep *ep = ctx;
 	struct cpl_wr_ack *hdr = cplhdr(skb);
 	unsigned int credits = ntohs(hdr->credits);
+	unsigned long flags;
+	int post_zb = 0;
 
 	PDBG("%s ep %p credits %u\n", __func__, ep, credits);
 
 	if (credits == 0) {
-		PDBG(KERN_ERR "%s 0 credit ack ep %p state %u\n",
+		PDBG("%s 0 credit ack ep %p state %u\n",
 		     __func__, ep, state_read(&ep->com));
 		return CPL_RET_BUF_DONE;
 	}
 
+	spin_lock_irqsave(&ep->com.lock, flags);
 	BUG_ON(credits != 1);
 	dst_confirm(ep->dst);
 	if (!ep->mpa_skb) {
 		PDBG("%s rdma_init wr_ack ep %p state %u\n",
-		     __func__, ep, state_read(&ep->com));
+		     __func__, ep, ep->com.state);
 		if (ep->mpa_attr.initiator) {
 			PDBG("%s initiator ep %p state %u\n",
-			     __func__, ep, state_read(&ep->com));
-			if (peer2peer)
-				iwch_post_zb_read(ep->com.qp);
+			     __func__, ep, ep->com.state);
+			if (peer2peer && ep->com.state == FPDU_MODE)
+				post_zb = 1;
 		} else {
 			PDBG("%s responder ep %p state %u\n",
-			     __func__, ep, state_read(&ep->com));
-			ep->com.rpl_done = 1;
-			wake_up(&ep->com.waitq);
+			     __func__, ep, ep->com.state);
+			if (ep->com.state == MPA_REQ_RCVD) {
+				ep->com.rpl_done = 1;
+				wake_up(&ep->com.waitq);
+			}
 		}
 	} else {
 		PDBG("%s lsm ack ep %p state %u freeing skb\n",
-		     __func__, ep, state_read(&ep->com));
+		     __func__, ep, ep->com.state);
 		kfree_skb(ep->mpa_skb);
 		ep->mpa_skb = NULL;
 	}
+	spin_unlock_irqrestore(&ep->com.lock, flags);
+	if (post_zb)
+		iwch_post_zb_read(ep);
 	return CPL_RET_BUF_DONE;
 }
 
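
Two things are going on in this file. The find_route() hunk is a mechanical conversion from the old struct flowi call to ip_route_output_ports(), but the error convention changes with it: the route is now returned directly and failures come back as an ERR_PTR-encoded pointer, hence the IS_ERR(rt) test instead of checking an int return. The tx_ack() hunk additionally serializes the handler under ep->com.lock and only posts the zero-byte read after the lock is dropped, and only when the endpoint is still in the expected state. A small self-contained sketch of the pointer-encoded-error convention the new routing call relies on (a plain C re-creation of the ERR_PTR/IS_ERR idea for illustration, not the kernel headers):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095	/* errno values occupy the top of the address space */

static inline void *err_ptr(long err) { return (void *)err; }
static inline long ptr_err(const void *p) { return (long)p; }
static inline int is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* stand-in for a lookup such as ip_route_output_ports(): a route or an encoded errno */
static void *lookup_route(int reachable)
{
	static int route_object;	/* pretend this is the routing entry */

	return reachable ? (void *)&route_object : err_ptr(-ENETUNREACH);
}

int main(void)
{
	void *rt = lookup_route(0);

	if (is_err(rt))			/* mirrors the new "if (IS_ERR(rt))" check */
		printf("route lookup failed: %ld\n", ptr_err(rt));
	return 0;
}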
diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c
index 6afc89e7572c..71e0d845da3d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_ev.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_ev.c
@@ -76,6 +76,14 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
 	atomic_inc(&qhp->refcnt);
 	spin_unlock(&rnicp->lock);
 
+	if (qhp->attr.state == IWCH_QP_STATE_RTS) {
+		attrs.next_state = IWCH_QP_STATE_TERMINATE;
+		iwch_modify_qp(qhp->rhp, qhp, IWCH_QP_ATTR_NEXT_STATE,
+			       &attrs, 1);
+		if (send_term)
+			iwch_post_terminate(qhp, rsp_msg);
+	}
+
 	event.event = ib_event;
 	event.device = chp->ibcq.device;
 	if (ib_event == IB_EVENT_CQ_ERR)
@@ -86,13 +94,7 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
 	if (qhp->ibqp.event_handler)
 		(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
 
-	if (qhp->attr.state == IWCH_QP_STATE_RTS) {
-		attrs.next_state = IWCH_QP_STATE_TERMINATE;
-		iwch_modify_qp(qhp->rhp, qhp, IWCH_QP_ATTR_NEXT_STATE,
-			       &attrs, 1);
-		if (send_term)
-			iwch_post_terminate(qhp, rsp_msg);
-	}
+	(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
 
 	if (atomic_dec_and_test(&qhp->refcnt))
 		wake_up(&qhp->wait);
@@ -179,7 +181,6 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
 	case TPT_ERR_BOUND:
 	case TPT_ERR_INVALIDATE_SHARED_MR:
 	case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
-		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
 		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1);
 		break;
 
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index fca0b4b747e4..2e2741307af4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -154,6 +154,8 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
 	struct iwch_create_cq_resp uresp;
 	struct iwch_create_cq_req ureq;
 	struct iwch_ucontext *ucontext = NULL;
+	static int warned;
+	size_t resplen;
 
 	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
 	rhp = to_iwch_dev(ibdev);
@@ -217,15 +219,26 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
 		uresp.key = ucontext->key;
 		ucontext->key += PAGE_SIZE;
 		spin_unlock(&ucontext->mmap_lock);
-		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
+		mm->key = uresp.key;
+		mm->addr = virt_to_phys(chp->cq.queue);
+		if (udata->outlen < sizeof uresp) {
+			if (!warned++)
+				printk(KERN_WARNING MOD "Warning - "
+				       "downlevel libcxgb3 (non-fatal).\n");
+			mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
+					     sizeof(struct t3_cqe));
+			resplen = sizeof(struct iwch_create_cq_resp_v0);
+		} else {
+			mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
+					     sizeof(struct t3_cqe));
+			uresp.memsize = mm->len;
+			resplen = sizeof uresp;
+		}
+		if (ib_copy_to_udata(udata, &uresp, resplen)) {
 			kfree(mm);
 			iwch_destroy_cq(&chp->ibcq);
 			return ERR_PTR(-EFAULT);
 		}
-		mm->key = uresp.key;
-		mm->addr = virt_to_phys(chp->cq.queue);
-		mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
-				     sizeof (struct t3_cqe));
 		insert_mmap(ucontext, mm);
 	}
 	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
@@ -1414,6 +1427,7 @@ int iwch_register_device(struct iwch_dev *dev)
 	dev->ibdev.post_send = iwch_post_send;
 	dev->ibdev.post_recv = iwch_post_receive;
 	dev->ibdev.get_protocol_stats = iwch_get_mib;
+	dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;
 
 	dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
 	if (!dev->ibdev.iwcm)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index a237d49bdcc9..9a342c9b220d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -332,11 +332,9 @@ int iwch_bind_mw(struct ib_qp *qp,
 		 struct ib_mw_bind *mw_bind);
 int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
 int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
-int iwch_post_zb_read(struct iwch_qp *qhp);
+int iwch_post_zb_read(struct iwch_ep *ep);
 int iwch_register_device(struct iwch_dev *dev);
 void iwch_unregister_device(struct iwch_dev *dev);
-int iwch_quiesce_qps(struct iwch_cq *chp);
-int iwch_resume_qps(struct iwch_cq *chp);
 void stop_read_rep_timer(struct iwch_qp *qhp);
 int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
 		      struct iwch_mr *mhp, int shift);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index c64d27bf2c15..ecd313f359a4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -738,7 +738,7 @@ static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
 	}
 }
 
-int iwch_post_zb_read(struct iwch_qp *qhp)
+int iwch_post_zb_read(struct iwch_ep *ep)
 {
 	union t3_wr *wqe;
 	struct sk_buff *skb;
@@ -761,10 +761,10 @@ int iwch_post_zb_read(struct iwch_qp *qhp)
 	wqe->read.local_len = cpu_to_be32(0);
 	wqe->read.local_to = cpu_to_be64(1);
 	wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ));
-	wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)|
+	wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(ep->hwtid)|
 						V_FW_RIWR_LEN(flit_cnt));
 	skb->priority = CPL_PRIORITY_DATA;
-	return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
+	return iwch_cxgb3_ofld_send(ep->com.qp->rhp->rdev.t3cdev_p, skb);
 }
 
 /*
@@ -802,14 +802,12 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
 /*
  * Assumes qhp lock is held.
  */
-static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
+static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
+		       struct iwch_cq *schp, unsigned long *flag)
 {
-	struct iwch_cq *rchp, *schp;
 	int count;
 	int flushed;
 
-	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
-	schp = get_chp(qhp->rhp, qhp->attr.scq);
 
 	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
 	/* take a ref on the qhp since we must release the lock */
@@ -847,10 +845,23 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 
 static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 {
-	if (qhp->ibqp.uobject)
+	struct iwch_cq *rchp, *schp;
+
+	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
+	schp = get_chp(qhp->rhp, qhp->attr.scq);
+
+	if (qhp->ibqp.uobject) {
 		cxio_set_wq_in_error(&qhp->wq);
-	else
-		__flush_qp(qhp, flag);
+		cxio_set_cq_in_error(&rchp->cq);
+		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+		if (schp != rchp) {
+			cxio_set_cq_in_error(&schp->cq);
+			(*schp->ibcq.comp_handler)(&schp->ibcq,
+						   schp->ibcq.cq_context);
+		}
+		return;
+	}
+	__flush_qp(qhp, rchp, schp, flag);
 }
 
 
@@ -1138,59 +1149,3 @@ out:
 	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
 	return ret;
 }
-
-static int quiesce_qp(struct iwch_qp *qhp)
-{
-	spin_lock_irq(&qhp->lock);
-	iwch_quiesce_tid(qhp->ep);
-	qhp->flags |= QP_QUIESCED;
-	spin_unlock_irq(&qhp->lock);
-	return 0;
-}
-
-static int resume_qp(struct iwch_qp *qhp)
-{
-	spin_lock_irq(&qhp->lock);
-	iwch_resume_tid(qhp->ep);
-	qhp->flags &= ~QP_QUIESCED;
-	spin_unlock_irq(&qhp->lock);
-	return 0;
-}
-
-int iwch_quiesce_qps(struct iwch_cq *chp)
-{
-	int i;
-	struct iwch_qp *qhp;
-
-	for (i=0; i < T3_MAX_NUM_QP; i++) {
-		qhp = get_qhp(chp->rhp, i);
-		if (!qhp)
-			continue;
-		if ((qhp->attr.rcq == chp->cq.cqid) && !qp_quiesced(qhp)) {
-			quiesce_qp(qhp);
-			continue;
-		}
-		if ((qhp->attr.scq == chp->cq.cqid) && !qp_quiesced(qhp))
-			quiesce_qp(qhp);
-	}
-	return 0;
-}
-
-int iwch_resume_qps(struct iwch_cq *chp)
-{
-	int i;
-	struct iwch_qp *qhp;
-
-	for (i=0; i < T3_MAX_NUM_QP; i++) {
-		qhp = get_qhp(chp->rhp, i);
-		if (!qhp)
-			continue;
-		if ((qhp->attr.rcq == chp->cq.cqid) && qp_quiesced(qhp)) {
-			resume_qp(qhp);
-			continue;
-		}
-		if ((qhp->attr.scq == chp->cq.cqid) && qp_quiesced(qhp))
-			resume_qp(qhp);
-	}
-	return 0;
-}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_user.h b/drivers/infiniband/hw/cxgb3/iwch_user.h
index cb7086f558c1..a277c31fcaf7 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_user.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_user.h
@@ -45,10 +45,18 @@ struct iwch_create_cq_req {
 	__u64 user_rptr_addr;
 };
 
+struct iwch_create_cq_resp_v0 {
+	__u64 key;
+	__u32 cqid;
+	__u32 size_log2;
+};
+
 struct iwch_create_cq_resp {
 	__u64 key;
 	__u32 cqid;
 	__u32 size_log2;
+	__u32 memsize;
+	__u32 reserved;
 };
 
 struct iwch_create_qp_resp {
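
Taken together with the iwch_create_cq() change above, the two response layouts implement backward compatibility by size: if userspace only supplied room for the old struct iwch_create_cq_resp_v0, the kernel copies just that prefix and the new memsize field is never sent. A self-contained sketch of that size-negotiated response pattern (illustrative names such as fill_cq_resp, not the driver's functions):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* old and extended response layouts; the new one only appends fields */
struct cq_resp_v0 {
	uint64_t key;
	uint32_t cqid;
	uint32_t size_log2;
};

struct cq_resp {
	uint64_t key;
	uint32_t cqid;
	uint32_t size_log2;
	uint32_t memsize;
	uint32_t reserved;
};

/* copy no more than the consumer has room for, in the spirit of ib_copy_to_udata() */
static size_t fill_cq_resp(void *out, size_t outlen, const struct cq_resp *full)
{
	size_t resplen = outlen < sizeof(*full) ? sizeof(struct cq_resp_v0)
						: sizeof(*full);

	memcpy(out, full, resplen);
	return resplen;
}

int main(void)
{
	struct cq_resp full = { .key = 0x1000, .cqid = 7, .size_log2 = 6,
				.memsize = 4096, .reserved = 0 };
	struct cq_resp_v0 old_abi;	/* a downlevel consumer's smaller buffer */

	printf("copied %zu bytes for a v0 consumer\n",
	       fill_cq_resp(&old_abi, sizeof(old_abi), &full));
	return 0;
}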