author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-07 15:18:21 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-07 15:18:21 -0400
commit    972d45fb43f0f0793fa275c4a22998106760cd61 (patch)
tree      f80ac6698044b179bf3fb9d686bd33083033ccb5 /drivers/infiniband/hw
parent    5b6b54982258c330247957a8d877b9851ac69d53 (diff)
parent    8d1cc86a6278687efbab7b8c294ab01efe4d4231 (diff)
Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband:
  IPoIB: Convert to NAPI
  IB: Return "maybe missed event" hint from ib_req_notify_cq()
  IB: Add CQ comp_vector support
  IB/ipath: Fix a race condition when generating ACKs
  IB/ipath: Fix two more spin lock problems
  IB/fmr_pool: Add prefix to all printks
  IB/srp: Set proc_name
  IB/srp: Add orig_dgid sysfs attribute to scsi_host
  IPoIB/cm: Don't crash if remote side uses one QP for both directions
  RDMA/cxgb3: Support for new abort logic
  RDMA/cxgb3: Initialize cpu_idx field in cpl_close_listserv_req message
  RDMA/cxgb3: Fail qp creation if the requested max_inline is too large
  RDMA/cxgb3: Fix TERM codes
  IPoIB/cm: Fix error handling in ipoib_cm_dev_open()
  IB/ipath: Don't corrupt pending mmap list when unmapped objects are freed
  IB/mthca: Work around kernel QP starvation
  IB/ipath: Don't put QP in timeout queue if waiting to send
  IB/ipath: Don't call spin_lock_irq() from interrupt context
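Two verbs API changes pulled in by this merge touch every driver in the diffstat below: ib_create_cq() gains a comp_vector argument ("IB: Add CQ comp_vector support"), and ib_req_notify_cq() can now return a "maybe missed event" hint when the new IB_CQ_REPORT_MISSED_EVENTS flag is passed. The sketch below shows how a kernel consumer would use both after this merge; the function and variable names are illustrative assumptions, not code from any of the merged patches.

    #include <rdma/ib_verbs.h>

    /*
     * Hypothetical consumer sketch (names are illustrative).  CQ creation
     * now takes a completion vector as its last argument; vector 0 is
     * always valid:
     *
     *      cq = ib_create_cq(ibdev, comp_handler, NULL, ctx, 256, 0);
     *
     * The poll routine below uses the new IB_CQ_REPORT_MISSED_EVENTS hint
     * when re-arming the CQ.
     */
    static int example_poll_and_rearm(struct ib_cq *cq, struct ib_wc *wc, int budget)
    {
            int done = 0;

            while (done < budget && ib_poll_cq(cq, 1, wc) > 0)
                    ++done;         /* handle the completion in *wc here */

            if (done < budget) {
                    /*
                     * A return value > 0 means completions may have slipped
                     * in between the last poll and the re-arm, so the caller
                     * should poll again instead of waiting for the next
                     * CQ event.
                     */
                    if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                         IB_CQ_REPORT_MISSED_EVENTS) > 0)
                            return budget;
            }

            return done;
    }

The positive return value closes the race where a completion arrives after the final ib_poll_cq() but before the CQ is re-armed, which is what the IPoIB NAPI conversion in this merge relies on.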
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/amso1100/c2.h           2
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_cq.c        16
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_provider.c  3
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.c        3
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_wr.h         1
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c         19
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.h         6
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c   14
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c         69
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_cq.c          2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_iverbs.h      4
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c        3
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_reqs.c        14
-rw-r--r--  drivers/infiniband/hw/ehca/ipz_pt_fn.h        8
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_cq.c        68
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mmap.c      64
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_qp.c        52
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_rc.c        55
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_srq.c       55
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c     4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.h     24
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c        12
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h       4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c  2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c        13
25 files changed, 337 insertions(+), 180 deletions(-)
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
index 04a9db5de881..fa58200217a1 100644
--- a/drivers/infiniband/hw/amso1100/c2.h
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -519,7 +519,7 @@ extern void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq);
 extern void c2_cq_event(struct c2_dev *c2dev, u32 mq_index);
 extern void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index);
 extern int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
-extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify);
+extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
 
 /* CM */
 extern int c2_llp_connect(struct iw_cm_id *cm_id,
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index 5175c99ee586..d2b3366786d6 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -217,17 +217,19 @@ int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
         return npolled;
 }
 
-int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
+int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
 {
         struct c2_mq_shared __iomem *shared;
         struct c2_cq *cq;
+        unsigned long flags;
+        int ret = 0;
 
         cq = to_c2cq(ibcq);
         shared = cq->mq.peer;
 
-        if (notify == IB_CQ_NEXT_COMP)
+        if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_NEXT_COMP)
                 writeb(C2_CQ_NOTIFICATION_TYPE_NEXT, &shared->notification_type);
-        else if (notify == IB_CQ_SOLICITED)
+        else if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
                 writeb(C2_CQ_NOTIFICATION_TYPE_NEXT_SE, &shared->notification_type);
         else
                 return -EINVAL;
@@ -241,7 +243,13 @@ int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
          */
         readb(&shared->armed);
 
-        return 0;
+        if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
+                spin_lock_irqsave(&cq->lock, flags);
+                ret = !c2_mq_empty(&cq->mq);
+                spin_unlock_irqrestore(&cq->lock, flags);
+        }
+
+        return ret;
 }
 
 static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index 607c09bf764c..109166223c09 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -290,7 +290,7 @@ static int c2_destroy_qp(struct ib_qp *ib_qp)
         return 0;
 }
 
-static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries,
+static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries, int vector,
                                   struct ib_ucontext *context,
                                   struct ib_udata *udata)
 {
@@ -795,6 +795,7 @@ int c2_register_device(struct c2_dev *dev)
         memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
         memcpy(&dev->ibdev.node_guid, dev->pseudo_netdev->dev_addr, 6);
         dev->ibdev.phys_port_cnt = 1;
+        dev->ibdev.num_comp_vectors = 1;
         dev->ibdev.dma_device = &dev->pcidev->dev;
         dev->ibdev.query_device = c2_query_device;
         dev->ibdev.query_port = c2_query_port;
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index f5e9aeec6f6e..76049afc7655 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -114,7 +114,10 @@ int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
                                 return -EIO;
                         }
                 }
+
+                return 1;
         }
+
         return 0;
 }
 
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index 90d7b8972cb4..ff7290eacefb 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -38,6 +38,7 @@
 #include "firmware_exports.h"
 
 #define T3_MAX_SGE      4
+#define T3_MAX_INLINE   64
 
 #define Q_EMPTY(rptr,wptr) ((rptr)==(wptr))
 #define Q_FULL(rptr,wptr,size_log2) ( (((wptr)-(rptr))>>(size_log2)) && \
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 3b4b0acd707f..b2faff5abce8 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1109,6 +1109,15 @@ static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 
         PDBG("%s ep %p\n", __FUNCTION__, ep);
 
+        /*
+         * We get 2 abort replies from the HW.  The first one must
+         * be ignored except for scribbling that we need one more.
+         */
+        if (!(ep->flags & ABORT_REQ_IN_PROGRESS)) {
+                ep->flags |= ABORT_REQ_IN_PROGRESS;
+                return CPL_RET_BUF_DONE;
+        }
+
         close_complete_upcall(ep);
         state_set(&ep->com, DEAD);
         release_ep_resources(ep);
@@ -1189,6 +1198,7 @@ static int listen_stop(struct iwch_listen_ep *ep)
         }
         req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
         req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+        req->cpu_idx = 0;
         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
         skb->priority = 1;
         ep->com.tdev->send(ep->com.tdev, skb);
@@ -1475,6 +1485,15 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
         int ret;
         int state;
 
+        /*
+         * We get 2 peer aborts from the HW.  The first one must
+         * be ignored except for scribbling that we need one more.
+         */
+        if (!(ep->flags & PEER_ABORT_IN_PROGRESS)) {
+                ep->flags |= PEER_ABORT_IN_PROGRESS;
+                return CPL_RET_BUF_DONE;
+        }
+
         if (is_neg_adv_abort(req->status)) {
                 PDBG("%s neg_adv_abort ep %p tid %d\n", __FUNCTION__, ep,
                      ep->hwtid);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h
index 0c6f281bd4a0..21a388c313cf 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.h
@@ -143,6 +143,11 @@ enum iwch_ep_state {
         DEAD,
 };
 
+enum iwch_ep_flags {
+        PEER_ABORT_IN_PROGRESS  = (1 << 0),
+        ABORT_REQ_IN_PROGRESS   = (1 << 1),
+};
+
 struct iwch_ep_common {
         struct iw_cm_id *cm_id;
         struct iwch_qp *qp;
@@ -181,6 +186,7 @@ struct iwch_ep {
         u16 plen;
         u32 ird;
         u32 ord;
+        u32 flags;
 };
 
 static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index af28a317016d..a891493fd340 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -139,7 +139,7 @@ static int iwch_destroy_cq(struct ib_cq *ib_cq)
         return 0;
 }
 
-static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries,
+static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector,
                              struct ib_ucontext *ib_context,
                              struct ib_udata *udata)
 {
@@ -292,7 +292,7 @@ static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
 #endif
 }
 
-static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
+static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 {
         struct iwch_dev *rhp;
         struct iwch_cq *chp;
@@ -303,7 +303,7 @@ static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
 
         chp = to_iwch_cq(ibcq);
         rhp = chp->rhp;
-        if (notify == IB_CQ_SOLICITED)
+        if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
                 cq_op = CQ_ARM_SE;
         else
                 cq_op = CQ_ARM_AN;
@@ -317,9 +317,11 @@ static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
         PDBG("%s rptr 0x%x\n", __FUNCTION__, chp->cq.rptr);
         err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
         spin_unlock_irqrestore(&chp->lock, flag);
-        if (err)
+        if (err < 0)
                 printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err,
                        chp->cq.cqid);
+        if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
+                err = 0;
         return err;
 }
 
@@ -780,6 +782,9 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
         if (rqsize > T3_MAX_RQ_SIZE)
                 return ERR_PTR(-EINVAL);
 
+        if (attrs->cap.max_inline_data > T3_MAX_INLINE)
+                return ERR_PTR(-EINVAL);
+
         /*
          * NOTE: The SQ and total WQ sizes don't need to be
          * a power of two.  However, all the code assumes
@@ -1107,6 +1112,7 @@ int iwch_register_device(struct iwch_dev *dev)
         dev->ibdev.node_type = RDMA_NODE_RNIC;
         memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
         dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
+        dev->ibdev.num_comp_vectors = 1;
         dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
         dev->ibdev.query_device = iwch_query_device;
         dev->ibdev.query_port = iwch_query_port;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 0a472c9b44db..714dddbc9a98 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -471,43 +471,62 @@ int iwch_bind_mw(struct ib_qp *qp,
         return err;
 }
 
-static void build_term_codes(int t3err, u8 *layer_type, u8 *ecode, int tagged)
+static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
+                                    u8 *layer_type, u8 *ecode)
 {
-        switch (t3err) {
+        int status = TPT_ERR_INTERNAL_ERR;
+        int tagged = 0;
+        int opcode = -1;
+        int rqtype = 0;
+        int send_inv = 0;
+
+        if (rsp_msg) {
+                status = CQE_STATUS(rsp_msg->cqe);
+                opcode = CQE_OPCODE(rsp_msg->cqe);
+                rqtype = RQ_TYPE(rsp_msg->cqe);
+                send_inv = (opcode == T3_SEND_WITH_INV) ||
+                           (opcode == T3_SEND_WITH_SE_INV);
+                tagged = (opcode == T3_RDMA_WRITE) ||
+                         (rqtype && (opcode == T3_READ_RESP));
+        }
+
+        switch (status) {
         case TPT_ERR_STAG:
-                if (tagged == 1) {
-                        *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
-                        *ecode = DDPT_INV_STAG;
-                } else if (tagged == 2) {
+                if (send_inv) {
+                        *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
+                        *ecode = RDMAP_CANT_INV_STAG;
+                } else {
                         *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                         *ecode = RDMAP_INV_STAG;
                 }
                 break;
         case TPT_ERR_PDID:
+                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+                if ((opcode == T3_SEND_WITH_INV) ||
+                    (opcode == T3_SEND_WITH_SE_INV))
+                        *ecode = RDMAP_CANT_INV_STAG;
+                else
+                        *ecode = RDMAP_STAG_NOT_ASSOC;
+                break;
         case TPT_ERR_QPID:
+                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+                *ecode = RDMAP_STAG_NOT_ASSOC;
+                break;
         case TPT_ERR_ACCESS:
-                if (tagged == 1) {
-                        *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
-                        *ecode = DDPT_STAG_NOT_ASSOC;
-                } else if (tagged == 2) {
-                        *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
-                        *ecode = RDMAP_STAG_NOT_ASSOC;
-                }
+                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+                *ecode = RDMAP_ACC_VIOL;
                 break;
         case TPT_ERR_WRAP:
                 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                 *ecode = RDMAP_TO_WRAP;
                 break;
         case TPT_ERR_BOUND:
-                if (tagged == 1) {
+                if (tagged) {
                         *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
                         *ecode = DDPT_BASE_BOUNDS;
-                } else if (tagged == 2) {
+                } else {
                         *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                         *ecode = RDMAP_BASE_BOUNDS;
-                } else {
-                        *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
-                        *ecode = DDPU_MSG_TOOBIG;
                 }
                 break;
         case TPT_ERR_INVALIDATE_SHARED_MR:
@@ -591,8 +610,6 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
 {
         union t3_wr *wqe;
         struct terminate_message *term;
-        int status;
-        int tagged = 0;
         struct sk_buff *skb;
 
         PDBG("%s %d\n", __FUNCTION__, __LINE__);
@@ -610,17 +627,7 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
 
         /* immediate data starts here. */
         term = (struct terminate_message *)wqe->send.sgl;
-        if (rsp_msg) {
-                status = CQE_STATUS(rsp_msg->cqe);
-                if (CQE_OPCODE(rsp_msg->cqe) == T3_RDMA_WRITE)
-                        tagged = 1;
-                if ((CQE_OPCODE(rsp_msg->cqe) == T3_READ_REQ) ||
-                    (CQE_OPCODE(rsp_msg->cqe) == T3_READ_RESP))
-                        tagged = 2;
-        } else {
-                status = TPT_ERR_INTERNAL_ERR;
-        }
-        build_term_codes(status, &term->layer_etype, &term->ecode, tagged);
+        build_term_codes(rsp_msg, &term->layer_etype, &term->ecode);
         build_fw_riwrh((void *)wqe, T3_WR_SEND,
                        T3_COMPLETION_FLAG | T3_NOTIFY_FLAG, 1,
                        qhp->ep->hwtid, 5);
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index e2cdc1a16fe9..67f0670fe3b1 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -113,7 +113,7 @@ struct ehca_qp* ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
         return ret;
 }
 
-struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
+struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
                              struct ib_ucontext *context,
                              struct ib_udata *udata)
 {
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index 95fd59fb4528..e14b029332c8 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -123,7 +123,7 @@ int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq);
 void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq);
 
 
-struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
+struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
                              struct ib_ucontext *context,
                              struct ib_udata *udata);
 
@@ -135,7 +135,7 @@ int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
 
 int ehca_peek_cq(struct ib_cq *cq, int wc_cnt);
 
-int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify);
+int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags);
 
 struct ib_qp *ehca_create_qp(struct ib_pd *pd,
                              struct ib_qp_init_attr *init_attr,
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 4700085ba834..2d370543e96d 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -313,6 +313,7 @@ int ehca_init_device(struct ehca_shca *shca)
 
         shca->ib_device.node_type = RDMA_NODE_IB_CA;
         shca->ib_device.phys_port_cnt = shca->num_ports;
+        shca->ib_device.num_comp_vectors = 1;
         shca->ib_device.dma_device = &shca->ibmebus_dev->ofdev.dev;
         shca->ib_device.query_device = ehca_query_device;
         shca->ib_device.query_port = ehca_query_port;
@@ -375,7 +376,7 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
                 return -EPERM;
         }
 
-        ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void*)(-1), 10);
+        ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void*)(-1), 10, 0);
         if (IS_ERR(ibcq)) {
                 ehca_err(&shca->ib_device, "Cannot create AQP1 CQ.");
                 return PTR_ERR(ibcq);
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 08d3f892d9f3..caec9dee09e1 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -634,11 +634,13 @@ poll_cq_exit0:
         return ret;
 }
 
-int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify)
+int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags)
 {
         struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
+        unsigned long spl_flags;
+        int ret = 0;
 
-        switch (cq_notify) {
+        switch (notify_flags & IB_CQ_SOLICITED_MASK) {
         case IB_CQ_SOLICITED:
                 hipz_set_cqx_n0(my_cq, 1);
                 break;
@@ -649,5 +651,11 @@ int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify)
                 return -EINVAL;
         }
 
-        return 0;
+        if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
+                spin_lock_irqsave(&my_cq->spinlock, spl_flags);
+                ret = ipz_qeit_is_valid(&my_cq->ipz_queue);
+                spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
+        }
+
+        return ret;
 }
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.h b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
index 8199c45768a3..57f141a36bce 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.h
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
@@ -140,6 +140,14 @@ static inline void *ipz_qeit_get_inc_valid(struct ipz_queue *queue)
         return cqe;
 }
 
+static inline int ipz_qeit_is_valid(struct ipz_queue *queue)
+{
+        struct ehca_cqe *cqe = ipz_qeit_get(queue);
+        u32 cqe_flags = cqe->cqe_flags;
+
+        return cqe_flags >> 7 == (queue->toggle_state & 1);
+}
+
 /*
  * returns and resets Queue Entry iterator
  * returns address (kv) of first Queue Entry
diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
index ea78e6dddc90..3e9241badba0 100644
--- a/drivers/infiniband/hw/ipath/ipath_cq.c
+++ b/drivers/infiniband/hw/ipath/ipath_cq.c
@@ -204,7 +204,7 @@ static void send_complete(unsigned long data)
  *
  * Called by ib_create_cq() in the generic verbs code.
  */
-struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
+struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector,
                               struct ib_ucontext *context,
                               struct ib_udata *udata)
 {
@@ -243,33 +243,21 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
          * See ipath_mmap() for details.
          */
         if (udata && udata->outlen >= sizeof(__u64)) {
-                struct ipath_mmap_info *ip;
-                __u64 offset = (__u64) wc;
                 int err;
+                u32 s = sizeof *wc + sizeof(struct ib_wc) * entries;
 
-                err = ib_copy_to_udata(udata, &offset, sizeof(offset));
-                if (err) {
-                        ret = ERR_PTR(err);
+                cq->ip = ipath_create_mmap_info(dev, s, context, wc);
+                if (!cq->ip) {
+                        ret = ERR_PTR(-ENOMEM);
                         goto bail_wc;
                 }
 
-                /* Allocate info for ipath_mmap(). */
-                ip = kmalloc(sizeof(*ip), GFP_KERNEL);
-                if (!ip) {
-                        ret = ERR_PTR(-ENOMEM);
-                        goto bail_wc;
+                err = ib_copy_to_udata(udata, &cq->ip->offset,
+                                       sizeof(cq->ip->offset));
+                if (err) {
+                        ret = ERR_PTR(err);
+                        goto bail_ip;
                 }
-                cq->ip = ip;
-                ip->context = context;
-                ip->obj = wc;
-                kref_init(&ip->ref);
-                ip->mmap_cnt = 0;
-                ip->size = PAGE_ALIGN(sizeof(*wc) +
-                                      sizeof(struct ib_wc) * entries);
-                spin_lock_irq(&dev->pending_lock);
-                ip->next = dev->pending_mmaps;
-                dev->pending_mmaps = ip;
-                spin_unlock_irq(&dev->pending_lock);
         } else
                 cq->ip = NULL;
 
@@ -277,12 +265,18 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
         if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
                 spin_unlock(&dev->n_cqs_lock);
                 ret = ERR_PTR(-ENOMEM);
-                goto bail_wc;
+                goto bail_ip;
         }
 
         dev->n_cqs_allocated++;
         spin_unlock(&dev->n_cqs_lock);
 
+        if (cq->ip) {
+                spin_lock_irq(&dev->pending_lock);
+                list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
+                spin_unlock_irq(&dev->pending_lock);
+        }
+
         /*
          * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
          * The number of entries should be >= the number requested or return
@@ -301,12 +295,12 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
 
         goto done;
 
+bail_ip:
+        kfree(cq->ip);
 bail_wc:
         vfree(wc);
-
 bail_cq:
         kfree(cq);
-
 done:
         return ret;
 }
@@ -340,17 +334,18 @@ int ipath_destroy_cq(struct ib_cq *ibcq)
 /**
  * ipath_req_notify_cq - change the notification type for a completion queue
  * @ibcq: the completion queue
- * @notify: the type of notification to request
+ * @notify_flags: the type of notification to request
  *
  * Returns 0 for success.
  *
  * This may be called from interrupt context.  Also called by
  * ib_req_notify_cq() in the generic verbs code.
  */
-int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
+int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
 {
         struct ipath_cq *cq = to_icq(ibcq);
         unsigned long flags;
+        int ret = 0;
 
         spin_lock_irqsave(&cq->lock, flags);
         /*
@@ -358,9 +353,15 @@ int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
          * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
          */
         if (cq->notify != IB_CQ_NEXT_COMP)
-                cq->notify = notify;
+                cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
+
+        if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+            cq->queue->head != cq->queue->tail)
+                ret = 1;
+
         spin_unlock_irqrestore(&cq->lock, flags);
-        return 0;
+
+        return ret;
 }
 
 /**
@@ -443,13 +444,12 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
         if (cq->ip) {
                 struct ipath_ibdev *dev = to_idev(ibcq->device);
                 struct ipath_mmap_info *ip = cq->ip;
+                u32 s = sizeof *wc + sizeof(struct ib_wc) * cqe;
 
-                ip->obj = wc;
-                ip->size = PAGE_ALIGN(sizeof(*wc) +
-                                      sizeof(struct ib_wc) * cqe);
+                ipath_update_mmap_info(dev, ip, s, wc);
                 spin_lock_irq(&dev->pending_lock);
-                ip->next = dev->pending_mmaps;
-                dev->pending_mmaps = ip;
+                if (list_empty(&ip->pending_mmaps))
+                        list_add(&ip->pending_mmaps, &dev->pending_mmaps);
                 spin_unlock_irq(&dev->pending_lock);
         }
 
diff --git a/drivers/infiniband/hw/ipath/ipath_mmap.c b/drivers/infiniband/hw/ipath/ipath_mmap.c
index a82157db4689..937bc3396b53 100644
--- a/drivers/infiniband/hw/ipath/ipath_mmap.c
+++ b/drivers/infiniband/hw/ipath/ipath_mmap.c
@@ -46,6 +46,11 @@ void ipath_release_mmap_info(struct kref *ref)
 {
         struct ipath_mmap_info *ip =
                 container_of(ref, struct ipath_mmap_info, ref);
+        struct ipath_ibdev *dev = to_idev(ip->context->device);
+
+        spin_lock_irq(&dev->pending_lock);
+        list_del(&ip->pending_mmaps);
+        spin_unlock_irq(&dev->pending_lock);
 
         vfree(ip->obj);
         kfree(ip);
@@ -60,14 +65,12 @@ static void ipath_vma_open(struct vm_area_struct *vma)
         struct ipath_mmap_info *ip = vma->vm_private_data;
 
         kref_get(&ip->ref);
-        ip->mmap_cnt++;
 }
 
 static void ipath_vma_close(struct vm_area_struct *vma)
 {
         struct ipath_mmap_info *ip = vma->vm_private_data;
 
-        ip->mmap_cnt--;
         kref_put(&ip->ref, ipath_release_mmap_info);
 }
 
@@ -87,7 +90,7 @@ int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
         struct ipath_ibdev *dev = to_idev(context->device);
         unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
         unsigned long size = vma->vm_end - vma->vm_start;
-        struct ipath_mmap_info *ip, **pp;
+        struct ipath_mmap_info *ip, *pp;
         int ret = -EINVAL;
 
         /*
@@ -96,15 +99,16 @@ int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
          * CQ, QP, or SRQ is soon followed by a call to mmap().
          */
         spin_lock_irq(&dev->pending_lock);
-        for (pp = &dev->pending_mmaps; (ip = *pp); pp = &ip->next) {
+        list_for_each_entry_safe(ip, pp, &dev->pending_mmaps,
+                                 pending_mmaps) {
                 /* Only the creator is allowed to mmap the object */
-                if (context != ip->context || (void *) offset != ip->obj)
+                if (context != ip->context || (__u64) offset != ip->offset)
                         continue;
                 /* Don't allow a mmap larger than the object. */
                 if (size > ip->size)
                         break;
 
-                *pp = ip->next;
+                list_del_init(&ip->pending_mmaps);
                 spin_unlock_irq(&dev->pending_lock);
 
                 ret = remap_vmalloc_range(vma, ip->obj, 0);
@@ -119,3 +123,51 @@ int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 done:
         return ret;
 }
+
+/*
+ * Allocate information for ipath_mmap
+ */
+struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
+                                               u32 size,
+                                               struct ib_ucontext *context,
+                                               void *obj) {
+        struct ipath_mmap_info *ip;
+
+        ip = kmalloc(sizeof *ip, GFP_KERNEL);
+        if (!ip)
+                goto bail;
+
+        size = PAGE_ALIGN(size);
+
+        spin_lock_irq(&dev->mmap_offset_lock);
+        if (dev->mmap_offset == 0)
+                dev->mmap_offset = PAGE_SIZE;
+        ip->offset = dev->mmap_offset;
+        dev->mmap_offset += size;
+        spin_unlock_irq(&dev->mmap_offset_lock);
+
+        INIT_LIST_HEAD(&ip->pending_mmaps);
+        ip->size = size;
+        ip->context = context;
+        ip->obj = obj;
+        kref_init(&ip->ref);
+
+bail:
+        return ip;
+}
+
+void ipath_update_mmap_info(struct ipath_ibdev *dev,
+                            struct ipath_mmap_info *ip,
+                            u32 size, void *obj) {
+        size = PAGE_ALIGN(size);
+
+        spin_lock_irq(&dev->mmap_offset_lock);
+        if (dev->mmap_offset == 0)
+                dev->mmap_offset = PAGE_SIZE;
+        ip->offset = dev->mmap_offset;
+        dev->mmap_offset += size;
+        spin_unlock_irq(&dev->mmap_offset_lock);
+
+        ip->size = size;
+        ip->obj = obj;
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 16db9ac0b402..bfef08ecd342 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -844,34 +844,36 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
          * See ipath_mmap() for details.
          */
         if (udata && udata->outlen >= sizeof(__u64)) {
-                struct ipath_mmap_info *ip;
-                __u64 offset = (__u64) qp->r_rq.wq;
                 int err;
 
-                err = ib_copy_to_udata(udata, &offset, sizeof(offset));
-                if (err) {
-                        ret = ERR_PTR(err);
-                        goto bail_rwq;
-                }
+                if (!qp->r_rq.wq) {
+                        __u64 offset = 0;
 
-                if (qp->r_rq.wq) {
-                        /* Allocate info for ipath_mmap(). */
-                        ip = kmalloc(sizeof(*ip), GFP_KERNEL);
-                        if (!ip) {
+                        err = ib_copy_to_udata(udata, &offset,
+                                               sizeof(offset));
+                        if (err) {
+                                ret = ERR_PTR(err);
+                                goto bail_rwq;
+                        }
+                } else {
+                        u32 s = sizeof(struct ipath_rwq) +
+                                qp->r_rq.size * sz;
+
+                        qp->ip =
+                            ipath_create_mmap_info(dev, s,
+                                                   ibpd->uobject->context,
+                                                   qp->r_rq.wq);
+                        if (!qp->ip) {
                                 ret = ERR_PTR(-ENOMEM);
                                 goto bail_rwq;
                         }
-                        qp->ip = ip;
-                        ip->context = ibpd->uobject->context;
-                        ip->obj = qp->r_rq.wq;
-                        kref_init(&ip->ref);
-                        ip->mmap_cnt = 0;
-                        ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
-                                              qp->r_rq.size * sz);
-                        spin_lock_irq(&dev->pending_lock);
-                        ip->next = dev->pending_mmaps;
-                        dev->pending_mmaps = ip;
-                        spin_unlock_irq(&dev->pending_lock);
+
+                        err = ib_copy_to_udata(udata, &(qp->ip->offset),
+                                               sizeof(qp->ip->offset));
+                        if (err) {
+                                ret = ERR_PTR(err);
+                                goto bail_ip;
+                        }
                 }
         }
 
@@ -885,6 +887,12 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
         dev->n_qps_allocated++;
         spin_unlock(&dev->n_qps_lock);
 
+        if (qp->ip) {
+                spin_lock_irq(&dev->pending_lock);
+                list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
+                spin_unlock_irq(&dev->pending_lock);
+        }
+
         ret = &qp->ibqp;
         goto bail;
 
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index b4b88d0b53f5..1915771fd038 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -98,13 +98,21 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
         case OP(RDMA_READ_RESPONSE_LAST):
         case OP(RDMA_READ_RESPONSE_ONLY):
         case OP(ATOMIC_ACKNOWLEDGE):
-                qp->s_ack_state = OP(ACKNOWLEDGE);
+                /*
+                 * We can increment the tail pointer now that the last
+                 * response has been sent instead of only being
+                 * constructed.
+                 */
+                if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
+                        qp->s_tail_ack_queue = 0;
                 /* FALLTHROUGH */
+        case OP(SEND_ONLY):
         case OP(ACKNOWLEDGE):
                 /* Check for no next entry in the queue. */
                 if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
                         if (qp->s_flags & IPATH_S_ACK_PENDING)
                                 goto normal;
+                        qp->s_ack_state = OP(ACKNOWLEDGE);
                         goto bail;
                 }
 
@@ -117,12 +125,8 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
                 if (len > pmtu) {
                         len = pmtu;
                         qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
-                } else {
+                } else
                         qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
-                        if (++qp->s_tail_ack_queue >
-                            IPATH_MAX_RDMA_ATOMIC)
-                                qp->s_tail_ack_queue = 0;
-                }
                 ohdr->u.aeth = ipath_compute_aeth(qp);
                 hwords++;
                 qp->s_ack_rdma_psn = e->psn;
@@ -139,8 +143,6 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
                                 cpu_to_be32(e->atomic_data);
                         hwords += sizeof(ohdr->u.at) / sizeof(u32);
                         bth2 = e->psn;
-                        if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
-                                qp->s_tail_ack_queue = 0;
                 }
                 bth0 = qp->s_ack_state << 24;
                 break;
@@ -156,8 +158,6 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
                         ohdr->u.aeth = ipath_compute_aeth(qp);
                         hwords++;
                         qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
-                        if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
-                                qp->s_tail_ack_queue = 0;
                 }
                 bth0 = qp->s_ack_state << 24;
                 bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
@@ -171,7 +171,7 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
                  * the ACK before setting s_ack_state to ACKNOWLEDGE
                  * (see above).
                  */
-                qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
+                qp->s_ack_state = OP(SEND_ONLY);
                 qp->s_flags &= ~IPATH_S_ACK_PENDING;
                 qp->s_cur_sge = NULL;
                 if (qp->s_nak_state)
@@ -223,23 +223,18 @@ int ipath_make_rc_req(struct ipath_qp *qp,
         /* Sending responses has higher priority over sending requests. */
         if ((qp->r_head_ack_queue != qp->s_tail_ack_queue ||
              (qp->s_flags & IPATH_S_ACK_PENDING) ||
-             qp->s_ack_state != IB_OPCODE_RC_ACKNOWLEDGE) &&
+             qp->s_ack_state != OP(ACKNOWLEDGE)) &&
             ipath_make_rc_ack(qp, ohdr, pmtu, bth0p, bth2p))
                 goto done;
 
         if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ||
-            qp->s_rnr_timeout)
+            qp->s_rnr_timeout || qp->s_wait_credit)
                 goto bail;
 
         /* Limit the number of packets sent without an ACK. */
         if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT) > 0) {
                 qp->s_wait_credit = 1;
                 dev->n_rc_stalls++;
-                spin_lock(&dev->pending_lock);
-                if (list_empty(&qp->timerwait))
-                        list_add_tail(&qp->timerwait,
-                                      &dev->pending[dev->pending_index]);
-                spin_unlock(&dev->pending_lock);
                 goto bail;
         }
 
@@ -587,9 +582,12 @@ static void send_rc_ack(struct ipath_qp *qp)
         u32 hwords;
         struct ipath_ib_header hdr;
         struct ipath_other_headers *ohdr;
+        unsigned long flags;
 
         /* Don't send ACK or NAK if a RDMA read or atomic is pending. */
-        if (qp->r_head_ack_queue != qp->s_tail_ack_queue)
+        if (qp->r_head_ack_queue != qp->s_tail_ack_queue ||
+            (qp->s_flags & IPATH_S_ACK_PENDING) ||
+            qp->s_ack_state != OP(ACKNOWLEDGE))
                 goto queue_ack;
 
         /* Construct the header. */
@@ -640,11 +638,11 @@ static void send_rc_ack(struct ipath_qp *qp)
                 dev->n_rc_qacks++;
 
 queue_ack:
-        spin_lock_irq(&qp->s_lock);
+        spin_lock_irqsave(&qp->s_lock, flags);
         qp->s_flags |= IPATH_S_ACK_PENDING;
         qp->s_nak_state = qp->r_nak_state;
         qp->s_ack_psn = qp->r_ack_psn;
-        spin_unlock_irq(&qp->s_lock);
+        spin_unlock_irqrestore(&qp->s_lock, flags);
 
         /* Call ipath_do_rc_send() in another thread. */
         tasklet_hi_schedule(&qp->s_task);
@@ -1261,6 +1259,7 @@ ack_err:
         wc.dlid_path_bits = 0;
         wc.port_num = 0;
         ipath_sqerror_qp(qp, &wc);
+        spin_unlock_irqrestore(&qp->s_lock, flags);
 bail:
         return;
 }
@@ -1294,6 +1293,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
         struct ipath_ack_entry *e;
         u8 i, prev;
         int old_req;
+        unsigned long flags;
 
         if (diff > 0) {
                 /*
@@ -1327,7 +1327,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
         psn &= IPATH_PSN_MASK;
         e = NULL;
         old_req = 1;
-        spin_lock_irq(&qp->s_lock);
+        spin_lock_irqsave(&qp->s_lock, flags);
         for (i = qp->r_head_ack_queue; ; i = prev) {
                 if (i == qp->s_tail_ack_queue)
                         old_req = 0;
@@ -1425,7 +1425,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
                  * after all the previous RDMA reads and atomics.
                  */
                 if (i == qp->r_head_ack_queue) {
-                        spin_unlock_irq(&qp->s_lock);
+                        spin_unlock_irqrestore(&qp->s_lock, flags);
                         qp->r_nak_state = 0;
                         qp->r_ack_psn = qp->r_psn - 1;
                         goto send_ack;
@@ -1439,11 +1439,10 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
                 break;
         }
         qp->r_nak_state = 0;
-        spin_unlock_irq(&qp->s_lock);
         tasklet_hi_schedule(&qp->s_task);
 
 unlock_done:
-        spin_unlock_irq(&qp->s_lock);
+        spin_unlock_irqrestore(&qp->s_lock, flags);
 done:
         return 1;
 
@@ -1453,10 +1452,12 @@ send_ack:
 
 static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
 {
-        spin_lock_irq(&qp->s_lock);
+        unsigned long flags;
+
+        spin_lock_irqsave(&qp->s_lock, flags);
         qp->state = IB_QPS_ERR;
         ipath_error_qp(qp, err);
-        spin_unlock_irq(&qp->s_lock);
+        spin_unlock_irqrestore(&qp->s_lock, flags);
 }
 
 /**
diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c
index 94033503400c..03acae66ba81 100644
--- a/drivers/infiniband/hw/ipath/ipath_srq.c
+++ b/drivers/infiniband/hw/ipath/ipath_srq.c
@@ -139,33 +139,24 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
          * See ipath_mmap() for details.
          */
         if (udata && udata->outlen >= sizeof(__u64)) {
-                struct ipath_mmap_info *ip;
-                __u64 offset = (__u64) srq->rq.wq;
                 int err;
+                u32 s = sizeof(struct ipath_rwq) + srq->rq.size * sz;
 
-                err = ib_copy_to_udata(udata, &offset, sizeof(offset));
-                if (err) {
-                        ret = ERR_PTR(err);
+                srq->ip =
+                    ipath_create_mmap_info(dev, s,
+                                           ibpd->uobject->context,
+                                           srq->rq.wq);
+                if (!srq->ip) {
+                        ret = ERR_PTR(-ENOMEM);
                         goto bail_wq;
                 }
 
-                /* Allocate info for ipath_mmap(). */
-                ip = kmalloc(sizeof(*ip), GFP_KERNEL);
-                if (!ip) {
-                        ret = ERR_PTR(-ENOMEM);
-                        goto bail_wq;
+                err = ib_copy_to_udata(udata, &srq->ip->offset,
+                                       sizeof(srq->ip->offset));
+                if (err) {
+                        ret = ERR_PTR(err);
+                        goto bail_ip;
                 }
-                srq->ip = ip;
-                ip->context = ibpd->uobject->context;
-                ip->obj = srq->rq.wq;
-                kref_init(&ip->ref);
-                ip->mmap_cnt = 0;
-                ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
-                                      srq->rq.size * sz);
-                spin_lock_irq(&dev->pending_lock);
-                ip->next = dev->pending_mmaps;
-                dev->pending_mmaps = ip;
-                spin_unlock_irq(&dev->pending_lock);
         } else
                 srq->ip = NULL;
 
@@ -181,21 +172,27 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
         if (dev->n_srqs_allocated == ib_ipath_max_srqs) {
                 spin_unlock(&dev->n_srqs_lock);
                 ret = ERR_PTR(-ENOMEM);
-                goto bail_wq;
+                goto bail_ip;
         }
 
         dev->n_srqs_allocated++;
         spin_unlock(&dev->n_srqs_lock);
 
+        if (srq->ip) {
+                spin_lock_irq(&dev->pending_lock);
+                list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
+                spin_unlock_irq(&dev->pending_lock);
+        }
+
         ret = &srq->ibsrq;
         goto done;
 
+bail_ip:
+        kfree(srq->ip);
 bail_wq:
         vfree(srq->rq.wq);
-
 bail_srq:
         kfree(srq);
-
 done:
         return ret;
 }
@@ -312,13 +309,13 @@ int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                 if (srq->ip) {
                         struct ipath_mmap_info *ip = srq->ip;
                         struct ipath_ibdev *dev = to_idev(srq->ibsrq.device);
+                        u32 s = sizeof(struct ipath_rwq) + size * sz;
 
-                        ip->obj = wq;
-                        ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
-                                              size * sz);
+                        ipath_update_mmap_info(dev, ip, s, wq);
                         spin_lock_irq(&dev->pending_lock);
-                        ip->next = dev->pending_mmaps;
-                        dev->pending_mmaps = ip;
+                        if (list_empty(&ip->pending_mmaps))
+                                list_add(&ip->pending_mmaps,
+                                         &dev->pending_mmaps);
                         spin_unlock_irq(&dev->pending_lock);
                 }
         } else if (attr_mask & IB_SRQ_LIMIT) {
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 18c6df2052c2..12933e77c7e9 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1476,7 +1476,10 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
                 ret = -ENOMEM;
                 goto err_lk;
         }
+        INIT_LIST_HEAD(&idev->pending_mmaps);
         spin_lock_init(&idev->pending_lock);
+        idev->mmap_offset = PAGE_SIZE;
+        spin_lock_init(&idev->mmap_offset_lock);
         INIT_LIST_HEAD(&idev->pending[0]);
         INIT_LIST_HEAD(&idev->pending[1]);
         INIT_LIST_HEAD(&idev->pending[2]);
@@ -1558,6 +1561,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
                 (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
         dev->node_type = RDMA_NODE_IB_CA;
         dev->phys_port_cnt = 1;
+        dev->num_comp_vectors = 1;
         dev->dma_device = &dd->pcidev->dev;
         dev->query_device = ipath_query_device;
         dev->modify_device = ipath_modify_device;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 7c4929f1cb5b..7064fc222727 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -173,12 +173,12 @@ struct ipath_ah {
  * this as its vm_private_data.
  */
 struct ipath_mmap_info {
-        struct ipath_mmap_info *next;
+        struct list_head pending_mmaps;
         struct ib_ucontext *context;
         void *obj;
+        __u64 offset;
         struct kref ref;
         unsigned size;
-        unsigned mmap_cnt;
 };
 
 /*
@@ -422,7 +422,7 @@ struct ipath_qp {
 #define IPATH_S_RDMAR_PENDING   0x04
 #define IPATH_S_ACK_PENDING     0x08
 
-#define IPATH_PSN_CREDIT        2048
+#define IPATH_PSN_CREDIT        512
 
 /*
  * Since struct ipath_swqe is not a fixed size, we can't simply index into
@@ -485,9 +485,10 @@ struct ipath_opcode_stats {
 
 struct ipath_ibdev {
         struct ib_device ibdev;
-        struct list_head dev_list;
         struct ipath_devdata *dd;
-        struct ipath_mmap_info *pending_mmaps;
+        struct list_head pending_mmaps;
+        spinlock_t mmap_offset_lock;
+        u32 mmap_offset;
         int ib_unit;            /* This is the device number */
         u16 sm_lid;             /* in host order */
         u8 sm_sl;
@@ -734,13 +735,13 @@ int ipath_destroy_srq(struct ib_srq *ibsrq);
 
 int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
 
-struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
+struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector,
                               struct ib_ucontext *context,
                               struct ib_udata *udata);
 
 int ipath_destroy_cq(struct ib_cq *ibcq);
 
-int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify);
+int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
 
 int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
 
@@ -768,6 +769,15 @@ int ipath_dealloc_fmr(struct ib_fmr *ibfmr);
 
 void ipath_release_mmap_info(struct kref *ref);
 
+struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
+                                               u32 size,
+                                               struct ib_ucontext *context,
+                                               void *obj);
+
+void ipath_update_mmap_info(struct ipath_ibdev *dev,
+                            struct ipath_mmap_info *ip,
+                            u32 size, void *obj);
+
 int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
 
 void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev);
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index efd79ef109a6..cf0868f6e965 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -726,11 +726,12 @@ repoll:
         return err == 0 || err == -EAGAIN ? npolled : err;
 }
 
-int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify)
+int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags)
 {
         __be32 doorbell[2];
 
-        doorbell[0] = cpu_to_be32((notify == IB_CQ_SOLICITED ?
+        doorbell[0] = cpu_to_be32(((flags & IB_CQ_SOLICITED_MASK) ==
+                                   IB_CQ_SOLICITED ?
                                    MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL :
                                    MTHCA_TAVOR_CQ_DB_REQ_NOT) |
                                   to_mcq(cq)->cqn);
@@ -743,7 +744,7 @@ int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify)
         return 0;
 }
 
-int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
+int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 {
         struct mthca_cq *cq = to_mcq(ibcq);
         __be32 doorbell[2];
@@ -755,7 +756,8 @@ int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
 
         doorbell[0] = ci;
         doorbell[1] = cpu_to_be32((cq->cqn << 8) | (2 << 5) | (sn << 3) |
-                                  (notify == IB_CQ_SOLICITED ? 1 : 2));
+                                  ((flags & IB_CQ_SOLICITED_MASK) ==
+                                   IB_CQ_SOLICITED ? 1 : 2));
 
         mthca_write_db_rec(doorbell, cq->arm_db);
 
@@ -766,7 +768,7 @@ int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
         wmb();
 
         doorbell[0] = cpu_to_be32((sn << 28) |
-                                  (notify == IB_CQ_SOLICITED ?
+                                  ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
                                    MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL :
                                    MTHCA_ARBEL_CQ_DB_REQ_NOT) |
                                   cq->cqn);
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index b7e42efaf43d..9bae3cc60603 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -495,8 +495,8 @@ void mthca_unmap_eq_icm(struct mthca_dev *dev);
 
 int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
                   struct ib_wc *entry);
-int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify);
-int mthca_arbel_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify);
+int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
+int mthca_arbel_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
 int mthca_init_cq(struct mthca_dev *dev, int nent,
                   struct mthca_ucontext *ctx, u32 pdn,
                   struct mthca_cq *cq);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 47e6fd46d9c2..1c05486c3c68 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -663,6 +663,7 @@ static int mthca_destroy_qp(struct ib_qp *qp)
 }
 
 static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
+                                     int comp_vector,
                                      struct ib_ucontext *context,
                                      struct ib_udata *udata)
 {
@@ -1292,6 +1293,7 @@ int mthca_register_device(struct mthca_dev *dev)
                 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
         dev->ib_dev.node_type = RDMA_NODE_IB_CA;
         dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
+        dev->ib_dev.num_comp_vectors = 1;
         dev->ib_dev.dma_device = &dev->pdev->dev;
         dev->ib_dev.query_device = mthca_query_device;
         dev->ib_dev.query_port = mthca_query_port;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 8fe6fee7a97a..fee60c852d14 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -701,6 +701,19 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
                 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
         }
 
+        if (ibqp->qp_type == IB_QPT_RC &&
+            cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+                u8 sched_queue = ibqp->uobject ? 0x2 : 0x1;
+
+                if (mthca_is_memfree(dev))
+                        qp_context->rlkey_arbel_sched_queue |= sched_queue;
+                else
+                        qp_context->tavor_sched_queue |= cpu_to_be32(sched_queue);
+
+                qp_param->opt_param_mask |=
+                        cpu_to_be32(MTHCA_QP_OPTPAR_SCHED_QUEUE);
+        }
+
         if (attr_mask & IB_QP_TIMEOUT) {
                 qp_context->pri_path.ackto = attr->timeout << 3;
                 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);