Diffstat (limited to 'drivers/infiniband/hw/cxgb4')
-rw-r--r--	drivers/infiniband/hw/cxgb4/cq.c	3
-rw-r--r--	drivers/infiniband/hw/cxgb4/device.c	41
-rw-r--r--	drivers/infiniband/hw/cxgb4/ev.c	10
-rw-r--r--	drivers/infiniband/hw/cxgb4/iw_cxgb4.h	1
-rw-r--r--	drivers/infiniband/hw/cxgb4/qp.c	25
5 files changed, 66 insertions, 14 deletions
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 1720dc790d13..f35a935267e7 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -185,7 +185,7 @@ static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
 				 V_CQE_OPCODE(FW_RI_SEND) |
 				 V_CQE_TYPE(0) |
 				 V_CQE_SWCQE(1) |
-				 V_CQE_QPID(wq->rq.qid));
+				 V_CQE_QPID(wq->sq.qid));
 	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
 	cq->sw_queue[cq->sw_pidx] = cqe;
 	t4_swcq_produce(cq);
@@ -818,6 +818,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 	chp->cq.size--;				/* status page */
 	chp->ibcq.cqe = entries - 2;
 	spin_lock_init(&chp->lock);
+	spin_lock_init(&chp->comp_handler_lock);
 	atomic_set(&chp->refcnt, 1);
 	init_waitqueue_head(&chp->wait);
 	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
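
Note on the cq.c hunks above: insert_recv_cqe() now stamps the software CQE with the SQ QID rather than the RQ QID, and c4iw_create_cq() initializes the new comp_handler_lock next to the existing CQ lock before the CQ is published via insert_handle(). The fragment below is a minimal user-space sketch of that second pattern (initialize every lock in the constructor, before the object becomes reachable); the names are hypothetical and pthreads stands in for the kernel spinlock API.

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct c4iw_cq: both locks are set up in the
 * constructor, before any other thread can see the object. */
struct cq_ctx {
	pthread_spinlock_t lock;		/* poll-path lock */
	pthread_spinlock_t comp_handler_lock;	/* serializes the completion callback */
	int refcnt;
};

static struct cq_ctx *cq_ctx_create(void)
{
	struct cq_ctx *cq = calloc(1, sizeof(*cq));

	if (!cq)
		return NULL;
	pthread_spin_init(&cq->lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&cq->comp_handler_lock, PTHREAD_PROCESS_PRIVATE);
	cq->refcnt = 1;
	return cq;	/* only now may the CQ be inserted into a lookup table */
}

int main(void)
{
	struct cq_ctx *cq = cq_ctx_create();

	if (cq) {
		pthread_spin_destroy(&cq->lock);
		pthread_spin_destroy(&cq->comp_handler_lock);
		free(cq);
	}
	return 0;
}
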
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 40a13cc633a3..6d0df6ec161b 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -376,10 +376,8 @@ struct uld_ctx {
 	struct c4iw_dev *dev;
 };
 
-static void c4iw_remove(struct uld_ctx *ctx)
+static void c4iw_dealloc(struct uld_ctx *ctx)
 {
-	PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
-	c4iw_unregister_device(ctx->dev);
 	c4iw_rdev_close(&ctx->dev->rdev);
 	idr_destroy(&ctx->dev->cqidr);
 	idr_destroy(&ctx->dev->qpidr);
@@ -389,11 +387,30 @@ static void c4iw_remove(struct uld_ctx *ctx)
 	ctx->dev = NULL;
 }
 
+static void c4iw_remove(struct uld_ctx *ctx)
+{
+	PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
+	c4iw_unregister_device(ctx->dev);
+	c4iw_dealloc(ctx);
+}
+
+static int rdma_supported(const struct cxgb4_lld_info *infop)
+{
+	return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
+	       infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
+	       infop->vr->cq.size > 0 && infop->vr->ocq.size > 0;
+}
+
 static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 {
 	struct c4iw_dev *devp;
 	int ret;
 
+	if (!rdma_supported(infop)) {
+		printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n",
+		       pci_name(infop->pdev));
+		return ERR_PTR(-ENOSYS);
+	}
 	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
 	if (!devp) {
 		printk(KERN_ERR MOD "Cannot allocate ib device\n");
@@ -414,7 +431,6 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 
 	ret = c4iw_rdev_open(&devp->rdev);
 	if (ret) {
-		mutex_unlock(&dev_mutex);
 		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
 		ib_dealloc_device(&devp->ibdev);
 		return ERR_PTR(ret);
@@ -519,15 +535,24 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
 	case CXGB4_STATE_UP:
 		printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
 		if (!ctx->dev) {
-			int ret = 0;
+			int ret;
 
 			ctx->dev = c4iw_alloc(&ctx->lldi);
-			if (!IS_ERR(ctx->dev))
-				ret = c4iw_register_device(ctx->dev);
-			if (IS_ERR(ctx->dev) || ret)
+			if (IS_ERR(ctx->dev)) {
+				printk(KERN_ERR MOD
+				       "%s: initialization failed: %ld\n",
+				       pci_name(ctx->lldi.pdev),
+				       PTR_ERR(ctx->dev));
+				ctx->dev = NULL;
+				break;
+			}
+			ret = c4iw_register_device(ctx->dev);
+			if (ret) {
 				printk(KERN_ERR MOD
 				       "%s: RDMA registration failed: %d\n",
 				       pci_name(ctx->lldi.pdev), ret);
+				c4iw_dealloc(ctx);
+			}
 		}
 		break;
 	case CXGB4_STATE_DOWN:
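
Note on the device.c hunks above: the reworked CXGB4_STATE_UP path separates the two failure modes. c4iw_alloc() can hand back an errno encoded in the pointer (including -ENOSYS from the new rdma_supported() check), and a later c4iw_register_device() failure is unwound with c4iw_dealloc(). Below is a minimal, self-contained sketch of the ERR_PTR/IS_ERR idiom that path relies on; the macros are simplified stand-ins for the kernel's <linux/err.h> helpers, and dev_alloc()/dev_register() are hypothetical names.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO	4095
/* Simplified versions of the kernel's ERR_PTR()/PTR_ERR()/IS_ERR(). */
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

struct dev { int id; };

/* Returns a valid object or an errno encoded in the pointer. */
static struct dev *dev_alloc(int supported)
{
	struct dev *d;

	if (!supported)
		return ERR_PTR(-ENOSYS);	/* cf. the rdma_supported() check */
	d = calloc(1, sizeof(*d));
	return d ? d : ERR_PTR(-ENOMEM);
}

static int dev_register(struct dev *d)
{
	return d->id == 0 ? 0 : -EINVAL;	/* pretend registration can fail */
}

int main(void)
{
	struct dev *d = dev_alloc(1);

	if (IS_ERR(d)) {
		fprintf(stderr, "initialization failed: %ld\n", PTR_ERR(d));
		return 1;		/* allocation failed: nothing to free */
	}
	if (dev_register(d)) {
		fprintf(stderr, "registration failed\n");
		free(d);		/* cf. c4iw_dealloc() on this path */
		return 1;
	}
	free(d);
	return 0;
}
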
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index c13041a0aeba..397cb36cf103 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -42,6 +42,7 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
 {
 	struct ib_event event;
 	struct c4iw_qp_attributes attrs;
+	unsigned long flag;
 
 	if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||
 	    (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) {
@@ -72,7 +73,9 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
 	if (qhp->ibqp.event_handler)
 		(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
 
+	spin_lock_irqsave(&chp->comp_handler_lock, flag);
 	(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+	spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
 }
 
 void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
@@ -183,11 +186,14 @@ out:
 int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
 {
 	struct c4iw_cq *chp;
+	unsigned long flag;
 
 	chp = get_chp(dev, qid);
-	if (chp)
+	if (chp) {
+		spin_lock_irqsave(&chp->comp_handler_lock, flag);
 		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
-	else
+		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
+	} else
 		PDBG("%s unknown cqid 0x%x\n", __func__, qid);
 	return 0;
 }
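
Note on the ev.c hunks above: all three follow the same pattern. Every call into the ULP's completion handler for a given CQ is now bracketed by that CQ's comp_handler_lock, so two event paths can no longer enter the callback for the same CQ at the same time. A rough user-space analogy of serializing a per-object callback (pthreads instead of spin_lock_irqsave(); fake_cq and comp_handler are illustrative names, not the driver's):

#include <pthread.h>
#include <stdio.h>

struct fake_cq {
	pthread_mutex_t comp_handler_lock;	/* plays the role of chp->comp_handler_lock */
	int completions;
};

/* Stand-in for (*chp->ibcq.comp_handler)(...): must never run concurrently
 * for the same CQ, which is what the added lock guarantees. */
static void comp_handler(struct fake_cq *cq)
{
	cq->completions++;
}

static void *event_path(void *arg)
{
	struct fake_cq *cq = arg;

	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&cq->comp_handler_lock);
		comp_handler(cq);
		pthread_mutex_unlock(&cq->comp_handler_lock);
	}
	return NULL;
}

int main(void)
{
	struct fake_cq cq = { .comp_handler_lock = PTHREAD_MUTEX_INITIALIZER,
			      .completions = 0 };
	pthread_t a, b;		/* two "event sources" racing on one CQ */

	pthread_create(&a, NULL, event_path, &cq);
	pthread_create(&b, NULL, event_path, &cq);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("completions: %d\n", cq.completions);	/* always 200000 */
	return 0;
}
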
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 62cea0e2b158..1357c5bf209b 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -309,6 +309,7 @@ struct c4iw_cq {
 	struct c4iw_dev *rhp;
 	struct t4_cq cq;
 	spinlock_t lock;
+	spinlock_t comp_handler_lock;
 	atomic_t refcnt;
 	wait_queue_head_t wait;
 };
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index ec3ce675fdff..d6ccc7e84802 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -945,8 +945,11 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
 	flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
 	spin_unlock(&qhp->lock);
 	spin_unlock_irqrestore(&rchp->lock, flag);
-	if (flushed)
+	if (flushed) {
+		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+	}
 
 	/* locking hierarchy: cq lock first, then qp lock. */
 	spin_lock_irqsave(&schp->lock, flag);
@@ -956,13 +959,17 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
 	flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
 	spin_unlock(&qhp->lock);
 	spin_unlock_irqrestore(&schp->lock, flag);
-	if (flushed)
+	if (flushed) {
+		spin_lock_irqsave(&schp->comp_handler_lock, flag);
 		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
+		spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+	}
 }
 
 static void flush_qp(struct c4iw_qp *qhp)
 {
 	struct c4iw_cq *rchp, *schp;
+	unsigned long flag;
 
 	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
 	schp = get_chp(qhp->rhp, qhp->attr.scq);
@@ -970,8 +977,16 @@ static void flush_qp(struct c4iw_qp *qhp)
 	if (qhp->ibqp.uobject) {
 		t4_set_wq_in_error(&qhp->wq);
 		t4_set_cq_in_error(&rchp->cq);
-		if (schp != rchp)
+		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+		if (schp != rchp) {
 			t4_set_cq_in_error(&schp->cq);
+			spin_lock_irqsave(&schp->comp_handler_lock, flag);
+			(*schp->ibcq.comp_handler)(&schp->ibcq,
+					schp->ibcq.cq_context);
+			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+		}
 		return;
 	}
 	__flush_qp(qhp, rchp, schp);
@@ -1211,6 +1226,8 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 				disconnect = 1;
 				c4iw_get_ep(&qhp->ep->com);
 			}
+			if (qhp->ibqp.uobject)
+				t4_set_wq_in_error(&qhp->wq);
 			ret = rdma_fini(rhp, qhp, ep);
 			if (ret)
 				goto err;
@@ -1229,6 +1246,8 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 			break;
 		case C4IW_QP_STATE_ERROR:
 			set_state(qhp, C4IW_QP_STATE_ERROR);
+			if (qhp->ibqp.uobject)
+				t4_set_wq_in_error(&qhp->wq);
 			if (!internal) {
 				abort = 1;
 				disconnect = 1;
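
Note on the last two qp.c hunks: a user-mode QP's work queue is marked in error before rdma_fini() runs as the QP leaves RTS, and likewise on the transition to ERROR, so the error flag is already visible by the time teardown proceeds. The sketch below shows that "flag first, then tear down" ordering in isolation, using C11 atomics in place of the driver's t4_set_wq_in_error(); every name here is illustrative, not the driver's.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for a work queue with an error flag. */
struct fake_wq {
	atomic_bool in_error;
};

static void set_wq_in_error(struct fake_wq *wq)
{
	atomic_store(&wq->in_error, true);	/* step 1: flag the queue */
}

static void teardown(struct fake_wq *wq)
{
	(void)wq;				/* step 2: e.g. rdma_fini() in the driver */
}

static const char *poll_wq(struct fake_wq *wq)
{
	/* A poller that runs after teardown has started already sees the flag. */
	return atomic_load(&wq->in_error) ? "flushed in error" : "still live";
}

int main(void)
{
	struct fake_wq wq = { .in_error = false };

	set_wq_in_error(&wq);
	teardown(&wq);
	printf("%s\n", poll_wq(&wq));
	return 0;
}
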