Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_qp.c')
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_qp.c | 133
1 file changed, 82 insertions(+), 51 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 64f07b19349f..16db9ac0b402 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -81,11 +81,51 @@ static u32 credit_table[31] = {
 	32768			/* 1E */
 };
 
-static u32 alloc_qpn(struct ipath_qp_table *qpt)
+
+static void get_map_page(struct ipath_qp_table *qpt, struct qpn_map *map)
+{
+	unsigned long page = get_zeroed_page(GFP_KERNEL);
+	unsigned long flags;
+
+	/*
+	 * Free the page if someone raced with us installing it.
+	 */
+
+	spin_lock_irqsave(&qpt->lock, flags);
+	if (map->page)
+		free_page(page);
+	else
+		map->page = (void *)page;
+	spin_unlock_irqrestore(&qpt->lock, flags);
+}
+
+
+static int alloc_qpn(struct ipath_qp_table *qpt, enum ib_qp_type type)
 {
 	u32 i, offset, max_scan, qpn;
 	struct qpn_map *map;
-	u32 ret;
+	u32 ret = -1;
+
+	if (type == IB_QPT_SMI)
+		ret = 0;
+	else if (type == IB_QPT_GSI)
+		ret = 1;
+
+	if (ret != -1) {
+		map = &qpt->map[0];
+		if (unlikely(!map->page)) {
+			get_map_page(qpt, map);
+			if (unlikely(!map->page)) {
+				ret = -ENOMEM;
+				goto bail;
+			}
+		}
+		if (!test_and_set_bit(ret, map->page))
+			atomic_dec(&map->n_free);
+		else
+			ret = -EBUSY;
+		goto bail;
+	}
 
 	qpn = qpt->last + 1;
 	if (qpn >= QPN_MAX)
@@ -95,19 +135,7 @@ static u32 alloc_qpn(struct ipath_qp_table *qpt)
 	max_scan = qpt->nmaps - !offset;
 	for (i = 0;;) {
 		if (unlikely(!map->page)) {
-			unsigned long page = get_zeroed_page(GFP_KERNEL);
-			unsigned long flags;
-
-			/*
-			 * Free the page if someone raced with us
-			 * installing it:
-			 */
-			spin_lock_irqsave(&qpt->lock, flags);
-			if (map->page)
-				free_page(page);
-			else
-				map->page = (void *)page;
-			spin_unlock_irqrestore(&qpt->lock, flags);
+			get_map_page(qpt, map);
 			if (unlikely(!map->page))
 				break;
 		}
@@ -151,7 +179,7 @@ static u32 alloc_qpn(struct ipath_qp_table *qpt)
 		qpn = mk_qpn(qpt, map, offset);
 	}
 
-	ret = 0;
+	ret = -ENOMEM;
 
 bail:
 	return ret;
@@ -180,29 +208,19 @@ static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
 			   enum ib_qp_type type)
 {
 	unsigned long flags;
-	u32 qpn;
 	int ret;
 
-	if (type == IB_QPT_SMI)
-		qpn = 0;
-	else if (type == IB_QPT_GSI)
-		qpn = 1;
-	else {
-		/* Allocate the next available QPN */
-		qpn = alloc_qpn(qpt);
-		if (qpn == 0) {
-			ret = -ENOMEM;
-			goto bail;
-		}
-	}
-	qp->ibqp.qp_num = qpn;
+	ret = alloc_qpn(qpt, type);
+	if (ret < 0)
+		goto bail;
+	qp->ibqp.qp_num = ret;
 
 	/* Add the QP to the hash table. */
 	spin_lock_irqsave(&qpt->lock, flags);
 
-	qpn %= qpt->max;
-	qp->next = qpt->table[qpn];
-	qpt->table[qpn] = qp;
+	ret %= qpt->max;
+	qp->next = qpt->table[ret];
+	qpt->table[ret] = qp;
 	atomic_inc(&qp->refcount);
 
 	spin_unlock_irqrestore(&qpt->lock, flags);
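
The two hunks above restructure QPN allocation: alloc_qpn() now reserves QPN 0 (SMI) and QPN 1 (GSI) itself, taking them from bits 0 and 1 of map[0], and returns either the allocated QPN or a negative errno, so ipath_alloc_qpn() can drop its per-type special cases and treat the return value uniformly. Below is a minimal user-space sketch of the underlying idiom: a lazily allocated bitmap page whose installation tolerates a race by simply freeing the loser's copy. All names are hypothetical stand-ins, with a pthread mutex in place of the driver's spin_lock_irqsave(); this illustrates the pattern, not the driver's code.

/*
 * Sketch of the lazy, race-tolerant bitmap allocator (illustrative
 * names only).  Build with: cc sketch.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MAP_BITS 4096

struct qpn_map_sketch {
	unsigned char *page;		/* lazily allocated bitmap */
	pthread_mutex_t lock;
};

static void get_map_page_sketch(struct qpn_map_sketch *map)
{
	unsigned char *page = calloc(1, MAP_BITS / 8);

	pthread_mutex_lock(&map->lock);
	if (map->page)
		free(page);		/* lost the install race: discard ours */
	else
		map->page = page;	/* won the race: install it */
	pthread_mutex_unlock(&map->lock);
}

/* Returns an allocated number >= first, or -1 if the map is exhausted. */
static int alloc_qpn_sketch(struct qpn_map_sketch *map, int first)
{
	int i;

	if (!map->page) {
		get_map_page_sketch(map);
		if (!map->page)
			return -1;	/* allocation failed even after retry */
	}
	pthread_mutex_lock(&map->lock);
	for (i = first; i < MAP_BITS; i++) {
		if (!(map->page[i / 8] & (1u << (i % 8)))) {
			map->page[i / 8] |= 1u << (i % 8);
			pthread_mutex_unlock(&map->lock);
			return i;
		}
	}
	pthread_mutex_unlock(&map->lock);
	return -1;
}

int main(void)
{
	struct qpn_map_sketch map = { NULL, PTHREAD_MUTEX_INITIALIZER };

	printf("SMI gets %d\n", alloc_qpn_sketch(&map, 0));	/* 0 */
	printf("GSI gets %d\n", alloc_qpn_sketch(&map, 0));	/* 1 */
	printf("ordinary QPN: %d\n", alloc_qpn_sketch(&map, 2));/* 2 */
	return 0;
}

The reserve-low-numbers trick falls out naturally: ordinary QPs just start the scan past the reserved bits, and freeing a QPN is a single clear-bit regardless of type, which is why the free_qpn() call sites below lose their qp_num > 1 guards.
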
@@ -245,9 +263,7 @@ static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
 	if (!fnd)
 		return;
 
-	/* If QPN is not reserved, mark QPN free in the bitmap. */
-	if (qp->ibqp.qp_num > 1)
-		free_qpn(qpt, qp->ibqp.qp_num);
+	free_qpn(qpt, qp->ibqp.qp_num);
 
 	wait_event(qp->wait, !atomic_read(&qp->refcount));
 }
@@ -270,11 +286,10 @@ void ipath_free_all_qps(struct ipath_qp_table *qpt)
 
 		while (qp) {
 			nqp = qp->next;
-			if (qp->ibqp.qp_num > 1)
-				free_qpn(qpt, qp->ibqp.qp_num);
+			free_qpn(qpt, qp->ibqp.qp_num);
 			if (!atomic_dec_and_test(&qp->refcount) ||
 			    !ipath_destroy_qp(&qp->ibqp))
-				ipath_dbg(KERN_INFO "QP memory leak!\n");
+				ipath_dbg("QP memory leak!\n");
 			qp = nqp;
 		}
 	}
@@ -320,7 +335,8 @@ static void ipath_reset_qp(struct ipath_qp *qp)
 	qp->remote_qpn = 0;
 	qp->qkey = 0;
 	qp->qp_access_flags = 0;
-	clear_bit(IPATH_S_BUSY, &qp->s_flags);
+	qp->s_busy = 0;
+	qp->s_flags &= ~IPATH_S_SIGNAL_REQ_WR;
 	qp->s_hdrwords = 0;
 	qp->s_psn = 0;
 	qp->r_psn = 0;
@@ -333,7 +349,6 @@ static void ipath_reset_qp(struct ipath_qp *qp)
 		qp->r_state = IB_OPCODE_UC_SEND_LAST;
 	}
 	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
-	qp->r_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
 	qp->r_nak_state = 0;
 	qp->r_wrid_valid = 0;
 	qp->s_rnr_timeout = 0;
@@ -344,6 +359,10 @@ static void ipath_reset_qp(struct ipath_qp *qp)
 	qp->s_ssn = 1;
 	qp->s_lsn = 0;
 	qp->s_wait_credit = 0;
+	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
+	qp->r_head_ack_queue = 0;
+	qp->s_tail_ack_queue = 0;
+	qp->s_num_rd_atomic = 0;
 	if (qp->r_rq.wq) {
 		qp->r_rq.wq->head = 0;
 		qp->r_rq.wq->tail = 0;
@@ -357,7 +376,7 @@ static void ipath_reset_qp(struct ipath_qp *qp)
  * @err: the receive completion error to signal if a RWQE is active
  *
  * Flushes both send and receive work queues.
- * QP s_lock should be held and interrupts disabled.
+ * The QP s_lock should be held and interrupts disabled.
  */
 
 void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
@@ -365,7 +384,7 @@ void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
 	struct ib_wc wc;
 
-	ipath_dbg(KERN_INFO "QP%d/%d in error state\n",
+	ipath_dbg("QP%d/%d in error state\n",
 		  qp->ibqp.qp_num, qp->remote_qpn);
 
 	spin_lock(&dev->pending_lock);
@@ -389,6 +408,8 @@ void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
 	wc.port_num = 0;
 	if (qp->r_wrid_valid) {
 		qp->r_wrid_valid = 0;
+		wc.wr_id = qp->r_wr_id;
+		wc.opcode = IB_WC_RECV;
 		wc.status = err;
 		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
 	}
@@ -503,13 +524,17 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	    attr->path_mig_state != IB_MIG_REARM)
 		goto inval;
 
+	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+		if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
+			goto inval;
+
 	switch (new_state) {
 	case IB_QPS_RESET:
 		ipath_reset_qp(qp);
 		break;
 
 	case IB_QPS_ERR:
-		ipath_error_qp(qp, IB_WC_GENERAL_ERR);
+		ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
 		break;
 
 	default:
@@ -559,6 +584,12 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	if (attr_mask & IB_QP_QKEY)
 		qp->qkey = attr->qkey;
 
+	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
+
+	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
+		qp->s_max_rd_atomic = attr->max_rd_atomic;
+
 	qp->state = new_state;
 	spin_unlock_irqrestore(&qp->s_lock, flags);
 
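
These two ipath_modify_qp() hunks follow the usual validate-then-commit shape: the new IB_QP_MAX_DEST_RD_ATOMIC bound check sits with the other attribute validation before any QP state is written, and the two rd_atomic fields are stored only once the transition is accepted, so a rejected modify leaves the QP untouched. A compact sketch of that shape, using hypothetical names and a made-up limit standing in for IPATH_MAX_RDMA_ATOMIC:

#include <stdio.h>

#define MAX_RDMA_ATOMIC_SKETCH	4	/* stand-in for the device limit */
#define MASK_DEST_RD_ATOMIC	(1u << 0)	/* stand-ins for the IB_QP_* masks */
#define MASK_QP_RD_ATOMIC	(1u << 1)

struct qp_sketch {
	unsigned r_max_rd_atomic;	/* responder depth we advertise */
	unsigned s_max_rd_atomic;	/* initiator depth we may issue */
};

static int modify_sketch(struct qp_sketch *qp, unsigned mask,
			 unsigned max_dest, unsigned max_init)
{
	/* Phase 1: validate every requested attribute; no writes yet. */
	if ((mask & MASK_DEST_RD_ATOMIC) && max_dest > MAX_RDMA_ATOMIC_SKETCH)
		return -1;

	/* Phase 2: commit, now that nothing below can fail. */
	if (mask & MASK_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = max_dest;
	if (mask & MASK_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = max_init;
	return 0;
}

int main(void)
{
	struct qp_sketch qp = { 0, 0 };

	/* Rejected: depth exceeds the limit, QP left untouched. */
	printf("%d\n", modify_sketch(&qp, MASK_DEST_RD_ATOMIC, 16, 0));
	/* Accepted: both limits land in the QP. */
	printf("%d\n", modify_sketch(&qp,
			MASK_DEST_RD_ATOMIC | MASK_QP_RD_ATOMIC, 4, 4));
	return 0;
}

The query-path hunk that follows is the read side of the same change: ipath_query_qp() now reports the stored limits instead of the hard-coded 1.
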
@@ -598,8 +629,8 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	attr->alt_pkey_index = 0;
 	attr->en_sqd_async_notify = 0;
 	attr->sq_draining = 0;
-	attr->max_rd_atomic = 1;
-	attr->max_dest_rd_atomic = 1;
+	attr->max_rd_atomic = qp->s_max_rd_atomic;
+	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
 	attr->min_rnr_timer = qp->r_min_rnr_timer;
 	attr->port_num = 1;
 	attr->timeout = qp->timeout;
@@ -614,7 +645,7 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	init_attr->recv_cq = qp->ibqp.recv_cq;
 	init_attr->srq = qp->ibqp.srq;
 	init_attr->cap = attr->cap;
-	if (qp->s_flags & (1 << IPATH_S_SIGNAL_REQ_WR))
+	if (qp->s_flags & IPATH_S_SIGNAL_REQ_WR)
 		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
 	else
 		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
@@ -786,7 +817,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 		qp->s_size = init_attr->cap.max_send_wr + 1;
 		qp->s_max_sge = init_attr->cap.max_send_sge;
 		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
-			qp->s_flags = 1 << IPATH_S_SIGNAL_REQ_WR;
+			qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
 		else
 			qp->s_flags = 0;
 		dev = to_idev(ibpd->device);
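
This hunk and the ipath_query_qp() hunk above change IPATH_S_SIGNAL_REQ_WR from a bit number into a bit mask, and the reset-path hunk earlier likewise stops using clear_bit() on s_flags, moving the busy indication into its own s_busy word. Mixing bit-number and mask conventions on the same word is an easy way to test the wrong bit, which is plausibly what motivated the cleanup; the toy program below, with invented flag names, shows the failure mode:

#include <stdio.h>

#define FLAG_BITNR	1		/* bit number, set_bit()-style */
#define FLAG_MASK	(1u << 1)	/* the same flag as a mask */

int main(void)
{
	unsigned long flags = 0;

	flags |= 1ul << FLAG_BITNR;	/* set by bit number: bit 1 */

	/* Buggy mix-up: testing the bit number as if it were a mask
	 * silently checks bit 0 instead of bit 1. */
	printf("wrong test: %d\n", !!(flags & FLAG_BITNR));	/* prints 0 */

	/* Consistent mask usage, as the diff adopts for s_flags. */
	printf("right test: %d\n", !!(flags & FLAG_MASK));	/* prints 1 */
	return 0;
}

Once every s_flags user agrees the value is a mask, plain &, | and &= ~ operations under the QP's lock suffice and no atomic bitops are needed.
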
@@ -958,7 +989,7 @@ bail:
  * @wc: the WC responsible for putting the QP in this state
  *
  * Flushes the send work queue.
- * The QP s_lock should be held.
+ * The QP s_lock should be held and interrupts disabled.
  */
 
 void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
@@ -966,7 +997,7 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
 	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
 
-	ipath_dbg(KERN_INFO "Send queue error on QP%d/%d: err: %d\n",
+	ipath_dbg("Send queue error on QP%d/%d: err: %d\n",
 		  qp->ibqp.qp_num, qp->remote_qpn, wc->status);
 
 	spin_lock(&dev->pending_lock);
@@ -984,12 +1015,12 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
 	wc->status = IB_WC_WR_FLUSH_ERR;
 
 	while (qp->s_last != qp->s_head) {
+		wqe = get_swqe_ptr(qp, qp->s_last);
 		wc->wr_id = wqe->wr.wr_id;
 		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
 		ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
 		if (++qp->s_last >= qp->s_size)
 			qp->s_last = 0;
-		wqe = get_swqe_ptr(qp, qp->s_last);
 	}
 	qp->s_cur = qp->s_tail = qp->s_head;
 	qp->state = IB_QPS_SQE;
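
The final hunk moves the get_swqe_ptr() read to the top of the flush loop, so each flushed completion is built from the entry at s_last before the index advances and wraps. A small self-contained model of that consumer loop over a ring, with illustrative types rather than the driver's:

#include <stdio.h>

#define RING_SIZE 8

struct wqe_sketch { unsigned long wr_id; };

/* Complete every entry from *last up to head, wrapping like s_last. */
static void flush_ring(struct wqe_sketch *ring, unsigned *last, unsigned head)
{
	while (*last != head) {
		struct wqe_sketch *wqe = &ring[*last];	/* read before advancing */

		printf("flush wr_id %lu\n", wqe->wr_id);/* stand-in for the CQ entry */
		if (++*last >= RING_SIZE)
			*last = 0;			/* wrap, like s_last */
	}
}

int main(void)
{
	struct wqe_sketch ring[RING_SIZE] = { {10}, {11}, {12} };
	unsigned last = 0;

	flush_ring(ring, &last, 3);	/* completes entries 0..2 in order */
	return 0;
}

Fetching the entry at the head of each iteration also keeps the loop correct if it is ever entered with s_last already equal to s_head: the body simply never runs and no stale pointer is touched.
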