Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_qp.c')
-rw-r--r-- | drivers/infiniband/hw/ipath/ipath_qp.c | 183
1 file changed, 91 insertions(+), 92 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 6f98632877eb..4715911101e4 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -242,7 +242,6 @@ static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
 {
 	struct ipath_qp *q, **qpp;
 	unsigned long flags;
-	int fnd = 0;
 
 	spin_lock_irqsave(&qpt->lock, flags);
 
@@ -253,51 +252,40 @@ static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
 			*qpp = qp->next;
 			qp->next = NULL;
 			atomic_dec(&qp->refcount);
-			fnd = 1;
 			break;
 		}
 	}
 
 	spin_unlock_irqrestore(&qpt->lock, flags);
-
-	if (!fnd)
-		return;
-
-	free_qpn(qpt, qp->ibqp.qp_num);
-
-	wait_event(qp->wait, !atomic_read(&qp->refcount));
 }
 
 /**
- * ipath_free_all_qps - remove all QPs from the table
+ * ipath_free_all_qps - check for QPs still in use
  * @qpt: the QP table to empty
+ *
+ * There should not be any QPs still in use.
+ * Free memory for table.
  */
-void ipath_free_all_qps(struct ipath_qp_table *qpt)
+unsigned ipath_free_all_qps(struct ipath_qp_table *qpt)
 {
 	unsigned long flags;
-	struct ipath_qp *qp, *nqp;
-	u32 n;
+	struct ipath_qp *qp;
+	u32 n, qp_inuse = 0;
 
+	spin_lock_irqsave(&qpt->lock, flags);
 	for (n = 0; n < qpt->max; n++) {
-		spin_lock_irqsave(&qpt->lock, flags);
 		qp = qpt->table[n];
 		qpt->table[n] = NULL;
-		spin_unlock_irqrestore(&qpt->lock, flags);
-
-		while (qp) {
-			nqp = qp->next;
-			free_qpn(qpt, qp->ibqp.qp_num);
-			if (!atomic_dec_and_test(&qp->refcount) ||
-			    !ipath_destroy_qp(&qp->ibqp))
-				ipath_dbg("QP memory leak!\n");
-			qp = nqp;
-		}
+
+		for (; qp; qp = qp->next)
+			qp_inuse++;
 	}
+	spin_unlock_irqrestore(&qpt->lock, flags);
 
-	for (n = 0; n < ARRAY_SIZE(qpt->map); n++) {
+	for (n = 0; n < ARRAY_SIZE(qpt->map); n++)
 		if (qpt->map[n].page)
-			free_page((unsigned long)qpt->map[n].page);
-	}
+			free_page((unsigned long) qpt->map[n].page);
+	return qp_inuse;
 }
 
 /**
@@ -336,11 +324,12 @@ static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
 	qp->remote_qpn = 0;
 	qp->qkey = 0;
 	qp->qp_access_flags = 0;
-	qp->s_busy = 0;
+	atomic_set(&qp->s_dma_busy, 0);
 	qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
 	qp->s_hdrwords = 0;
 	qp->s_wqe = NULL;
 	qp->s_pkt_delay = 0;
+	qp->s_draining = 0;
 	qp->s_psn = 0;
 	qp->r_psn = 0;
 	qp->r_msn = 0;
@@ -353,7 +342,8 @@ static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
 	}
 	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
 	qp->r_nak_state = 0;
-	qp->r_wrid_valid = 0;
+	qp->r_aflags = 0;
+	qp->r_flags = 0;
 	qp->s_rnr_timeout = 0;
 	qp->s_head = 0;
 	qp->s_tail = 0;
@@ -361,7 +351,6 @@ static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
 	qp->s_last = 0;
 	qp->s_ssn = 1;
 	qp->s_lsn = 0;
-	qp->s_wait_credit = 0;
 	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
 	qp->r_head_ack_queue = 0;
 	qp->s_tail_ack_queue = 0;
@@ -370,7 +359,6 @@ static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
 		qp->r_rq.wq->head = 0;
 		qp->r_rq.wq->tail = 0;
 	}
-	qp->r_reuse_sge = 0;
 }
 
 /**
@@ -402,39 +390,21 @@ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
 		list_del_init(&qp->piowait);
 	spin_unlock(&dev->pending_lock);
 
-	wc.vendor_err = 0;
-	wc.byte_len = 0;
-	wc.imm_data = 0;
+	/* Schedule the sending tasklet to drain the send work queue. */
+	if (qp->s_last != qp->s_head)
+		ipath_schedule_send(qp);
+
+	memset(&wc, 0, sizeof(wc));
 	wc.qp = &qp->ibqp;
-	wc.src_qp = 0;
-	wc.wc_flags = 0;
-	wc.pkey_index = 0;
-	wc.slid = 0;
-	wc.sl = 0;
-	wc.dlid_path_bits = 0;
-	wc.port_num = 0;
-	if (qp->r_wrid_valid) {
-		qp->r_wrid_valid = 0;
+	wc.opcode = IB_WC_RECV;
+
+	if (test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) {
 		wc.wr_id = qp->r_wr_id;
-		wc.opcode = IB_WC_RECV;
 		wc.status = err;
 		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
 	}
 	wc.status = IB_WC_WR_FLUSH_ERR;
 
-	while (qp->s_last != qp->s_head) {
-		struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
-
-		wc.wr_id = wqe->wr.wr_id;
-		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
-		if (++qp->s_last >= qp->s_size)
-			qp->s_last = 0;
-		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
-	}
-	qp->s_cur = qp->s_tail = qp->s_head;
-	qp->s_hdrwords = 0;
-	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
-
 	if (qp->r_rq.wq) {
 		struct ipath_rwq *wq;
 		u32 head;
@@ -450,7 +420,6 @@ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
 		tail = wq->tail;
 		if (tail >= qp->r_rq.size)
 			tail = 0;
-		wc.opcode = IB_WC_RECV;
 		while (tail != head) {
 			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
 			if (++tail >= qp->r_rq.size)
@@ -482,11 +451,10 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	struct ipath_ibdev *dev = to_idev(ibqp->device);
 	struct ipath_qp *qp = to_iqp(ibqp);
 	enum ib_qp_state cur_state, new_state;
-	unsigned long flags;
 	int lastwqe = 0;
 	int ret;
 
-	spin_lock_irqsave(&qp->s_lock, flags);
+	spin_lock_irq(&qp->s_lock);
 
 	cur_state = attr_mask & IB_QP_CUR_STATE ?
 		attr->cur_qp_state : qp->state;
@@ -539,16 +507,42 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
 	switch (new_state) {
 	case IB_QPS_RESET:
+		if (qp->state != IB_QPS_RESET) {
+			qp->state = IB_QPS_RESET;
+			spin_lock(&dev->pending_lock);
+			if (!list_empty(&qp->timerwait))
+				list_del_init(&qp->timerwait);
+			if (!list_empty(&qp->piowait))
+				list_del_init(&qp->piowait);
+			spin_unlock(&dev->pending_lock);
+			qp->s_flags &= ~IPATH_S_ANY_WAIT;
+			spin_unlock_irq(&qp->s_lock);
+			/* Stop the sending tasklet */
+			tasklet_kill(&qp->s_task);
+			wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
+			spin_lock_irq(&qp->s_lock);
+		}
 		ipath_reset_qp(qp, ibqp->qp_type);
 		break;
 
+	case IB_QPS_SQD:
+		qp->s_draining = qp->s_last != qp->s_cur;
+		qp->state = new_state;
+		break;
+
+	case IB_QPS_SQE:
+		if (qp->ibqp.qp_type == IB_QPT_RC)
+			goto inval;
+		qp->state = new_state;
+		break;
+
 	case IB_QPS_ERR:
 		lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
 		break;
 
 	default:
+		qp->state = new_state;
 		break;
-
 	}
 
 	if (attr_mask & IB_QP_PKEY_INDEX)
@@ -601,8 +595,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
 		qp->s_max_rd_atomic = attr->max_rd_atomic;
 
-	qp->state = new_state;
-	spin_unlock_irqrestore(&qp->s_lock, flags);
+	spin_unlock_irq(&qp->s_lock);
 
 	if (lastwqe) {
 		struct ib_event ev;
@@ -616,7 +609,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	goto bail;
 
 inval:
-	spin_unlock_irqrestore(&qp->s_lock, flags);
+	spin_unlock_irq(&qp->s_lock);
 	ret = -EINVAL;
 
 bail:
@@ -647,7 +640,7 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	attr->pkey_index = qp->s_pkey_index;
 	attr->alt_pkey_index = 0;
 	attr->en_sqd_async_notify = 0;
-	attr->sq_draining = 0;
+	attr->sq_draining = qp->s_draining;
 	attr->max_rd_atomic = qp->s_max_rd_atomic;
 	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
 	attr->min_rnr_timer = qp->r_min_rnr_timer;
@@ -837,6 +830,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 	spin_lock_init(&qp->r_rq.lock);
 	atomic_set(&qp->refcount, 0);
 	init_waitqueue_head(&qp->wait);
+	init_waitqueue_head(&qp->wait_dma);
 	tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
 	INIT_LIST_HEAD(&qp->piowait);
 	INIT_LIST_HEAD(&qp->timerwait);
@@ -930,6 +924,7 @@ bail_ip:
 	else
 		vfree(qp->r_rq.wq);
 	ipath_free_qp(&dev->qp_table, qp);
+	free_qpn(&dev->qp_table, qp->ibqp.qp_num);
 bail_qp:
 	kfree(qp);
 bail_swq:
@@ -951,41 +946,44 @@ int ipath_destroy_qp(struct ib_qp *ibqp)
 {
 	struct ipath_qp *qp = to_iqp(ibqp);
 	struct ipath_ibdev *dev = to_idev(ibqp->device);
-	unsigned long flags;
 
-	spin_lock_irqsave(&qp->s_lock, flags);
-	qp->state = IB_QPS_ERR;
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-	spin_lock(&dev->n_qps_lock);
-	dev->n_qps_allocated--;
-	spin_unlock(&dev->n_qps_lock);
+	/* Make sure HW and driver activity is stopped. */
+	spin_lock_irq(&qp->s_lock);
+	if (qp->state != IB_QPS_RESET) {
+		qp->state = IB_QPS_RESET;
+		spin_lock(&dev->pending_lock);
+		if (!list_empty(&qp->timerwait))
+			list_del_init(&qp->timerwait);
+		if (!list_empty(&qp->piowait))
+			list_del_init(&qp->piowait);
+		spin_unlock(&dev->pending_lock);
+		qp->s_flags &= ~IPATH_S_ANY_WAIT;
+		spin_unlock_irq(&qp->s_lock);
+		/* Stop the sending tasklet */
+		tasklet_kill(&qp->s_task);
+		wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
+	} else
+		spin_unlock_irq(&qp->s_lock);
 
-	/* Stop the sending tasklet. */
-	tasklet_kill(&qp->s_task);
+	ipath_free_qp(&dev->qp_table, qp);
 
 	if (qp->s_tx) {
 		atomic_dec(&qp->refcount);
 		if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
 			kfree(qp->s_tx->txreq.map_addr);
+		spin_lock_irq(&dev->pending_lock);
+		list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
+		spin_unlock_irq(&dev->pending_lock);
+		qp->s_tx = NULL;
 	}
 
-	/* Make sure the QP isn't on the timeout list. */
-	spin_lock_irqsave(&dev->pending_lock, flags);
-	if (!list_empty(&qp->timerwait))
-		list_del_init(&qp->timerwait);
-	if (!list_empty(&qp->piowait))
-		list_del_init(&qp->piowait);
-	if (qp->s_tx)
-		list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
-	spin_unlock_irqrestore(&dev->pending_lock, flags);
+	wait_event(qp->wait, !atomic_read(&qp->refcount));
 
-	/*
-	 * Make sure that the QP is not in the QPN table so receive
-	 * interrupts will discard packets for this QP.  XXX Also remove QP
-	 * from multicast table.
-	 */
-	if (atomic_read(&qp->refcount) != 0)
-		ipath_free_qp(&dev->qp_table, qp);
+	/* all user's cleaned up, mark it available */
+	free_qpn(&dev->qp_table, qp->ibqp.qp_num);
+	spin_lock(&dev->n_qps_lock);
+	dev->n_qps_allocated--;
+	spin_unlock(&dev->n_qps_lock);
 
 	if (qp->ip)
 		kref_put(&qp->ip->ref, ipath_release_mmap_info);
@@ -1055,9 +1053,10 @@ void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
 	}
 
 	/* Restart sending if it was blocked due to lack of credits. */
-	if (qp->s_cur != qp->s_head &&
+	if ((qp->s_flags & IPATH_S_WAIT_SSN_CREDIT) &&
+	    qp->s_cur != qp->s_head &&
 	    (qp->s_lsn == (u32) -1 ||
 	     ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
 			 qp->s_lsn + 1) <= 0))
-		tasklet_hi_schedule(&qp->s_task);
+		ipath_schedule_send(qp);
 }