Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_qp.c')
 -rw-r--r--  drivers/infiniband/hw/ipath/ipath_qp.c  |  237
 1 files changed, 99 insertions, 138 deletions
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index dd5b6e9d57c2..4715911101e4 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -242,7 +242,6 @@ static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
 {
 	struct ipath_qp *q, **qpp;
 	unsigned long flags;
-	int fnd = 0;
 
 	spin_lock_irqsave(&qpt->lock, flags);
 
@@ -253,51 +252,40 @@ static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
 			*qpp = qp->next;
 			qp->next = NULL;
 			atomic_dec(&qp->refcount);
-			fnd = 1;
 			break;
 		}
 	}
 
 	spin_unlock_irqrestore(&qpt->lock, flags);
-
-	if (!fnd)
-		return;
-
-	free_qpn(qpt, qp->ibqp.qp_num);
-
-	wait_event(qp->wait, !atomic_read(&qp->refcount));
 }
 
 /**
- * ipath_free_all_qps - remove all QPs from the table
+ * ipath_free_all_qps - check for QPs still in use
  * @qpt: the QP table to empty
+ *
+ * There should not be any QPs still in use.
+ * Free memory for table.
  */
-void ipath_free_all_qps(struct ipath_qp_table *qpt)
+unsigned ipath_free_all_qps(struct ipath_qp_table *qpt)
 {
 	unsigned long flags;
-	struct ipath_qp *qp, *nqp;
-	u32 n;
+	struct ipath_qp *qp;
+	u32 n, qp_inuse = 0;
 
+	spin_lock_irqsave(&qpt->lock, flags);
 	for (n = 0; n < qpt->max; n++) {
-		spin_lock_irqsave(&qpt->lock, flags);
 		qp = qpt->table[n];
 		qpt->table[n] = NULL;
-		spin_unlock_irqrestore(&qpt->lock, flags);
-
-		while (qp) {
-			nqp = qp->next;
-			free_qpn(qpt, qp->ibqp.qp_num);
-			if (!atomic_dec_and_test(&qp->refcount) ||
-			    !ipath_destroy_qp(&qp->ibqp))
-				ipath_dbg("QP memory leak!\n");
-			qp = nqp;
-		}
+
+		for (; qp; qp = qp->next)
+			qp_inuse++;
 	}
+	spin_unlock_irqrestore(&qpt->lock, flags);
 
-	for (n = 0; n < ARRAY_SIZE(qpt->map); n++) {
+	for (n = 0; n < ARRAY_SIZE(qpt->map); n++)
 		if (qpt->map[n].page)
-			free_page((unsigned long)qpt->map[n].page);
-	}
+			free_page((unsigned long) qpt->map[n].page);
+	return qp_inuse;
 }
 
 /**
@@ -336,11 +324,12 @@ static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
 	qp->remote_qpn = 0;
 	qp->qkey = 0;
 	qp->qp_access_flags = 0;
-	qp->s_busy = 0;
+	atomic_set(&qp->s_dma_busy, 0);
 	qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
 	qp->s_hdrwords = 0;
 	qp->s_wqe = NULL;
 	qp->s_pkt_delay = 0;
+	qp->s_draining = 0;
 	qp->s_psn = 0;
 	qp->r_psn = 0;
 	qp->r_msn = 0;
@@ -353,7 +342,8 @@ static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
 	}
 	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
 	qp->r_nak_state = 0;
-	qp->r_wrid_valid = 0;
+	qp->r_aflags = 0;
+	qp->r_flags = 0;
 	qp->s_rnr_timeout = 0;
 	qp->s_head = 0;
 	qp->s_tail = 0;
@@ -361,7 +351,6 @@ static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
 	qp->s_last = 0;
 	qp->s_ssn = 1;
 	qp->s_lsn = 0;
-	qp->s_wait_credit = 0;
 	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
 	qp->r_head_ack_queue = 0;
 	qp->s_tail_ack_queue = 0;
@@ -370,17 +359,17 @@ static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
 		qp->r_rq.wq->head = 0;
 		qp->r_rq.wq->tail = 0;
 	}
-	qp->r_reuse_sge = 0;
 }
 
 /**
- * ipath_error_qp - put a QP into an error state
- * @qp: the QP to put into an error state
+ * ipath_error_qp - put a QP into the error state
+ * @qp: the QP to put into the error state
  * @err: the receive completion error to signal if a RWQE is active
  *
  * Flushes both send and receive work queues.
  * Returns true if last WQE event should be generated.
  * The QP s_lock should be held and interrupts disabled.
+ * If we are already in error state, just return.
  */
 
 int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
@@ -389,8 +378,10 @@ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
 	struct ib_wc wc;
 	int ret = 0;
 
-	ipath_dbg("QP%d/%d in error state (%d)\n",
-		  qp->ibqp.qp_num, qp->remote_qpn, err);
+	if (qp->state == IB_QPS_ERR)
+		goto bail;
+
+	qp->state = IB_QPS_ERR;
 
 	spin_lock(&dev->pending_lock);
 	if (!list_empty(&qp->timerwait))
@@ -399,39 +390,21 @@ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
 		list_del_init(&qp->piowait);
 	spin_unlock(&dev->pending_lock);
 
-	wc.vendor_err = 0;
-	wc.byte_len = 0;
-	wc.imm_data = 0;
+	/* Schedule the sending tasklet to drain the send work queue. */
+	if (qp->s_last != qp->s_head)
+		ipath_schedule_send(qp);
+
+	memset(&wc, 0, sizeof(wc));
 	wc.qp = &qp->ibqp;
-	wc.src_qp = 0;
-	wc.wc_flags = 0;
-	wc.pkey_index = 0;
-	wc.slid = 0;
-	wc.sl = 0;
-	wc.dlid_path_bits = 0;
-	wc.port_num = 0;
-	if (qp->r_wrid_valid) {
-		qp->r_wrid_valid = 0;
+	wc.opcode = IB_WC_RECV;
+
+	if (test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) {
 		wc.wr_id = qp->r_wr_id;
-		wc.opcode = IB_WC_RECV;
 		wc.status = err;
 		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
 	}
 	wc.status = IB_WC_WR_FLUSH_ERR;
 
-	while (qp->s_last != qp->s_head) {
-		struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
-
-		wc.wr_id = wqe->wr.wr_id;
-		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
-		if (++qp->s_last >= qp->s_size)
-			qp->s_last = 0;
-		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
-	}
-	qp->s_cur = qp->s_tail = qp->s_head;
-	qp->s_hdrwords = 0;
-	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
-
 	if (qp->r_rq.wq) {
 		struct ipath_rwq *wq;
 		u32 head;
@@ -447,7 +420,6 @@ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
 		tail = wq->tail;
 		if (tail >= qp->r_rq.size)
 			tail = 0;
-		wc.opcode = IB_WC_RECV;
 		while (tail != head) {
 			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
 			if (++tail >= qp->r_rq.size)
@@ -460,6 +432,7 @@ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
 	} else if (qp->ibqp.event_handler)
 		ret = 1;
 
+bail:
 	return ret;
 }
 
@@ -478,11 +451,10 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	struct ipath_ibdev *dev = to_idev(ibqp->device);
 	struct ipath_qp *qp = to_iqp(ibqp);
 	enum ib_qp_state cur_state, new_state;
-	unsigned long flags;
 	int lastwqe = 0;
 	int ret;
 
-	spin_lock_irqsave(&qp->s_lock, flags);
+	spin_lock_irq(&qp->s_lock);
 
 	cur_state = attr_mask & IB_QP_CUR_STATE ?
 		attr->cur_qp_state : qp->state;
@@ -535,16 +507,42 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
 	switch (new_state) {
 	case IB_QPS_RESET:
+		if (qp->state != IB_QPS_RESET) {
+			qp->state = IB_QPS_RESET;
+			spin_lock(&dev->pending_lock);
+			if (!list_empty(&qp->timerwait))
+				list_del_init(&qp->timerwait);
+			if (!list_empty(&qp->piowait))
+				list_del_init(&qp->piowait);
+			spin_unlock(&dev->pending_lock);
+			qp->s_flags &= ~IPATH_S_ANY_WAIT;
+			spin_unlock_irq(&qp->s_lock);
+			/* Stop the sending tasklet */
+			tasklet_kill(&qp->s_task);
+			wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
+			spin_lock_irq(&qp->s_lock);
+		}
 		ipath_reset_qp(qp, ibqp->qp_type);
 		break;
 
+	case IB_QPS_SQD:
+		qp->s_draining = qp->s_last != qp->s_cur;
+		qp->state = new_state;
+		break;
+
+	case IB_QPS_SQE:
+		if (qp->ibqp.qp_type == IB_QPT_RC)
+			goto inval;
+		qp->state = new_state;
+		break;
+
 	case IB_QPS_ERR:
 		lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
 		break;
 
 	default:
+		qp->state = new_state;
 		break;
-
 	}
 
 	if (attr_mask & IB_QP_PKEY_INDEX)
@@ -597,8 +595,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
 		qp->s_max_rd_atomic = attr->max_rd_atomic;
 
-	qp->state = new_state;
-	spin_unlock_irqrestore(&qp->s_lock, flags);
+	spin_unlock_irq(&qp->s_lock);
 
 	if (lastwqe) {
 		struct ib_event ev;
@@ -612,7 +609,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	goto bail;
 
 inval:
-	spin_unlock_irqrestore(&qp->s_lock, flags);
+	spin_unlock_irq(&qp->s_lock);
 	ret = -EINVAL;
 
 bail:
@@ -643,7 +640,7 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	attr->pkey_index = qp->s_pkey_index;
 	attr->alt_pkey_index = 0;
 	attr->en_sqd_async_notify = 0;
-	attr->sq_draining = 0;
+	attr->sq_draining = qp->s_draining;
 	attr->max_rd_atomic = qp->s_max_rd_atomic;
 	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
 	attr->min_rnr_timer = qp->r_min_rnr_timer;
@@ -833,6 +830,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 		spin_lock_init(&qp->r_rq.lock);
 		atomic_set(&qp->refcount, 0);
 		init_waitqueue_head(&qp->wait);
+		init_waitqueue_head(&qp->wait_dma);
 		tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
 		INIT_LIST_HEAD(&qp->piowait);
 		INIT_LIST_HEAD(&qp->timerwait);
@@ -926,6 +924,7 @@ bail_ip:
 	else
 		vfree(qp->r_rq.wq);
 	ipath_free_qp(&dev->qp_table, qp);
+	free_qpn(&dev->qp_table, qp->ibqp.qp_num);
bail_qp:
 	kfree(qp);
bail_swq:
@@ -947,41 +946,44 @@ int ipath_destroy_qp(struct ib_qp *ibqp)
 {
 	struct ipath_qp *qp = to_iqp(ibqp);
 	struct ipath_ibdev *dev = to_idev(ibqp->device);
-	unsigned long flags;
 
-	spin_lock_irqsave(&qp->s_lock, flags);
-	qp->state = IB_QPS_ERR;
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-	spin_lock(&dev->n_qps_lock);
-	dev->n_qps_allocated--;
-	spin_unlock(&dev->n_qps_lock);
+	/* Make sure HW and driver activity is stopped. */
+	spin_lock_irq(&qp->s_lock);
+	if (qp->state != IB_QPS_RESET) {
+		qp->state = IB_QPS_RESET;
+		spin_lock(&dev->pending_lock);
+		if (!list_empty(&qp->timerwait))
+			list_del_init(&qp->timerwait);
+		if (!list_empty(&qp->piowait))
+			list_del_init(&qp->piowait);
+		spin_unlock(&dev->pending_lock);
+		qp->s_flags &= ~IPATH_S_ANY_WAIT;
+		spin_unlock_irq(&qp->s_lock);
+		/* Stop the sending tasklet */
+		tasklet_kill(&qp->s_task);
+		wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
+	} else
+		spin_unlock_irq(&qp->s_lock);
 
-	/* Stop the sending tasklet. */
-	tasklet_kill(&qp->s_task);
+	ipath_free_qp(&dev->qp_table, qp);
 
 	if (qp->s_tx) {
 		atomic_dec(&qp->refcount);
 		if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
 			kfree(qp->s_tx->txreq.map_addr);
+		spin_lock_irq(&dev->pending_lock);
+		list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
+		spin_unlock_irq(&dev->pending_lock);
+		qp->s_tx = NULL;
 	}
 
-	/* Make sure the QP isn't on the timeout list. */
-	spin_lock_irqsave(&dev->pending_lock, flags);
-	if (!list_empty(&qp->timerwait))
-		list_del_init(&qp->timerwait);
-	if (!list_empty(&qp->piowait))
-		list_del_init(&qp->piowait);
-	if (qp->s_tx)
-		list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
-	spin_unlock_irqrestore(&dev->pending_lock, flags);
+	wait_event(qp->wait, !atomic_read(&qp->refcount));
 
-	/*
-	 * Make sure that the QP is not in the QPN table so receive
-	 * interrupts will discard packets for this QP. XXX Also remove QP
-	 * from multicast table.
-	 */
-	if (atomic_read(&qp->refcount) != 0)
-		ipath_free_qp(&dev->qp_table, qp);
+	/* all user's cleaned up, mark it available */
+	free_qpn(&dev->qp_table, qp->ibqp.qp_num);
+	spin_lock(&dev->n_qps_lock);
+	dev->n_qps_allocated--;
+	spin_unlock(&dev->n_qps_lock);
 
 	if (qp->ip)
 		kref_put(&qp->ip->ref, ipath_release_mmap_info);
@@ -1026,48 +1028,6 @@ bail:
 }
 
 /**
- * ipath_sqerror_qp - put a QP's send queue into an error state
- * @qp: QP who's send queue will be put into an error state
- * @wc: the WC responsible for putting the QP in this state
- *
- * Flushes the send work queue.
- * The QP s_lock should be held and interrupts disabled.
- */
-
-void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
-{
-	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
-
-	ipath_dbg("Send queue error on QP%d/%d: err: %d\n",
-		  qp->ibqp.qp_num, qp->remote_qpn, wc->status);
-
-	spin_lock(&dev->pending_lock);
-	if (!list_empty(&qp->timerwait))
-		list_del_init(&qp->timerwait);
-	if (!list_empty(&qp->piowait))
-		list_del_init(&qp->piowait);
-	spin_unlock(&dev->pending_lock);
-
-	ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
-	if (++qp->s_last >= qp->s_size)
-		qp->s_last = 0;
-
-	wc->status = IB_WC_WR_FLUSH_ERR;
-
-	while (qp->s_last != qp->s_head) {
-		wqe = get_swqe_ptr(qp, qp->s_last);
-		wc->wr_id = wqe->wr.wr_id;
-		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
-		ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
-		if (++qp->s_last >= qp->s_size)
-			qp->s_last = 0;
-	}
-	qp->s_cur = qp->s_tail = qp->s_head;
-	qp->state = IB_QPS_SQE;
-}
-
-/**
  * ipath_get_credit - flush the send work queue of a QP
  * @qp: the qp who's send work queue to flush
  * @aeth: the Acknowledge Extended Transport Header
@@ -1093,9 +1053,10 @@ void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
 	}
 
 	/* Restart sending if it was blocked due to lack of credits. */
-	if (qp->s_cur != qp->s_head &&
+	if ((qp->s_flags & IPATH_S_WAIT_SSN_CREDIT) &&
+	    qp->s_cur != qp->s_head &&
 	    (qp->s_lsn == (u32) -1 ||
 	     ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
 			 qp->s_lsn + 1) <= 0))
-		tasklet_hi_schedule(&qp->s_task);
+		ipath_schedule_send(qp);
 }
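
Note on the new ipath_free_all_qps() contract above: the function no longer frees QPs or drops references itself; it only counts QP structures still linked into the table and returns that count, leaving teardown to ipath_destroy_qp(). A minimal, hypothetical caller-side sketch is below; the real caller is elsewhere in the driver and is not part of this diff, so the wrapper name and warning text are illustrative assumptions, while struct ipath_ibdev, its qp_table member, and ipath_free_all_qps() itself come from the patch.

/*
 * Hypothetical unload-path usage of the unsigned return value added to
 * ipath_free_all_qps() in this patch.  Per the new kdoc comment, no QPs
 * should still be in use when this runs.
 */
static void example_unregister_qps(struct ipath_ibdev *dev)
{
	unsigned qps_inuse;

	/* Counts QPs still in the table and frees the QPN bitmap pages. */
	qps_inuse = ipath_free_all_qps(&dev->qp_table);
	if (qps_inuse)
		printk(KERN_WARNING "QPs still in use: %u\n", qps_inuse);
}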