Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_ruc.c')
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_ruc.c	63
1 file changed, 33 insertions, 30 deletions
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index e86cb171872e..d9c2a9b15d86 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -202,6 +202,7 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
 	wq->tail = tail;
 
 	ret = 1;
+	qp->r_wrid_valid = 1;
 	if (handler) {
 		u32 n;
 
@@ -229,7 +230,6 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
 		}
 	}
 	spin_unlock_irqrestore(&rq->lock, flags);
-	qp->r_wrid_valid = 1;
 
 bail:
 	return ret;
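These two hunks move the qp->r_wrid_valid = 1 store from after the final spin_unlock_irqrestore() to right after ret = 1, while rq->lock is still held. Presumably this fixes the SRQ limit-event path: that path drops the lock inside the if (handler) block and exits via goto bail, jumping over the old assignment site. A condensed sketch of the resulting flow (the handler structure is assumed from this function, not shown in full here):

	ret = 1;
	qp->r_wrid_valid = 1;	/* now set before any early exit */
	if (handler) {
		/* SRQ limit reached: unlock, fire the event handler,
		 * then goto bail -- which used to skip the old site. */
	}
	spin_unlock_irqrestore(&rq->lock, flags);
bail:
	return ret;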
@@ -255,6 +255,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
 	unsigned long flags;
 	struct ib_wc wc;
 	u64 sdata;
+	atomic64_t *maddr;
 
 	qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);
 	if (!qp) {
@@ -265,7 +266,8 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
 again:
 	spin_lock_irqsave(&sqp->s_lock, flags);
 
-	if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_SEND_OK)) {
+	if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_SEND_OK) ||
+	    qp->s_rnr_timeout) {
 		spin_unlock_irqrestore(&sqp->s_lock, flags);
 		goto done;
 	}
@@ -310,7 +312,7 @@ again:
 			sqp->s_rnr_retry--;
 			dev->n_rnr_naks++;
 			sqp->s_rnr_timeout =
-				ib_ipath_rnr_table[sqp->r_min_rnr_timer];
+				ib_ipath_rnr_table[qp->r_min_rnr_timer];
 			ipath_insert_rnr_queue(sqp);
 			goto done;
 		}
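The one-word change above is a correctness fix: per the IB spec, the RNR backoff interval is dictated by the responder's advertised minimum RNR NAK timer, so the table index must come from the destination QP (qp->r_min_rnr_timer), not from the sender's own field. A commented sketch of the backoff path this hunk produces, assuming the surrounding retry logic shown in the hunk:

	sqp->s_rnr_retry--;		/* consume one RNR retry */
	dev->n_rnr_naks++;		/* count the (local) RNR NAK */
	sqp->s_rnr_timeout =
		ib_ipath_rnr_table[qp->r_min_rnr_timer]; /* responder's timer */
	ipath_insert_rnr_queue(sqp);	/* rerun the send after the backoff */
	goto done;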
@@ -343,20 +345,22 @@ again:
 			wc.sl = sqp->remote_ah_attr.sl;
 			wc.dlid_path_bits = 0;
 			wc.port_num = 0;
+			spin_lock_irqsave(&sqp->s_lock, flags);
 			ipath_sqerror_qp(sqp, &wc);
+			spin_unlock_irqrestore(&sqp->s_lock, flags);
 			goto done;
 		}
 		break;
 
 	case IB_WR_RDMA_READ:
+		if (unlikely(!(qp->qp_access_flags &
+			       IB_ACCESS_REMOTE_READ)))
+			goto acc_err;
 		if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length,
 					    wqe->wr.wr.rdma.remote_addr,
 					    wqe->wr.wr.rdma.rkey,
 					    IB_ACCESS_REMOTE_READ)))
 			goto acc_err;
-		if (unlikely(!(qp->qp_access_flags &
-			       IB_ACCESS_REMOTE_READ)))
-			goto acc_err;
 		qp->r_sge.sge = wqe->sg_list[0];
 		qp->r_sge.sg_list = wqe->sg_list + 1;
 		qp->r_sge.num_sge = wqe->wr.num_sge;
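Two independent changes land in this hunk: ipath_sqerror_qp() is now called with sqp->s_lock held (the caller takes and drops it around the call), and the RDMA READ case checks the cheap per-QP permission bit before doing the rkey/MR table lookup. With the hunk applied, the RDMA READ case body reads:

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length,
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;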
@@ -364,22 +368,22 @@ again:
 
 	case IB_WR_ATOMIC_CMP_AND_SWP:
 	case IB_WR_ATOMIC_FETCH_AND_ADD:
+		if (unlikely(!(qp->qp_access_flags &
+			       IB_ACCESS_REMOTE_ATOMIC)))
+			goto acc_err;
 		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64),
-					    wqe->wr.wr.rdma.remote_addr,
-					    wqe->wr.wr.rdma.rkey,
+					    wqe->wr.wr.atomic.remote_addr,
+					    wqe->wr.wr.atomic.rkey,
 					    IB_ACCESS_REMOTE_ATOMIC)))
 			goto acc_err;
 		/* Perform atomic OP and save result. */
-		sdata = wqe->wr.wr.atomic.swap;
-		spin_lock_irqsave(&dev->pending_lock, flags);
-		qp->r_atomic_data = *(u64 *) qp->r_sge.sge.vaddr;
-		if (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
-			*(u64 *) qp->r_sge.sge.vaddr =
-				qp->r_atomic_data + sdata;
-		else if (qp->r_atomic_data == wqe->wr.wr.atomic.compare_add)
-			*(u64 *) qp->r_sge.sge.vaddr = sdata;
-		spin_unlock_irqrestore(&dev->pending_lock, flags);
-		*(u64 *) sqp->s_sge.sge.vaddr = qp->r_atomic_data;
+		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
+		sdata = wqe->wr.wr.atomic.compare_add;
+		*(u64 *) sqp->s_sge.sge.vaddr =
+			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
+			(u64) atomic64_add_return(sdata, maddr) - sdata :
+			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
+				      sdata, wqe->wr.wr.atomic.swap);
 		goto send_comp;
 
 	default:
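Besides fixing the copy-and-paste use of the rdma union member for atomic work requests (remote_addr and rkey now come from wr.atomic), this hunk replaces the dev->pending_lock critical section with lock-free primitives. The key subtlety is what each primitive returns; a minimal standalone sketch of the same return-the-old-value pattern (function names here are illustrative, not from the driver):

	#include <linux/atomic.h>	/* <asm/atomic.h> in kernels of this era */

	/* atomic64_add_return() yields the *new* value, so subtracting the
	 * addend recovers the old value that a fetch-and-add must return
	 * to the requester.
	 */
	static u64 emulate_fetch_add(atomic64_t *mem, u64 add)
	{
		return (u64) atomic64_add_return(add, mem) - add;
	}

	/* cmpxchg() already returns the old value, whether or not the swap
	 * took place, which is exactly the compare-and-swap reply.
	 */
	static u64 emulate_cmp_swap(u64 *mem, u64 compare, u64 swap_val)
	{
		return cmpxchg(mem, compare, swap_val);
	}

Note also that sdata now holds wr.atomic.compare_add for both opcodes: it is the addend for fetch-and-add and the compare value for compare-and-swap.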
@@ -440,7 +444,7 @@ again:
 send_comp:
 	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
 
-	if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &sqp->s_flags) ||
+	if (!(sqp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
 	    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
 		wc.wr_id = wqe->wr.wr_id;
 		wc.status = IB_WC_SUCCESS;
@@ -502,7 +506,7 @@ void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
 	 * We clear the tasklet flag now since we are committing to return
 	 * from the tasklet function.
 	 */
-	clear_bit(IPATH_S_BUSY, &qp->s_flags);
+	clear_bit(IPATH_S_BUSY, &qp->s_busy);
 	tasklet_unlock(&qp->s_task);
 	want_buffer(dev->dd);
 	dev->n_piowait++;
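Here and in the hunks below, IPATH_S_BUSY moves out of s_flags into a dedicated qp->s_busy word. A plausible motivation: test_and_set_bit() and clear_bit() are atomic read-modify-write operations on a whole unsigned long, so giving the busy bit its own word lets the remaining s_flags bits be read with plain loads, as the IPATH_S_SIGNAL_REQ_WR test above now does. The guard pattern itself, as a self-contained sketch with hypothetical names:

	#include <linux/bitops.h>

	#define MY_BUSY 0	/* bit index within the busy word */

	/* Single-entry guard: only the caller that wins test_and_set_bit()
	 * runs the body; every other caller backs off immediately.
	 */
	static void run_one_at_a_time(unsigned long *busy)
	{
		if (test_and_set_bit(MY_BUSY, busy))
			return;			/* someone else is already in */
		/* ... do the serialized work ... */
		clear_bit(MY_BUSY, busy);	/* reopen the door */
	}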
@@ -541,6 +545,9 @@ int ipath_post_ruc_send(struct ipath_qp *qp, struct ib_send_wr *wr)
 		    wr->sg_list[0].addr & (sizeof(u64) - 1))) {
 		ret = -EINVAL;
 		goto bail;
+	} else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) {
+		ret = -EINVAL;
+		goto bail;
 	}
 	/* IB spec says that num_sge == 0 is OK. */
 	if (wr->num_sge > qp->s_max_sge) {
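The added else-if makes posting fail fast: an RDMA READ or atomic work request on a QP negotiated with s_max_rd_atomic == 0 now returns -EINVAL at post time instead of being accepted by a QP that can never initiate it. The `>= IB_WR_RDMA_READ` shorthand works because of the opcode ordering in this era's <rdma/ib_verbs.h>, where the three initiator read/atomic opcodes sort last:

	enum ib_wr_opcode {
		IB_WR_RDMA_WRITE,
		IB_WR_RDMA_WRITE_WITH_IMM,
		IB_WR_SEND,
		IB_WR_SEND_WITH_IMM,
		IB_WR_RDMA_READ,		/* >= here needs an rd_atomic slot */
		IB_WR_ATOMIC_CMP_AND_SWP,
		IB_WR_ATOMIC_FETCH_AND_ADD
	};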
@@ -647,7 +654,7 @@ void ipath_do_ruc_send(unsigned long data)
 	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
 	struct ipath_other_headers *ohdr;
 
-	if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
+	if (test_and_set_bit(IPATH_S_BUSY, &qp->s_busy))
 		goto bail;
 
 	if (unlikely(qp->remote_ah_attr.dlid == dev->dd->ipath_lid)) {
@@ -683,19 +690,15 @@ again:
 	 */
 	spin_lock_irqsave(&qp->s_lock, flags);
 
-	/* Sending responses has higher priority over sending requests. */
-	if (qp->s_ack_state != IB_OPCODE_RC_ACKNOWLEDGE &&
-	    (bth0 = ipath_make_rc_ack(qp, ohdr, pmtu)) != 0)
-		bth2 = qp->s_ack_psn++ & IPATH_PSN_MASK;
-	else if (!((qp->ibqp.qp_type == IB_QPT_RC) ?
-		   ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2) :
-		   ipath_make_uc_req(qp, ohdr, pmtu, &bth0, &bth2))) {
+	if (!((qp->ibqp.qp_type == IB_QPT_RC) ?
+	      ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2) :
+	      ipath_make_uc_req(qp, ohdr, pmtu, &bth0, &bth2))) {
 		/*
 		 * Clear the busy bit before unlocking to avoid races with
 		 * adding new work queue items and then failing to process
 		 * them.
 		 */
-		clear_bit(IPATH_S_BUSY, &qp->s_flags);
+		clear_bit(IPATH_S_BUSY, &qp->s_busy);
 		spin_unlock_irqrestore(&qp->s_lock, flags);
 		goto bail;
 	}
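The deleted branch gave responder ACK generation priority inside the send tasklet, via ipath_make_rc_ack() and the s_ack_psn sequencing. After this hunk the tasklet only builds RC or UC requests, so ACKs must be produced elsewhere; presumably on the receive path, where ipath_send_rc_ack() already emits simple acknowledgements in this driver. The comment about clearing the busy bit before unlocking is unchanged and still states the ordering constraint that the new qp->s_busy word must obey.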
@@ -728,7 +731,7 @@ again:
 		goto again;
 
 clear:
-	clear_bit(IPATH_S_BUSY, &qp->s_flags);
+	clear_bit(IPATH_S_BUSY, &qp->s_busy);
 bail:
 	return;
 }