| author | Bryan O'Sullivan <bos@pathscale.com> | 2006-08-25 14:24:32 -0400 |
|---|---|---|
| committer | Roland Dreier <rolandd@cisco.com> | 2006-09-22 18:22:31 -0400 |
| commit | 34b2aafea38efdf02cd8107a6e1057e2a297c447 (patch) | |
| tree | fc800510f947696156df70cf6608f8283bab868c /drivers/infiniband/hw/ipath/ipath_ruc.c | |
| parent | b1c1b6a30eac88665a35a207cc5e6233090b9d65 (diff) | |
IB/ipath: simplify layering code
A lot of ipath layer code was only called in one place. Now that the
ipath_core and ib_ipath drivers are merged, it's more sensible to simply
inline the simple stuff that the layer code was doing.
Signed-off-by: Bryan O'Sullivan <bryan.osullivan@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
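The shape of the change is easiest to see in the new want_buffer() helper in the diff below: a call that used to cross the ipath_core/ib_ipath boundary (ipath_layer_want_buffer()) becomes a static function next to its only caller, working on struct ipath_devdata directly. The following is a minimal user-space sketch of that pattern, not driver code; the struct, bit value and printf() are stand-ins for ipath_devdata, IPATH_S_PIOINTBUFAVAIL and ipath_write_kreg().

```c
/*
 * Minimal user-space sketch of the refactoring pattern (not driver code).
 * "fake_devdata", the bit position and printf() are stand-ins for
 * struct ipath_devdata, IPATH_S_PIOINTBUFAVAIL and ipath_write_kreg().
 */
#include <stdint.h>
#include <stdio.h>

struct fake_devdata {
	uint64_t sendctrl;              /* models dd->ipath_sendctrl */
};

/*
 * Before the merge the verbs code called an exported wrapper such as
 * ipath_layer_want_buffer(); afterwards the same work lives in a static
 * helper next to its only caller, like want_buffer() in the diff below.
 */
static int want_buffer(struct fake_devdata *dd)
{
	dd->sendctrl |= 1ULL << 23;          /* placeholder for set_bit(IPATH_S_PIOINTBUFAVAIL, ...) */
	printf("kr_sendctrl <- 0x%llx\n",    /* placeholder for ipath_write_kreg(...) */
	       (unsigned long long)dd->sendctrl);
	return 0;
}

int main(void)
{
	struct fake_devdata dd = { 0 };
	return want_buffer(&dd);
}
```

Keeping the helper static also lets the compiler inline it and removes one entry from the old layer interface.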
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_ruc.c')
-rw-r--r-- | drivers/infiniband/hw/ipath/ipath_ruc.c | 22
1 file changed, 15 insertions(+), 7 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index dd09420d677d..5c1da2d25e03 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -470,6 +470,15 @@ done:
                 wake_up(&qp->wait);
 }
 
+static int want_buffer(struct ipath_devdata *dd)
+{
+        set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
+        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+                         dd->ipath_sendctrl);
+
+        return 0;
+}
+
 /**
  * ipath_no_bufs_available - tell the layer driver we need buffers
  * @qp: the QP that caused the problem
@@ -486,7 +495,7 @@ void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
                 list_add_tail(&qp->piowait, &dev->piowait);
         spin_unlock_irqrestore(&dev->pending_lock, flags);
         /*
-         * Note that as soon as ipath_layer_want_buffer() is called and
+         * Note that as soon as want_buffer() is called and
          * possibly before it returns, ipath_ib_piobufavail()
          * could be called.  If we are still in the tasklet function,
          * tasklet_hi_schedule() will not call us until the next time
@@ -496,7 +505,7 @@ void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
          */
         clear_bit(IPATH_S_BUSY, &qp->s_flags);
         tasklet_unlock(&qp->s_task);
-        ipath_layer_want_buffer(dev->dd);
+        want_buffer(dev->dd);
         dev->n_piowait++;
 }
 
@@ -611,7 +620,7 @@ u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
         hdr->hop_limit = grh->hop_limit;
         /* The SGID is 32-bit aligned. */
         hdr->sgid.global.subnet_prefix = dev->gid_prefix;
-        hdr->sgid.global.interface_id = ipath_layer_get_guid(dev->dd);
+        hdr->sgid.global.interface_id = dev->dd->ipath_guid;
         hdr->dgid = grh->dgid;
 
         /* GRH header size in 32-bit words. */
@@ -643,8 +652,7 @@ void ipath_do_ruc_send(unsigned long data)
         if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
                 goto bail;
 
-        if (unlikely(qp->remote_ah_attr.dlid ==
-                     ipath_layer_get_lid(dev->dd))) {
+        if (unlikely(qp->remote_ah_attr.dlid == dev->dd->ipath_lid)) {
                 ipath_ruc_loopback(qp);
                 goto clear;
         }
@@ -711,8 +719,8 @@ again:
         qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
         qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
                                        SIZE_OF_CRC);
-        qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
-        bth0 |= ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
+        qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
+        bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index);
         bth0 |= extra_bytes << 20;
         ohdr->bth[0] = cpu_to_be32(bth0);
         ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
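The remaining hunks above apply the same idea to trivial getters: ipath_layer_get_guid() and ipath_layer_get_lid() become direct field reads (dev->dd->ipath_guid, dev->dd->ipath_lid), and ipath_layer_get_pkey() becomes the in-driver ipath_get_pkey(). Below is a stand-alone sketch of that accessor-inlining pattern, with a placeholder struct in place of the real ipath_devdata:

```c
/*
 * Stand-alone sketch of inlining a trivial accessor (not driver code).
 * "devdata" is a placeholder for struct ipath_devdata; the field names
 * mirror the ones used in the diff (ipath_lid, ipath_guid).
 */
#include <stdint.h>
#include <stdio.h>

struct devdata {
	uint16_t ipath_lid;     /* local LID, formerly via ipath_layer_get_lid()  */
	uint64_t ipath_guid;    /* node GUID, formerly via ipath_layer_get_guid() */
};

/* Old style: an exported wrapper whose only job is to return a field. */
static uint16_t layer_get_lid(struct devdata *dd)
{
	return dd->ipath_lid;
}

int main(void)
{
	struct devdata dd = { .ipath_lid = 0x1f, .ipath_guid = 0x1122334455667788ULL };

	uint16_t lid_old = layer_get_lid(&dd);   /* before: call through the layer API          */
	uint16_t lid_new = dd.ipath_lid;         /* after: direct read, like dev->dd->ipath_lid */

	printf("old 0x%x new 0x%x guid 0x%llx\n", (unsigned)lid_old,
	       (unsigned)lid_new, (unsigned long long)dd.ipath_guid);
	return lid_old != lid_new;
}
```

A direct field read trades a call and an exported symbol for a plain load, which is what the commit message means by inlining the simple stuff now that the two drivers share one module.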