Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c     |  61
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c    |   6
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h       |  15
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c    |  44
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c  |   5
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c |   6
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c        | 145
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h        |   7
8 files changed, 170 insertions, 119 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index f673c461e30b..1bc2678c2fae 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -483,13 +483,20 @@ out:
         return err;
 }
 
-static void mthca_path_set(struct ib_ah_attr *ah, struct mthca_qp_path *path)
+static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah,
+                          struct mthca_qp_path *path)
 {
         path->g_mylmc     = ah->src_path_bits & 0x7f;
         path->rlid        = cpu_to_be16(ah->dlid);
         path->static_rate = !!ah->static_rate;
 
         if (ah->ah_flags & IB_AH_GRH) {
+                if (ah->grh.sgid_index >= dev->limits.gid_table_len) {
+                        mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n",
+                                  ah->grh.sgid_index, dev->limits.gid_table_len-1);
+                        return -1;
+                }
+
                 path->g_mylmc   |= 1 << 7;
                 path->mgid_index = ah->grh.sgid_index;
                 path->hop_limit  = ah->grh.hop_limit;
@@ -500,6 +507,8 @@ static void mthca_path_set(struct ib_ah_attr *ah, struct mthca_qp_path *path)
                 memcpy(path->rgid, ah->grh.dgid.raw, 16);
         } else
                 path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);
+
+        return 0;
 }
 
 int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
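The change above turns mthca_path_set() into a function that can fail: an address vector whose sgid_index points past the device's GID table would otherwise be programmed into the hardware unchecked. A minimal userspace sketch of the bounds-check-before-index pattern (the table length and types are illustrative stand-ins, not mthca's):

```c
#include <stdio.h>

#define GID_TABLE_LEN 32        /* stand-in for dev->limits.gid_table_len */

/* Validate the GID index before it is ever used to index the table. */
static int path_set_sgid(unsigned int sgid_index)
{
        if (sgid_index >= GID_TABLE_LEN) {
                fprintf(stderr, "sgid_index (%u) too large. max is %d\n",
                        sgid_index, GID_TABLE_LEN - 1);
                return -1;
        }
        /* ... safe to store sgid_index as path->mgid_index ... */
        return 0;
}

int main(void)
{
        printf("%d\n", path_set_sgid(5));       /* 0: accepted */
        printf("%d\n", path_set_sgid(40));      /* -1: rejected */
        return 0;
}
```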
@@ -592,8 +601,14 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 
         if (qp->transport == MLX || qp->transport == UD)
                 qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
-        else if (attr_mask & IB_QP_PATH_MTU)
+        else if (attr_mask & IB_QP_PATH_MTU) {
+                if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) {
+                        mthca_dbg(dev, "path MTU (%u) is invalid\n",
+                                  attr->path_mtu);
+                        return -EINVAL;
+                }
                 qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
+        }
 
         if (mthca_is_memfree(dev)) {
                 if (qp->rq.max)
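The mtu_msgmax byte packs two fields: the top three bits carry the IB path-MTU code and the low five bits carry log2 of the maximum message size, which is why the valid codes must lie between IB_MTU_256 and IB_MTU_2048 here. A sketch of the arithmetic, assuming the standard IB enum values (IB_MTU_256 = 1 through IB_MTU_4096 = 5):

```c
#include <stdio.h>

enum { IB_MTU_256 = 1, IB_MTU_512, IB_MTU_1024, IB_MTU_2048, IB_MTU_4096 };

int main(void)
{
        /* RC/UC: requested path MTU, 2^31-byte max message. */
        unsigned char rc = (IB_MTU_2048 << 5) | 31;
        /* UD/MLX: fixed 2048-byte MTU, 2^11 = 2048-byte max message. */
        unsigned char ud = (IB_MTU_2048 << 5) | 11;

        printf("rc: mtu code %u, msgmax 2^%u\n", rc >> 5, rc & 0x1f);
        printf("ud: mtu code %u, msgmax 2^%u\n", ud >> 5, ud & 0x1f);
        return 0;
}
```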
@@ -642,7 +657,9 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
         }
 
         if (attr_mask & IB_QP_AV) {
-                mthca_path_set(&attr->ah_attr, &qp_context->pri_path);
+                if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path))
+                        return -EINVAL;
+
                 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
         }
 
@@ -664,7 +681,9 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
                         return -EINVAL;
                 }
 
-                mthca_path_set(&attr->alt_ah_attr, &qp_context->alt_path);
+                if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path))
+                        return -EINVAL;
+
                 qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
                                                               attr->alt_port_num << 24);
                 qp_context->alt_path.ackto = attr->alt_timeout << 3;
@@ -758,21 +777,20 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 
         err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
                               mailbox, sqd_event, &status);
+        if (err)
+                goto out;
         if (status) {
                 mthca_warn(dev, "modify QP %d->%d returned status %02x.\n",
                            cur_state, new_state, status);
                 err = -EINVAL;
+                goto out;
         }
 
-        if (!err) {
-                qp->state = new_state;
-                if (attr_mask & IB_QP_ACCESS_FLAGS)
-                        qp->atomic_rd_en = attr->qp_access_flags;
-                if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
-                        qp->resp_depth = attr->max_dest_rd_atomic;
-        }
-
-        mthca_free_mailbox(dev, mailbox);
+        qp->state = new_state;
+        if (attr_mask & IB_QP_ACCESS_FLAGS)
+                qp->atomic_rd_en = attr->qp_access_flags;
+        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+                qp->resp_depth = attr->max_dest_rd_atomic;
 
         if (is_sqp(dev, qp))
                 store_attrs(to_msqp(qp), attr, attr_mask);
@@ -797,7 +815,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
          * If we moved a kernel QP to RESET, clean up all old CQ
          * entries and reinitialize the QP.
          */
-        if (!err && new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
+        if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
                 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
                                qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
                 if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
@@ -816,6 +834,8 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
                 }
         }
 
+out:
+        mthca_free_mailbox(dev, mailbox);
         return err;
 }
 
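The error-path rework in the three hunks above has one goal: the mailbox allocated earlier in mthca_modify_qp() must be freed on every exit, where previously it was freed only in the middle of the function and an early return from the new validation code would have leaked it. The shape of the pattern, sketched with plain malloc()/free() and illustrative errno values:

```c
#include <stdlib.h>

/* One exit label owns the cleanup; every failure jumps to it instead of
 * returning directly and leaking the buffer. */
static int do_command(int fail_early, int fail_late)
{
        int err = 0;
        void *mailbox = malloc(256);

        if (!mailbox)
                return -12;     /* -ENOMEM: nothing to clean up yet */

        if (fail_early) {
                err = -22;      /* -EINVAL */
                goto out;
        }
        /* ... issue the firmware command ... */
        if (fail_late) {
                err = -22;
                goto out;
        }

out:
        free(mailbox);          /* runs on success and on every error path */
        return err;
}

int main(void)
{
        return do_command(0, 1) == -22 ? 0 : 1;
}
```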
@@ -1177,10 +1197,6 @@ int mthca_alloc_qp(struct mthca_dev *dev,
 {
         int err;
 
-        err = mthca_set_qp_size(dev, cap, pd, qp);
-        if (err)
-                return err;
-
         switch (type) {
         case IB_QPT_RC: qp->transport = RC; break;
         case IB_QPT_UC: qp->transport = UC; break;
@@ -1188,6 +1204,10 @@ int mthca_alloc_qp(struct mthca_dev *dev,
         default: return -EINVAL;
         }
 
+        err = mthca_set_qp_size(dev, cap, pd, qp);
+        if (err)
+                return err;
+
         qp->qpn = mthca_alloc(&dev->qp_table.alloc);
         if (qp->qpn == -1)
                 return -ENOMEM;
@@ -1220,6 +1240,7 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
         u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
         int err;
 
+        sqp->qp.transport = MLX;
         err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
         if (err)
                 return err;
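Both allocation paths now establish qp->transport before calling mthca_set_qp_size(): mthca_alloc_qp() moves the call below the transport switch, and mthca_alloc_sqp() sets MLX first. This suggests the sizing logic consults the transport, for instance because special MLX QPs need work-queue headroom that RC/UC QPs do not. A hypothetical sketch of the dependency (all names and sizes illustrative):

```c
#include <stdio.h>

enum transport { RC, UC, UD, MLX };

struct qp {
        enum transport transport;
        int wqe_size;
};

/* Toy model: sizing depends on the transport, so the field must be
 * assigned before this runs (the point of the reordering above). */
static void set_size(struct qp *qp)
{
        qp->wqe_size = 64 + (qp->transport == MLX ? 64 : 0);
}

int main(void)
{
        struct qp qp = { 0 };

        qp.transport = MLX;     /* must precede set_size() */
        set_size(&qp);
        printf("MLX wqe_size: %d\n", qp.wqe_size);
        return 0;
}
```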
@@ -1980,8 +2001,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                 wmb();
                 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
                         cpu_to_be32(MTHCA_NEXT_DBD | size |
-                                   ((wr->send_flags & IB_SEND_FENCE) ?
-                                    MTHCA_NEXT_FENCE : 0));
+                                    ((wr->send_flags & IB_SEND_FENCE) ?
+                                     MTHCA_NEXT_FENCE : 0));
 
                 if (!size0) {
                         size0 = size;
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 47a6a754a591..0cfd15802217 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -205,6 +205,10 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
         ds = max(64UL,
                  roundup_pow_of_two(sizeof (struct mthca_next_seg) +
                                     srq->max_gs * sizeof (struct mthca_data_seg)));
+
+        if (ds > dev->limits.max_desc_sz)
+                return -EINVAL;
+
         srq->wqe_shift = long_log2(ds);
 
         srq->srqn = mthca_alloc(&dev->srq_table.alloc);
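The new check rejects an SRQ whose work-request descriptor would not fit the device's maximum descriptor size: the stride is the next power of two large enough for the header plus max_gs gather entries, floored at 64 bytes. A userspace sketch of the sizing with illustrative struct sizes and device limit:

```c
#include <stdio.h>

/* Round v up to the next power of two (v > 0). */
static unsigned long roundup_pow_of_two(unsigned long v)
{
        unsigned long p = 1;

        while (p < v)
                p <<= 1;
        return p;
}

int main(void)
{
        const unsigned long next_seg = 16, data_seg = 16; /* illustrative */
        const unsigned long max_desc_sz = 1024;           /* illustrative */

        for (unsigned long max_gs = 1; max_gs <= 128; max_gs *= 2) {
                unsigned long ds = roundup_pow_of_two(next_seg +
                                                      max_gs * data_seg);
                if (ds < 64)
                        ds = 64;        /* the max(64UL, ...) floor */
                printf("max_gs %3lu -> ds %4lu%s\n", max_gs, ds,
                       ds > max_desc_sz ? "  (rejected: -EINVAL)" : "");
        }
        return 0;
}
```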
@@ -354,6 +358,8 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                 return -EINVAL;
 
         if (attr_mask & IB_SRQ_LIMIT) {
+                if (attr->srq_limit > srq->max)
+                        return -EINVAL;
                 ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
                 if (ret)
                         return ret;
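Arming an SRQ sets a watermark: the HCA raises a limit event when the number of posted receives drops below srq_limit, so a limit larger than the queue itself could never fire meaningfully, and the new check refuses it before the firmware command is issued. In toy form:

```c
#include <stdio.h>

/* Toy model of the new check: the watermark must fit inside the queue. */
static int modify_srq_limit(unsigned int srq_max, unsigned int srq_limit)
{
        if (srq_limit > srq_max)
                return -22;     /* -EINVAL */
        /* ... issue the ARM command with srq_limit ... */
        return 0;
}

int main(void)
{
        printf("%d\n", modify_srq_limit(64, 16));       /* 0 */
        printf("%d\n", modify_srq_limit(64, 128));      /* -22 */
        return 0;
}
```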
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 1251f86ec856..b640107fb732 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -72,13 +72,14 @@ enum {
         IPOIB_MAX_MCAST_QUEUE     = 3,
 
         IPOIB_FLAG_OPER_UP        = 0,
-        IPOIB_FLAG_ADMIN_UP       = 1,
-        IPOIB_PKEY_ASSIGNED       = 2,
-        IPOIB_PKEY_STOP           = 3,
-        IPOIB_FLAG_SUBINTERFACE   = 4,
-        IPOIB_MCAST_RUN           = 5,
-        IPOIB_STOP_REAPER         = 6,
-        IPOIB_MCAST_STARTED       = 7,
+        IPOIB_FLAG_INITIALIZED    = 1,
+        IPOIB_FLAG_ADMIN_UP       = 2,
+        IPOIB_PKEY_ASSIGNED       = 3,
+        IPOIB_PKEY_STOP           = 4,
+        IPOIB_FLAG_SUBINTERFACE   = 5,
+        IPOIB_MCAST_RUN           = 6,
+        IPOIB_STOP_REAPER         = 7,
+        IPOIB_MCAST_STARTED       = 8,
 
         IPOIB_MAX_BACKOFF_SECONDS = 16,
 
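These enum values are bit positions in priv->flags, used with set_bit()/test_bit()/clear_bit(), not bitmasks, which is why slotting IPOIB_FLAG_INITIALIZED in at 1 renumbers everything after it. A sketch of the usage with simplified, non-atomic stand-ins for the kernel bit helpers:

```c
#include <stdio.h>

enum {
        IPOIB_FLAG_OPER_UP     = 0,
        IPOIB_FLAG_INITIALIZED = 1,
        IPOIB_FLAG_ADMIN_UP    = 2,
};

/* Non-atomic stand-ins; the kernel versions operate atomically on
 * unsigned long bitmaps. */
static void set_bit(int nr, unsigned long *flags)  { *flags |= 1UL << nr; }
static int  test_bit(int nr, unsigned long *flags) { return (*flags >> nr) & 1; }

int main(void)
{
        unsigned long flags = 0;

        set_bit(IPOIB_FLAG_INITIALIZED, &flags);
        printf("initialized: %d, admin up: %d\n",
               test_bit(IPOIB_FLAG_INITIALIZED, &flags),
               test_bit(IPOIB_FLAG_ADMIN_UP, &flags));
        return 0;
}
```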
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index a1f5a05f2f36..ed65202878d8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -423,13 +423,33 @@ int ipoib_ib_dev_open(struct net_device *dev)
         clear_bit(IPOIB_STOP_REAPER, &priv->flags);
         queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, HZ);
 
+        set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+
         return 0;
 }
 
+static void ipoib_pkey_dev_check_presence(struct net_device *dev)
+{
+        struct ipoib_dev_priv *priv = netdev_priv(dev);
+        u16 pkey_index = 0;
+
+        if (ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
+                clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+        else
+                set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+}
+
 int ipoib_ib_dev_up(struct net_device *dev)
 {
         struct ipoib_dev_priv *priv = netdev_priv(dev);
 
+        ipoib_pkey_dev_check_presence(dev);
+
+        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
+                ipoib_dbg(priv, "PKEY is not assigned.\n");
+                return 0;
+        }
+
         set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
 
         return ipoib_mcast_start_thread(dev);
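ipoib_pkey_dev_check_presence() moves up in the file and is now called from ipoib_ib_dev_up(), so the interface only goes operationally up once its P_Key actually appears in the local port's P_Key table; until then ipoib_ib_dev_up() returns quietly and the existing ipoib_pkey_poll machinery keeps retrying. A sketch of the lookup contract, assuming the ib_find_cached_pkey() convention of returning 0 and filling in the index on a hit (table contents illustrative):

```c
#include <stdio.h>

#define PKEY_TABLE_LEN 4

/* Returns 0 and writes *index on a hit, negative on a miss, mirroring
 * the convention the driver relies on. */
static int find_cached_pkey(const unsigned short *table, unsigned short pkey,
                            unsigned short *index)
{
        for (unsigned short i = 0; i < PKEY_TABLE_LEN; ++i)
                if (table[i] == pkey) {
                        *index = i;
                        return 0;
                }
        return -2;
}

int main(void)
{
        unsigned short table[PKEY_TABLE_LEN] = { 0xffff, 0x8001, 0, 0 };
        unsigned short index = 0;

        if (find_cached_pkey(table, 0x8001, &index) == 0)
                printf("P_Key found at index %u -> set IPOIB_PKEY_ASSIGNED\n",
                       index);
        else
                printf("P_Key absent -> stay down, let the poll task retry\n");
        return 0;
}
```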
@@ -483,6 +503,8 @@ int ipoib_ib_dev_stop(struct net_device *dev)
         struct ipoib_tx_buf *tx_req;
         int i;
 
+        clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+
         /*
          * Move our QP to the error state and then reinitialize in
          * when all work requests have completed or have been flushed.
@@ -587,8 +609,15 @@ void ipoib_ib_dev_flush(void *_dev)
         struct net_device *dev = (struct net_device *)_dev;
         struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv;
 
-        if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+        if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
+                ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
+                return;
+        }
+
+        if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
+                ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
                 return;
+        }
 
         ipoib_dbg(priv, "flushing\n");
 
@@ -605,7 +634,7 @@ void ipoib_ib_dev_flush(void *_dev)
 
         /* Flush any child interfaces too */
         list_for_each_entry(cpriv, &priv->child_intfs, list)
-                ipoib_ib_dev_flush(&cpriv->dev);
+                ipoib_ib_dev_flush(cpriv->dev);
 
         mutex_unlock(&priv->vlan_mutex);
 }
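The one-character fix above (&cpriv->dev to cpriv->dev) repairs a real pointer bug: dev is already a struct net_device *, so the old code handed the flush routine a struct net_device **, and because the work-queue-style callback takes void *, the compiler could not object. A distilled version of how void * parameters swallow this class of mistake (types simplified):

```c
#include <stdio.h>

struct net_device { int ifindex; };

struct priv {
        struct net_device *dev;         /* note: already a pointer */
};

static void dev_flush(void *_dev)
{
        struct net_device *dev = _dev;  /* trusts the caller completely */
        printf("flushing ifindex %d\n", dev->ifindex);
}

int main(void)
{
        struct net_device nd = { .ifindex = 3 };
        struct priv p = { .dev = &nd };

        dev_flush(p.dev);       /* correct */
        /* dev_flush(&p.dev);      compiles just as happily, but passes a
                                   struct net_device ** and misreads memory */
        return 0;
}
```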
@@ -632,17 +661,6 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
          * change async notification is available.
          */
 
-static void ipoib_pkey_dev_check_presence(struct net_device *dev)
-{
-        struct ipoib_dev_priv *priv = netdev_priv(dev);
-        u16 pkey_index = 0;
-
-        if (ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
-                clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
-        else
-                set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
-}
-
 void ipoib_pkey_poll(void *dev_ptr)
 {
         struct net_device *dev = dev_ptr;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 37da8d3dc388..53a32f65788d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -736,6 +736,11 @@ static void ipoib_set_mcast_list(struct net_device *dev)
 {
         struct ipoib_dev_priv *priv = netdev_priv(dev);
 
+        if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
+                ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
+                return;
+        }
+
         queue_work(ipoib_workqueue, &priv->restart_task);
 }
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 18d2f53ec34c..5f0388027b25 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -251,10 +251,12 @@ void ipoib_event(struct ib_event_handler *handler,
         struct ipoib_dev_priv *priv =
                 container_of(handler, struct ipoib_dev_priv, event_handler);
 
-        if (record->event == IB_EVENT_PORT_ACTIVE ||
+        if (record->event == IB_EVENT_PORT_ERR    ||
+            record->event == IB_EVENT_PKEY_CHANGE ||
+            record->event == IB_EVENT_PORT_ACTIVE ||
             record->event == IB_EVENT_LID_CHANGE  ||
             record->event == IB_EVENT_SM_CHANGE) {
-                ipoib_dbg(priv, "Port active event\n");
+                ipoib_dbg(priv, "Port state change event\n");
                 queue_work(ipoib_workqueue, &priv->flush_task);
         }
 }
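Widening the event filter means a flush (and with it a fresh P_Key presence check and multicast restart) now also runs on port errors and P_Key table changes, not only on events that signal the port coming up or changing identity; the debug text changes to match. A stand-alone rendering of the filter as a switch, using the standard ib_event_type names but illustrative numeric values:

```c
#include <stdio.h>

/* Names follow the ib_verbs enum; the values here are illustrative. */
enum ib_event_type {
        IB_EVENT_PORT_ERR,
        IB_EVENT_PKEY_CHANGE,
        IB_EVENT_PORT_ACTIVE,
        IB_EVENT_LID_CHANGE,
        IB_EVENT_SM_CHANGE,
        IB_EVENT_QP_FATAL,      /* an event that must NOT trigger a flush */
};

static int should_flush(enum ib_event_type ev)
{
        switch (ev) {
        case IB_EVENT_PORT_ERR:
        case IB_EVENT_PKEY_CHANGE:
        case IB_EVENT_PORT_ACTIVE:
        case IB_EVENT_LID_CHANGE:
        case IB_EVENT_SM_CHANGE:
                return 1;       /* queue the flush task */
        default:
                return 0;
        }
}

int main(void)
{
        printf("PKEY_CHANGE -> %d, QP_FATAL -> %d\n",
               should_flush(IB_EVENT_PKEY_CHANGE),
               should_flush(IB_EVENT_QP_FATAL));
        return 0;
}
```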
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index a13dcdf90a4f..61924cc30e55 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -503,8 +503,10 @@ err:
 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
                         struct srp_request *req)
 {
+        struct scatterlist *scat;
         struct srp_cmd *cmd = req->cmd->buf;
-        int len;
+        int len, nents, count;
+        int i;
         u8 fmt;
 
         if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
@@ -517,82 +519,66 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
                 return -EINVAL;
         }
 
-        if (scmnd->use_sg) {
-                struct scatterlist *scat = scmnd->request_buffer;
-                int n;
-                int i;
-
-                n = dma_map_sg(target->srp_host->dev->dma_device,
-                               scat, scmnd->use_sg, scmnd->sc_data_direction);
-
-                if (n == 1) {
-                        struct srp_direct_buf *buf = (void *) cmd->add_data;
-
-                        fmt = SRP_DATA_DESC_DIRECT;
-
-                        buf->va  = cpu_to_be64(sg_dma_address(scat));
-                        buf->key = cpu_to_be32(target->srp_host->mr->rkey);
-                        buf->len = cpu_to_be32(sg_dma_len(scat));
-
-                        len = sizeof (struct srp_cmd) +
-                                sizeof (struct srp_direct_buf);
-                } else {
-                        struct srp_indirect_buf *buf = (void *) cmd->add_data;
-                        u32 datalen = 0;
-
-                        fmt = SRP_DATA_DESC_INDIRECT;
-
-                        if (scmnd->sc_data_direction == DMA_TO_DEVICE)
-                                cmd->data_out_desc_cnt = n;
-                        else
-                                cmd->data_in_desc_cnt = n;
-
-                        buf->table_desc.va  = cpu_to_be64(req->cmd->dma +
-                                                          sizeof *cmd +
-                                                          sizeof *buf);
-                        buf->table_desc.key =
-                                cpu_to_be32(target->srp_host->mr->rkey);
-                        buf->table_desc.len =
-                                cpu_to_be32(n * sizeof (struct srp_direct_buf));
-
-                        for (i = 0; i < n; ++i) {
-                                buf->desc_list[i].va  = cpu_to_be64(sg_dma_address(&scat[i]));
-                                buf->desc_list[i].key =
-                                        cpu_to_be32(target->srp_host->mr->rkey);
-                                buf->desc_list[i].len = cpu_to_be32(sg_dma_len(&scat[i]));
-
-                                datalen += sg_dma_len(&scat[i]);
-                        }
-
-                        buf->len = cpu_to_be32(datalen);
-
-                        len = sizeof (struct srp_cmd) +
-                                sizeof (struct srp_indirect_buf) +
-                                n * sizeof (struct srp_direct_buf);
-                }
-        } else {
-                struct srp_direct_buf *buf = (void *) cmd->add_data;
-                dma_addr_t dma;
-
-                dma = dma_map_single(target->srp_host->dev->dma_device,
-                                     scmnd->request_buffer, scmnd->request_bufflen,
-                                     scmnd->sc_data_direction);
-                if (dma_mapping_error(dma)) {
-                        printk(KERN_WARNING PFX "unable to map %p/%d (dir %d)\n",
-                               scmnd->request_buffer, (int) scmnd->request_bufflen,
-                               scmnd->sc_data_direction);
-                        return -EINVAL;
-                }
-
-                pci_unmap_addr_set(req, direct_mapping, dma);
-
-                buf->va  = cpu_to_be64(dma);
-                buf->key = cpu_to_be32(target->srp_host->mr->rkey);
-                buf->len = cpu_to_be32(scmnd->request_bufflen);
-
-                fmt = SRP_DATA_DESC_DIRECT;
-
-                len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
-        }
+        /*
+         * This handling of non-SG commands can be killed when the
+         * SCSI midlayer no longer generates non-SG commands.
+         */
+        if (likely(scmnd->use_sg)) {
+                nents = scmnd->use_sg;
+                scat  = scmnd->request_buffer;
+        } else {
+                nents = 1;
+                scat  = &req->fake_sg;
+                sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
+        }
+
+        count = dma_map_sg(target->srp_host->dev->dma_device, scat, nents,
+                           scmnd->sc_data_direction);
+
+        if (count == 1) {
+                struct srp_direct_buf *buf = (void *) cmd->add_data;
+
+                fmt = SRP_DATA_DESC_DIRECT;
+
+                buf->va  = cpu_to_be64(sg_dma_address(scat));
+                buf->key = cpu_to_be32(target->srp_host->mr->rkey);
+                buf->len = cpu_to_be32(sg_dma_len(scat));
+
+                len = sizeof (struct srp_cmd) +
+                        sizeof (struct srp_direct_buf);
+        } else {
+                struct srp_indirect_buf *buf = (void *) cmd->add_data;
+                u32 datalen = 0;
+
+                fmt = SRP_DATA_DESC_INDIRECT;
+
+                if (scmnd->sc_data_direction == DMA_TO_DEVICE)
+                        cmd->data_out_desc_cnt = count;
+                else
+                        cmd->data_in_desc_cnt = count;
+
+                buf->table_desc.va  = cpu_to_be64(req->cmd->dma +
+                                                  sizeof *cmd +
+                                                  sizeof *buf);
+                buf->table_desc.key =
+                        cpu_to_be32(target->srp_host->mr->rkey);
+                buf->table_desc.len =
+                        cpu_to_be32(count * sizeof (struct srp_direct_buf));
+
+                for (i = 0; i < count; ++i) {
+                        buf->desc_list[i].va  = cpu_to_be64(sg_dma_address(&scat[i]));
+                        buf->desc_list[i].key =
+                                cpu_to_be32(target->srp_host->mr->rkey);
+                        buf->desc_list[i].len = cpu_to_be32(sg_dma_len(&scat[i]));
+
+                        datalen += sg_dma_len(&scat[i]);
+                }
+
+                buf->len = cpu_to_be32(datalen);
+
+                len = sizeof (struct srp_cmd) +
+                        sizeof (struct srp_indirect_buf) +
+                        count * sizeof (struct srp_direct_buf);
+        }
 
         if (scmnd->sc_data_direction == DMA_TO_DEVICE)
@@ -600,7 +586,6 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
         else
                 cmd->buf_fmt = fmt;
 
-
         return len;
 }
 
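After mapping, the command is described either by a single direct descriptor (count == 1) or by an indirect table holding one direct descriptor per mapped entry, and len tells the caller how much of the IU the command now occupies. The length arithmetic, sketched with illustrative sizes standing in for the real SRP structures:

```c
#include <stdio.h>

/* Illustrative sizes; the real values come from the SRP structures. */
#define SRP_CMD_SIZE      48
#define DIRECT_BUF_SIZE   16
#define INDIRECT_BUF_SIZE 20    /* table descriptor plus total length */

static int srp_data_len(int count)
{
        if (count == 1)         /* SRP_DATA_DESC_DIRECT */
                return SRP_CMD_SIZE + DIRECT_BUF_SIZE;
        /* SRP_DATA_DESC_INDIRECT: table header plus one entry per mapping */
        return SRP_CMD_SIZE + INDIRECT_BUF_SIZE + count * DIRECT_BUF_SIZE;
}

int main(void)
{
        for (int count = 1; count <= 4; ++count)
                printf("count %d -> len %d\n", count, srp_data_len(count));
        return 0;
}
```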
@@ -608,20 +593,28 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
                            struct srp_target_port *target,
                            struct srp_request *req)
 {
+        struct scatterlist *scat;
+        int nents;
+
         if (!scmnd->request_buffer ||
             (scmnd->sc_data_direction != DMA_TO_DEVICE &&
              scmnd->sc_data_direction != DMA_FROM_DEVICE))
                 return;
 
-        if (scmnd->use_sg)
-                dma_unmap_sg(target->srp_host->dev->dma_device,
-                             (struct scatterlist *) scmnd->request_buffer,
-                             scmnd->use_sg, scmnd->sc_data_direction);
-        else
-                dma_unmap_single(target->srp_host->dev->dma_device,
-                                 pci_unmap_addr(req, direct_mapping),
-                                 scmnd->request_bufflen,
-                                 scmnd->sc_data_direction);
+        /*
+         * This handling of non-SG commands can be killed when the
+         * SCSI midlayer no longer generates non-SG commands.
+         */
+        if (likely(scmnd->use_sg)) {
+                nents = scmnd->use_sg;
+                scat  = (struct scatterlist *) scmnd->request_buffer;
+        } else {
+                nents = 1;
+                scat  = &req->fake_sg;
+        }
+
+        dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents,
+                     scmnd->sc_data_direction);
 }
 
 static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
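The point of req->fake_sg is to funnel both command flavors through one dma_map_sg()/dma_unmap_sg() pair: a non-SG command's flat buffer is wrapped in a one-entry scatterlist at map time, and because the fake entry lives in the request rather than on the stack, the unmap path can rebuild exactly the same (scat, nents) pair at command completion. A userspace sketch of the wrap, with a toy scatterlist type standing in for the kernel's:

```c
#include <stdio.h>

/* Toy stand-in for struct scatterlist. */
struct sg_entry {
        void         *buf;
        unsigned int  len;
};

/* Like sg_init_one(): describe a flat buffer as a 1-entry list. */
static void sg_init_one(struct sg_entry *sg, void *buf, unsigned int len)
{
        sg->buf = buf;
        sg->len = len;
}

struct request {
        struct sg_entry fake_sg;        /* survives until completion */
};

int main(void)
{
        char data[512];
        struct request req;
        struct sg_entry *scat;
        int nents;

        /* map side: wrap the flat buffer */
        nents = 1;
        scat  = &req.fake_sg;
        sg_init_one(scat, data, sizeof data);

        /* unmap side recomputes the identical pair from the request */
        printf("unmap %d entry of %u bytes\n", nents, req.fake_sg.len);
        return 0;
}
```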
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 4e7727df32f1..bd7f7c3115de 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -38,6 +38,7 @@
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
+#include <linux/scatterlist.h>
 
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
@@ -94,7 +95,11 @@ struct srp_request {
         struct scsi_cmnd       *scmnd;
         struct srp_iu          *cmd;
         struct srp_iu          *tsk_mgmt;
-        DECLARE_PCI_UNMAP_ADDR(direct_mapping)
+        /*
+         * Fake scatterlist used when scmnd->use_sg==0.  Can be killed
+         * when the SCSI midlayer no longer generates non-SG commands.
+         */
+        struct scatterlist      fake_sg;
         struct completion       done;
         short                   next;
         u8                      cmd_done;