-rw-r--r--   drivers/infiniband/core/user_mad.c       2
-rw-r--r--   drivers/infiniband/hw/mthca/mthca_qp.c   37
-rw-r--r--   drivers/infiniband/ulp/srp/ib_srp.c      17
-rw-r--r--   drivers/infiniband/ulp/srp/ib_srp.h      1
4 files changed, 31 insertions, 26 deletions
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 5ea741f47fc8..e73f81c22381 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -312,7 +312,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 	int ret, length, hdr_len, copy_offset;
 	int rmpp_active = 0;
 
-	if (count < sizeof (struct ib_user_mad))
+	if (count < sizeof (struct ib_user_mad) + IB_MGMT_RMPP_HDR)
 		return -EINVAL;
 
 	length = count - sizeof (struct ib_user_mad);
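
The change above simply raises the minimum accepted write() size so that the buffer is guaranteed to hold the RMPP header region as well as the userspace MAD header. A minimal standalone sketch of the tightened check follows; the two size macros are placeholders standing in for sizeof (struct ib_user_mad) and IB_MGMT_RMPP_HDR, not the real values:

#include <stddef.h>
#include <errno.h>

#define USER_MAD_HDR_SIZE 64	/* placeholder for sizeof (struct ib_user_mad) */
#define RMPP_HDR_SIZE     12	/* placeholder for IB_MGMT_RMPP_HDR */

static int check_write_len(size_t count)
{
	/* A write() shorter than the userspace MAD header plus the RMPP
	 * header is rejected up front, so later header handling cannot
	 * run past the end of the supplied buffer. */
	if (count < USER_MAD_HDR_SIZE + RMPP_HDR_SIZE)
		return -EINVAL;
	return 0;
}
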
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 760c418d5bc9..dd4e13303e96 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -730,15 +730,16 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	}
 
 	if (attr_mask & IB_QP_ACCESS_FLAGS) {
+		qp_context->params2 |=
+			cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE ?
+				    MTHCA_QP_BIT_RWE : 0);
+
 		/*
-		 * Only enable RDMA/atomics if we have responder
-		 * resources set to a non-zero value.
+		 * Only enable RDMA reads and atomics if we have
+		 * responder resources set to a non-zero value.
 		 */
 		if (qp->resp_depth) {
 			qp_context->params2 |=
-				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE ?
-					    MTHCA_QP_BIT_RWE : 0);
-			qp_context->params2 |=
 				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_READ ?
 					    MTHCA_QP_BIT_RRE : 0);
 			qp_context->params2 |=
@@ -759,31 +760,27 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		if (qp->resp_depth && !attr->max_dest_rd_atomic) {
 			/*
 			 * Lowering our responder resources to zero.
-			 * Turn off RDMA/atomics as responder.
-			 * (RWE/RRE/RAE in params2 already zero)
+			 * Turn off reads RDMA and atomics as responder.
+			 * (RRE/RAE in params2 already zero)
 			 */
-			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
-								MTHCA_QP_OPTPAR_RRE |
+			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRE |
 								MTHCA_QP_OPTPAR_RAE);
 		}
 
 		if (!qp->resp_depth && attr->max_dest_rd_atomic) {
 			/*
 			 * Increasing our responder resources from
-			 * zero. Turn on RDMA/atomics as appropriate.
+			 * zero. Turn on RDMA reads and atomics as
+			 * appropriate.
 			 */
 			qp_context->params2 |=
-				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_WRITE ?
-					    MTHCA_QP_BIT_RWE : 0);
-			qp_context->params2 |=
 				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_READ ?
 					    MTHCA_QP_BIT_RRE : 0);
 			qp_context->params2 |=
 				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_ATOMIC ?
 					    MTHCA_QP_BIT_RAE : 0);
 
-			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
-								MTHCA_QP_OPTPAR_RRE |
+			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRE |
 								MTHCA_QP_OPTPAR_RAE);
 		}
 
@@ -921,10 +918,12 @@ static void mthca_adjust_qp_caps(struct mthca_dev *dev,
 	else
 		qp->max_inline_data = max_data_size - MTHCA_INLINE_HEADER_SIZE;
 
-	qp->sq.max_gs = max_data_size / sizeof (struct mthca_data_seg);
-	qp->rq.max_gs = (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
-			 sizeof (struct mthca_next_seg)) /
-			 sizeof (struct mthca_data_seg);
+	qp->sq.max_gs = min_t(int, dev->limits.max_sg,
+			      max_data_size / sizeof (struct mthca_data_seg));
+	qp->rq.max_gs = min_t(int, dev->limits.max_sg,
+			      (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
+			       sizeof (struct mthca_next_seg)) /
+			       sizeof (struct mthca_data_seg));
 }
 
 /*
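
Restated outside the driver, the net effect of the two access-flag hunks above is the following gating: the remote-write enable bit no longer depends on responder resources, while remote reads and atomics remain gated on a non-zero resp_depth, since only reads and atomics consume responder resources. The sketch below is illustrative only, with stand-in constants; the real code uses the IB_ACCESS_* flags, the MTHCA_QP_BIT_* bits and cpu_to_be32():

#include <stdint.h>

enum {
	ACCESS_REMOTE_WRITE  = 1 << 0,	/* stand-ins for IB_ACCESS_* */
	ACCESS_REMOTE_READ   = 1 << 1,
	ACCESS_REMOTE_ATOMIC = 1 << 2,
};

enum {
	QP_BIT_RWE = 1 << 14,		/* stand-ins for MTHCA_QP_BIT_* */
	QP_BIT_RRE = 1 << 13,
	QP_BIT_RAE = 1 << 12,
};

static uint32_t access_bits(int access_flags, int resp_depth)
{
	uint32_t params2 = 0;

	/* remote write is applied regardless of responder resources */
	params2 |= (access_flags & ACCESS_REMOTE_WRITE) ? QP_BIT_RWE : 0;

	/* remote reads and atomics only when responder resources exist */
	if (resp_depth) {
		params2 |= (access_flags & ACCESS_REMOTE_READ)   ? QP_BIT_RRE : 0;
		params2 |= (access_flags & ACCESS_REMOTE_ATOMIC) ? QP_BIT_RAE : 0;
	}

	return params2;
}

The last mthca hunk is independent: sq.max_gs and rq.max_gs are now additionally clamped to dev->limits.max_sg, so the scatter/gather limits derived from the WQE size can never exceed what the device reports.
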
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 321a3a10e69b..ee9fe226ae99 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -802,13 +802,21 @@ static int srp_post_recv(struct srp_target_port *target)
 
 /*
  * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.
+ * req_lim and tx_head. Lock cannot be dropped between call here and
+ * call to __srp_post_send().
  */
 static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target)
 {
 	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
 		return NULL;
 
+	if (unlikely(target->req_lim < 1)) {
+		if (printk_ratelimit())
+			printk(KERN_DEBUG PFX "Target has req_lim %d\n",
+			       target->req_lim);
+		return NULL;
+	}
+
 	return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
 }
 
814 822
@@ -823,11 +831,6 @@ static int __srp_post_send(struct srp_target_port *target,
 	struct ib_send_wr wr, *bad_wr;
 	int ret = 0;
 
-	if (target->req_lim < 1) {
-		printk(KERN_ERR PFX "Target has req_lim %d\n", target->req_lim);
-		return -EAGAIN;
-	}
-
 	list.addr = iu->dma;
 	list.length = len;
 	list.lkey = target->srp_host->mr->lkey;
@@ -1417,6 +1420,8 @@ static ssize_t srp_create_target(struct class_device *class_dev,
 	if (!target_host)
 		return -ENOMEM;
 
+	target_host->max_lun = SRP_MAX_LUN;
+
 	target = host_to_target(target_host);
 	memset(target, 0, sizeof *target);
 
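
A condensed, standalone sketch of the send-credit path after the first two ib_srp.c hunks above: the request-limit check moves into the IU reservation step, which already runs under the host lock, and an exhausted credit count now yields NULL just like a full send queue, so callers fall back to their existing "no IU available" handling instead of getting -EAGAIN from __srp_post_send(). Only tx_head, tx_tail, req_lim and the SRP_SQ_SIZE idea correspond to the driver; the rest is simplified for illustration:

#include <stdio.h>

#define SQ_SIZE 64			/* stand-in for SRP_SQ_SIZE */

struct target {
	unsigned int tx_head, tx_tail;
	int req_lim;			/* send credits granted by the target */
	void *tx_ring[SQ_SIZE];
};

/* Caller holds the host lock; it must not be dropped before the send
 * is actually posted, mirroring the comment added in the hunk above. */
static void *get_tx_iu(struct target *t)
{
	if (t->tx_head - t->tx_tail >= SQ_SIZE)
		return NULL;		/* send queue full */

	if (t->req_lim < 1) {
		/* stand-in for the rate-limited printk(KERN_DEBUG ...) */
		fprintf(stderr, "target out of request credits (req_lim %d)\n",
			t->req_lim);
		return NULL;		/* no credit: caller backs off and retries */
	}

	return t->tx_ring[t->tx_head % SQ_SIZE];
}

The remaining hunk in srp_create_target(), together with the ib_srp.h change below, sets the SCSI host's max_lun from the new SRP_MAX_LUN constant (512) when the target host is allocated.
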
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 4fec28a71367..b564f18caf78 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -54,6 +54,7 @@ enum {
 	SRP_PORT_REDIRECT	= 1,
 	SRP_DLID_REDIRECT	= 2,
 
+	SRP_MAX_LUN		= 512,
 	SRP_MAX_IU_LEN		= 256,
 
 	SRP_RQ_SHIFT		= 6,