author	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-21 19:19:32 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-21 19:19:32 -0400
commit	8aee74c8ee875448cc6d1cf995c9469eb60ae515 (patch)
tree	9e9f57dd7fe321825d7e39472cf44777c82f39cf /drivers/infiniband/hw
parent	080e89270a7bfb7d01fac9a67050f8ac6d6cdd11 (diff)
parent	9f81036c54ed1f860d2807c5a6aa4f2b30c21204 (diff)
Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband:
  IB/cm: Improve local id allocation
  IPoIB/cm: Fix SRQ WR leak
  IB/ipoib: Fix typos in error messages
  IB/mlx4: Check if SRQ is full when posting receive
  IB/mlx4: Pass send queue sizes from userspace to kernel
  IB/mlx4: Fix check of opcode in mlx4_ib_post_send()
  mlx4_core: Fix array overrun in dump_dev_cap_flags()
  IB/mlx4: Fix RESET to RESET and RESET to ERROR transitions
  IB/mthca: Fix RESET to ERROR transition
  IB/mlx4: Set GRH:HopLimit when sending globally routed MADs
  IB/mthca: Set GRH:HopLimit when building MLX headers
  IB/mlx4: Fix check of max_qp_dest_rdma in modify QP
  IB/mthca: Fix use-after-free on device restart
  IB/ehca: Return proper error code if register_mr fails
  IPoIB: Handle P_Key table reordering
  IB/core: Use start_port() and end_port()
  IB/core: Add helpers for uncached GID and P_Key searches
  IB/ipath: Fix potential deadlock with multicast spinlocks
  IB/core: Free umem when mm is already gone
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--	drivers/infiniband/hw/ehca/ehca_mrmw.c	7
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_verbs_mcast.c	16
-rw-r--r--	drivers/infiniband/hw/mlx4/qp.c	181
-rw-r--r--	drivers/infiniband/hw/mlx4/srq.c	6
-rw-r--r--	drivers/infiniband/hw/mlx4/user.h	5
-rw-r--r--	drivers/infiniband/hw/mthca/mthca_av.c	1
-rw-r--r--	drivers/infiniband/hw/mthca/mthca_main.c	4
-rw-r--r--	drivers/infiniband/hw/mthca/mthca_qp.c	158
8 files changed, 250 insertions(+), 128 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 84c5bb498563..add79bd44e39 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -2050,13 +2050,10 @@ int ehca_mrmw_map_hrc_alloc(const u64 hipz_rc)
 	switch (hipz_rc) {
 	case H_SUCCESS:	             /* successful completion */
 		return 0;
-	case H_ADAPTER_PARM:         /* invalid adapter handle */
-	case H_RT_PARM:              /* invalid resource type */
 	case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
-	case H_MLENGTH_PARM:         /* invalid memory length */
-	case H_MEM_ACCESS_PARM:      /* invalid access controls */
 	case H_CONSTRAINED:          /* resource constraint */
-		return -EINVAL;
+	case H_NO_MEM:
+		return -ENOMEM;
 	case H_BUSY:                 /* long busy */
 		return -EBUSY;
 	default:
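
The ehca hunk above stops folding resource exhaustion into -EINVAL, so callers of the MR registration path can tell out-of-memory apart from bad arguments. A minimal, self-contained sketch of the same return-code mapping pattern (the H_* values below are illustrative stand-ins, not the real hcall codes):

#include <errno.h>
#include <stdio.h>

/* Stand-in hypervisor return codes; the real H_* constants differ. */
enum { H_SUCCESS, H_NOT_ENOUGH_RESOURCES, H_CONSTRAINED, H_NO_MEM, H_BUSY };

static int map_hrc_alloc(unsigned long hipz_rc)
{
	switch (hipz_rc) {
	case H_SUCCESS:			/* successful completion */
		return 0;
	case H_NOT_ENOUGH_RESOURCES:	/* insufficient resources */
	case H_CONSTRAINED:		/* resource constraint */
	case H_NO_MEM:
		return -ENOMEM;		/* callers can now react to memory pressure */
	case H_BUSY:			/* long busy */
		return -EBUSY;
	default:
		return -EINVAL;		/* genuinely bad parameters */
	}
}

int main(void)
{
	printf("%d %d\n", map_hrc_alloc(H_NO_MEM), map_hrc_alloc(H_BUSY));
	return 0;
}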
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c b/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
index 085e28b939ec..dd691cfa5079 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
@@ -165,10 +165,9 @@ static int ipath_mcast_add(struct ipath_ibdev *dev,
 {
 	struct rb_node **n = &mcast_tree.rb_node;
 	struct rb_node *pn = NULL;
-	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&mcast_lock, flags);
+	spin_lock_irq(&mcast_lock);
 
 	while (*n) {
 		struct ipath_mcast *tmcast;
@@ -228,7 +227,7 @@ static int ipath_mcast_add(struct ipath_ibdev *dev,
 	ret = 0;
 
 bail:
-	spin_unlock_irqrestore(&mcast_lock, flags);
+	spin_unlock_irq(&mcast_lock);
 
 	return ret;
 }
@@ -289,17 +288,16 @@ int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	struct ipath_mcast *mcast = NULL;
 	struct ipath_mcast_qp *p, *tmp;
 	struct rb_node *n;
-	unsigned long flags;
 	int last = 0;
 	int ret;
 
-	spin_lock_irqsave(&mcast_lock, flags);
+	spin_lock_irq(&mcast_lock);
 
 	/* Find the GID in the mcast table. */
 	n = mcast_tree.rb_node;
 	while (1) {
 		if (n == NULL) {
-			spin_unlock_irqrestore(&mcast_lock, flags);
+			spin_unlock_irq(&mcast_lock);
 			ret = -EINVAL;
 			goto bail;
 		}
@@ -334,7 +332,7 @@ int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 			break;
 	}
 
-	spin_unlock_irqrestore(&mcast_lock, flags);
+	spin_unlock_irq(&mcast_lock);
 
 	if (p) {
 		/*
@@ -348,9 +346,9 @@ int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 		atomic_dec(&mcast->refcount);
 		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
 		ipath_mcast_free(mcast);
-		spin_lock(&dev->n_mcast_grps_lock);
+		spin_lock_irq(&dev->n_mcast_grps_lock);
 		dev->n_mcast_grps_allocated--;
-		spin_unlock(&dev->n_mcast_grps_lock);
+		spin_unlock_irq(&dev->n_mcast_grps_lock);
 	}
 
 	ret = 0;
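
The ipath change swaps spin_lock_irqsave() for spin_lock_irq() because these verbs entry points run in process context with interrupts enabled, so saving the interrupt flags is pure overhead; it also makes the inner n_mcast_grps_lock acquisition IRQ-safe, which is the deadlock fix named in the merge log. A toy userspace model of the difference between the two idioms (this only mimics the CPU interrupt flag; it is not kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for the CPU interrupt-enable flag. */
static bool irqs_enabled = true;

static unsigned long toy_irq_save(void)
{
	unsigned long flags = irqs_enabled;
	irqs_enabled = false;
	return flags;
}

static void toy_irq_restore(unsigned long flags) { irqs_enabled = flags; }
static void toy_irq_disable(void) { irqs_enabled = false; }
static void toy_irq_enable(void)  { irqs_enabled = true; }

int main(void)
{
	/* irqsave/irqrestore: safe from any context, restores prior state. */
	unsigned long flags = toy_irq_save();
	/* ... critical section ... */
	toy_irq_restore(flags);

	/* irq/unlock_irq: cheaper, but assumes irqs were enabled on entry,
	 * because the unlock side unconditionally re-enables them. */
	toy_irq_disable();
	/* ... critical section ... */
	toy_irq_enable();

	printf("irqs enabled again: %d\n", irqs_enabled);
	return 0;
}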
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 5cd706908450..a824bc5f79fd 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -188,14 +188,32 @@ static int send_wqe_overhead(enum ib_qp_type type)
 	}
 }
 
-static int set_qp_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
-		       enum ib_qp_type type, struct mlx4_ib_qp *qp)
+static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
+		       struct mlx4_ib_qp *qp)
 {
-	/* Sanity check QP size before proceeding */
+	/* Sanity check RQ size before proceeding */
+	if (cap->max_recv_wr > dev->dev->caps.max_wqes ||
+	    cap->max_recv_sge > dev->dev->caps.max_rq_sg)
+		return -EINVAL;
+
+	qp->rq.max = cap->max_recv_wr ? roundup_pow_of_two(cap->max_recv_wr) : 0;
+
+	qp->rq.wqe_shift = ilog2(roundup_pow_of_two(cap->max_recv_sge *
+						    sizeof (struct mlx4_wqe_data_seg)));
+	qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof (struct mlx4_wqe_data_seg);
+
+	cap->max_recv_wr  = qp->rq.max;
+	cap->max_recv_sge = qp->rq.max_gs;
+
+	return 0;
+}
+
+static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
+			      enum ib_qp_type type, struct mlx4_ib_qp *qp)
+{
+	/* Sanity check SQ size before proceeding */
 	if (cap->max_send_wr  > dev->dev->caps.max_wqes  ||
-	    cap->max_recv_wr  > dev->dev->caps.max_wqes  ||
 	    cap->max_send_sge > dev->dev->caps.max_sq_sg ||
-	    cap->max_recv_sge > dev->dev->caps.max_rq_sg ||
 	    cap->max_inline_data + send_wqe_overhead(type) +
 	    sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
 		return -EINVAL;
@@ -208,12 +226,7 @@ static int set_qp_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 	    cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
 		return -EINVAL;
 
-	qp->rq.max = cap->max_recv_wr ? roundup_pow_of_two(cap->max_recv_wr) : 0;
-	qp->sq.max = cap->max_send_wr ? roundup_pow_of_two(cap->max_send_wr) : 0;
-
-	qp->rq.wqe_shift = ilog2(roundup_pow_of_two(cap->max_recv_sge *
-						    sizeof (struct mlx4_wqe_data_seg)));
-	qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof (struct mlx4_wqe_data_seg);
+	qp->sq.max = cap->max_send_wr ? roundup_pow_of_two(cap->max_send_wr) : 1;
 
 	qp->sq.wqe_shift = ilog2(roundup_pow_of_two(max(cap->max_send_sge *
 							sizeof (struct mlx4_wqe_data_seg),
@@ -233,16 +246,26 @@ static int set_qp_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 		qp->sq.offset = 0;
 	}
 
 	cap->max_send_wr  = qp->sq.max;
-	cap->max_recv_wr  = qp->rq.max;
-	cap->max_send_sge = qp->sq.max_gs;
-	cap->max_recv_sge = qp->rq.max_gs;
+	cap->max_send_sge = qp->sq.max_gs;
 	cap->max_inline_data = (1 << qp->sq.wqe_shift) - send_wqe_overhead(type) -
 		sizeof (struct mlx4_wqe_inline_seg);
 
 	return 0;
 }
 
+static int set_user_sq_size(struct mlx4_ib_qp *qp,
+			    struct mlx4_ib_create_qp *ucmd)
+{
+	qp->sq.max	 = 1 << ucmd->log_sq_bb_count;
+	qp->sq.wqe_shift = ucmd->log_sq_stride;
+
+	qp->buf_size = (qp->rq.max << qp->rq.wqe_shift) +
+		(qp->sq.max << qp->sq.wqe_shift);
+
+	return 0;
+}
+
 static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			    struct ib_qp_init_attr *init_attr,
 			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp)
@@ -264,7 +287,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	qp->sq.head	 = 0;
 	qp->sq.tail	 = 0;
 
-	err = set_qp_size(dev, &init_attr->cap, init_attr->qp_type, qp);
+	err = set_rq_size(dev, &init_attr->cap, qp);
 	if (err)
 		goto err;
 
@@ -276,6 +299,10 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			goto err;
 		}
 
+		err = set_user_sq_size(qp, &ucmd);
+		if (err)
+			goto err;
+
 		qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
 				       qp->buf_size, 0);
 		if (IS_ERR(qp->umem)) {
@@ -297,6 +324,10 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		if (err)
 			goto err_mtt;
 	} else {
+		err = set_kernel_sq_size(dev, &init_attr->cap, init_attr->qp_type, qp);
+		if (err)
+			goto err;
+
 		err = mlx4_ib_db_alloc(dev, &qp->db, 0);
 		if (err)
 			goto err;
@@ -573,7 +604,7 @@ static int to_mlx4_st(enum ib_qp_type type)
 	}
 }
 
-static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, struct ib_qp_attr *attr,
+static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr,
 				   int attr_mask)
 {
 	u8 dest_rd_atomic;
@@ -603,7 +634,7 @@ static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, struct ib_qp_attr *att
 	return cpu_to_be32(hw_access_flags);
 }
 
-static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, struct ib_qp_attr *attr,
+static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr,
 			    int attr_mask)
 {
 	if (attr_mask & IB_QP_PKEY_INDEX)
@@ -619,7 +650,7 @@ static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
 	path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
 }
 
-static int mlx4_set_path(struct mlx4_ib_dev *dev, struct ib_ah_attr *ah,
+static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
 			 struct mlx4_qp_path *path, u8 port)
 {
 	path->grh_mylmc = ah->src_path_bits & 0x7f;
@@ -655,14 +686,14 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, struct ib_ah_attr *ah,
 	return 0;
 }
 
-int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
-		      int attr_mask, struct ib_udata *udata)
+static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
+			       const struct ib_qp_attr *attr, int attr_mask,
+			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
 {
 	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
 	struct mlx4_qp_context *context;
 	enum mlx4_qp_optpar optpar = 0;
-	enum ib_qp_state cur_state, new_state;
 	int sqd_event;
 	int err = -EINVAL;
 
@@ -670,34 +701,6 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	if (!context)
 		return -ENOMEM;
 
-	mutex_lock(&qp->mutex);
-
-	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
-	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
-
-	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
-		goto out;
-
-	if ((attr_mask & IB_QP_PKEY_INDEX) &&
-	    attr->pkey_index >= dev->dev->caps.pkey_table_len) {
-		goto out;
-	}
-
-	if ((attr_mask & IB_QP_PORT) &&
-	    (attr->port_num == 0 || attr->port_num > dev->dev->caps.num_ports)) {
-		goto out;
-	}
-
-	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
-	    attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
-		goto out;
-	}
-
-	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
-	    attr->max_dest_rd_atomic > 1 << dev->dev->caps.max_qp_dest_rdma) {
-		goto out;
-	}
-
 	context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
 				     (to_mlx4_st(ibqp->qp_type) << 16));
 	context->flags |= cpu_to_be32(1 << 8); /* DE? */
@@ -920,11 +923,84 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	}
 
 out:
-	mutex_unlock(&qp->mutex);
 	kfree(context);
 	return err;
 }
 
+static const struct ib_qp_attr mlx4_ib_qp_attr = { .port_num = 1 };
+static const int mlx4_ib_qp_attr_mask_table[IB_QPT_UD + 1] = {
+	[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
+			IB_QP_PORT		|
+			IB_QP_QKEY),
+	[IB_QPT_UC]  = (IB_QP_PKEY_INDEX	|
+			IB_QP_PORT		|
+			IB_QP_ACCESS_FLAGS),
+	[IB_QPT_RC]  = (IB_QP_PKEY_INDEX	|
+			IB_QP_PORT		|
+			IB_QP_ACCESS_FLAGS),
+	[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
+			IB_QP_QKEY),
+	[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
+			IB_QP_QKEY),
+};
+
+int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+		      int attr_mask, struct ib_udata *udata)
+{
+	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
+	struct mlx4_ib_qp *qp = to_mqp(ibqp);
+	enum ib_qp_state cur_state, new_state;
+	int err = -EINVAL;
+
+	mutex_lock(&qp->mutex);
+
+	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
+	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
+
+	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
+		goto out;
+
+	if ((attr_mask & IB_QP_PKEY_INDEX) &&
+	    attr->pkey_index >= dev->dev->caps.pkey_table_len) {
+		goto out;
+	}
+
+	if ((attr_mask & IB_QP_PORT) &&
+	    (attr->port_num == 0 || attr->port_num > dev->dev->caps.num_ports)) {
+		goto out;
+	}
+
+	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+	    attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
+		goto out;
+	}
+
+	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+	    attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) {
+		goto out;
+	}
+
+	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
+		err = 0;
+		goto out;
+	}
+
+	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_ERR) {
+		err = __mlx4_ib_modify_qp(ibqp, &mlx4_ib_qp_attr,
+					  mlx4_ib_qp_attr_mask_table[ibqp->qp_type],
+					  IB_QPS_RESET, IB_QPS_INIT);
+		if (err)
+			goto out;
+		cur_state = IB_QPS_INIT;
+	}
+
+	err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
+
+out:
+	mutex_unlock(&qp->mutex);
+	return err;
+}
+
 static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 			    void *wqe)
 {
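The wrapper added above exists because the HCA has no direct RESET-to-ERROR command: the driver synthesizes the transition as RESET-to-INIT, using a minimal dummy attribute set chosen per QP type from mlx4_ib_qp_attr_mask_table, followed by INIT-to-ERROR. A toy model of composing an unsupported state transition out of two supported ones (the states and legality rules below are simplified, not the IB QP state machine):

#include <stdio.h>

enum qps { QPS_RESET, QPS_INIT, QPS_ERR };

/* Toy "hardware" that only accepts certain single-step transitions. */
static int hw_modify(enum qps *cur, enum qps next)
{
	int legal = (*cur == QPS_RESET && next == QPS_INIT) ||
		    (*cur == QPS_INIT  && next == QPS_ERR);
	if (!legal)
		return -1;
	*cur = next;
	return 0;
}

/* Driver-level composition: RESET->ERR becomes RESET->INIT->ERR. */
static int modify_qp(enum qps *cur, enum qps next)
{
	if (*cur == QPS_RESET && next == QPS_ERR) {
		if (hw_modify(cur, QPS_INIT))	/* dummy INIT step */
			return -1;
	}
	return hw_modify(cur, next);
}

int main(void)
{
	enum qps state = QPS_RESET;
	printf("%d %d\n", modify_qp(&state, QPS_ERR), state == QPS_ERR);
	return 0;
}
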
@@ -952,6 +1028,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 			(be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 20) & 0xff;
 		sqp->ud_header.grh.flow_label    =
 			ah->av.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
+		sqp->ud_header.grh.hop_limit     = ah->av.hop_limit;
 		ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.port_pd) >> 24,
 				  ah->av.gid_index, &sqp->ud_header.grh.source_gid);
 		memcpy(sqp->ud_header.grh.destination_gid.raw,
@@ -1192,7 +1269,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		 */
 		wmb();
 
-		if (wr->opcode < 0 || wr->opcode > ARRAY_SIZE(mlx4_ib_opcode)) {
+		if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) {
 			err = -EINVAL;
 			goto out;
 		}
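
The last hunk is a classic off-by-one bounds check: with '>' an opcode equal to ARRAY_SIZE(mlx4_ib_opcode) passed validation and indexed one slot past the end of the table. A standalone illustration of the corrected pattern (dummy table, not the driver's opcode map):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned int opcode_table[] = { 10, 11, 12 }; /* valid: 0..2 */

static int lookup(int opcode, unsigned int *out)
{
	/* '>' alone would accept opcode == 3 and read past the end;
	 * '>=' rejects everything outside 0..ARRAY_SIZE()-1. */
	if (opcode < 0 || (size_t)opcode >= ARRAY_SIZE(opcode_table))
		return -1;
	*out = opcode_table[opcode];
	return 0;
}

int main(void)
{
	unsigned int v;
	printf("%d\n", lookup(2, &v));	/* 0: in bounds */
	printf("%d\n", lookup(3, &v));	/* -1: the former overrun case */
	return 0;
}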
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 42ab4a801d6a..12fac1c8989d 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -297,6 +297,12 @@ int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 			break;
 		}
 
+		if (unlikely(srq->head == srq->tail)) {
+			err = -ENOMEM;
+			*bad_wr = wr;
+			break;
+		}
+
 		srq->wrid[srq->head] = wr->wr_id;
 
 		next = get_wqe(srq, srq->head);
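
The new head == tail test makes mlx4_ib_post_srq_recv() fail with -ENOMEM once the SRQ's free-WQE list is exhausted, instead of reusing entries still owned by the hardware. A toy ring-buffer model of the guard (illustrative naming and layout, not mlx4's actual linked free list):

#include <stdio.h>

#define SRQ_WQES 4

struct toy_srq {
	unsigned long wrid[SRQ_WQES];
	int head;	/* next free slot */
	int tail;	/* end of the free list */
};

static int post_recv(struct toy_srq *srq, unsigned long wr_id)
{
	if (srq->head == srq->tail)	/* free list empty: SRQ is full */
		return -1;		/* the driver returns -ENOMEM here */
	srq->wrid[srq->head] = wr_id;
	srq->head = (srq->head + 1) % SRQ_WQES;
	return 0;
}

int main(void)
{
	struct toy_srq srq = { .head = 0, .tail = SRQ_WQES - 1 };
	int i, err = 0;
	for (i = 0; i < SRQ_WQES; i++)
		err = post_recv(&srq, i);
	printf("last post: %d\n", err);	/* fails once the ring fills */
	return 0;
}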
diff --git a/drivers/infiniband/hw/mlx4/user.h b/drivers/infiniband/hw/mlx4/user.h
index 5b8eddc9fa83..88c72d56368b 100644
--- a/drivers/infiniband/hw/mlx4/user.h
+++ b/drivers/infiniband/hw/mlx4/user.h
@@ -39,7 +39,7 @@
  * Increment this value if any changes that break userspace ABI
  * compatibility are made.
  */
-#define MLX4_IB_UVERBS_ABI_VERSION	1
+#define MLX4_IB_UVERBS_ABI_VERSION	2
 
 /*
  * Make sure that all structs defined in this file remain laid out so
@@ -87,6 +87,9 @@ struct mlx4_ib_create_srq_resp {
 struct mlx4_ib_create_qp {
 	__u64	buf_addr;
 	__u64	db_addr;
+	__u8	log_sq_bb_count;
+	__u8	log_sq_stride;
+	__u8	reserved[6];
 };
 
 #endif /* MLX4_IB_USER_H */
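
The struct growth follows the usual uverbs ABI rules: fixed-width types only, explicit reserved padding so the command stays a multiple of 8 bytes with identical layout for 32- and 64-bit userspace, and an ABI version bump so mismatched libraries are rejected cleanly. A compile-time check of that layout (a sketch using C11 _Static_assert; the kernel itself would use BUILD_BUG_ON()):

#include <stdint.h>
#include <stdio.h>

struct create_qp_cmd {
	uint64_t buf_addr;
	uint64_t db_addr;
	uint8_t  log_sq_bb_count;
	uint8_t  log_sq_stride;
	uint8_t  reserved[6];	/* pad back to an 8-byte multiple */
};

/* Same total size regardless of the compiling architecture. */
_Static_assert(sizeof(struct create_qp_cmd) == 24,
	       "userspace command struct layout changed");

int main(void)
{
	printf("%zu\n", sizeof(struct create_qp_cmd));	/* 24 */
	return 0;
}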
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index 27caf3b0648a..4b111a852ff6 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -279,6 +279,7 @@ int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
 			(be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 20) & 0xff;
 		header->grh.flow_label    =
 			ah->av->sl_tclass_flowlabel & cpu_to_be32(0xfffff);
+		header->grh.hop_limit     = ah->av->hop_limit;
 		ib_get_cached_gid(&dev->ib_dev,
 				  be32_to_cpu(ah->av->port_pd) >> 24,
 				  ah->av->gid_index % dev->limits.gid_table_len,
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 773145e29947..aa563e61de65 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -1250,12 +1250,14 @@ static void __mthca_remove_one(struct pci_dev *pdev)
 int __mthca_restart_one(struct pci_dev *pdev)
 {
 	struct mthca_dev *mdev;
+	int hca_type;
 
 	mdev = pci_get_drvdata(pdev);
 	if (!mdev)
 		return -ENODEV;
+	hca_type = mdev->hca_type;
 	__mthca_remove_one(pdev);
-	return __mthca_init_one(pdev, mdev->hca_type);
+	return __mthca_init_one(pdev, hca_type);
 }
 
 static int __devinit mthca_init_one(struct pci_dev *pdev,
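
The mthca_main.c hunk is a textbook use-after-free repair: __mthca_remove_one() frees the mthca_dev, so mdev->hca_type must be copied out before the teardown rather than read afterwards. The pattern in isolation (toy types, not the driver's):

#include <stdio.h>
#include <stdlib.h>

struct toy_dev { int hca_type; };

static void remove_one(struct toy_dev *dev) { free(dev); }
static int  init_one(int hca_type) { printf("re-init type %d\n", hca_type); return 0; }

static int restart_one(struct toy_dev *dev)
{
	/* Copy everything needed *before* the teardown frees dev;
	 * reading dev->hca_type after remove_one() was the bug. */
	int hca_type = dev->hca_type;

	remove_one(dev);
	return init_one(hca_type);
}

int main(void)
{
	struct toy_dev *dev = malloc(sizeof(*dev));
	if (!dev)
		return 1;
	dev->hca_type = 2;
	return restart_one(dev);
}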
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 2741ded89297..027664979fe2 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -296,7 +296,7 @@ static int to_mthca_st(int transport)
 	}
 }
 
-static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
+static void store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr,
 			int attr_mask)
 {
 	if (attr_mask & IB_QP_PKEY_INDEX)
@@ -328,7 +328,7 @@ static void init_port(struct mthca_dev *dev, int port)
 		mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
 }
 
-static __be32 get_hw_access_flags(struct mthca_qp *qp, struct ib_qp_attr *attr,
+static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr,
 				  int attr_mask)
 {
 	u8 dest_rd_atomic;
@@ -511,7 +511,7 @@ out:
 	return err;
 }
 
-static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah,
+static int mthca_path_set(struct mthca_dev *dev, const struct ib_ah_attr *ah,
 			  struct mthca_qp_path *path, u8 port)
 {
 	path->g_mylmc = ah->src_path_bits & 0x7f;
@@ -539,12 +539,12 @@ static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah,
 	return 0;
 }
 
-int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
-		    struct ib_udata *udata)
+static int __mthca_modify_qp(struct ib_qp *ibqp,
+			     const struct ib_qp_attr *attr, int attr_mask,
+			     enum ib_qp_state cur_state, enum ib_qp_state new_state)
 {
 	struct mthca_dev *dev = to_mdev(ibqp->device);
 	struct mthca_qp *qp = to_mqp(ibqp);
-	enum ib_qp_state cur_state, new_state;
 	struct mthca_mailbox *mailbox;
 	struct mthca_qp_param *qp_param;
 	struct mthca_qp_context *qp_context;
@@ -552,60 +552,6 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 	u8 status;
 	int err = -EINVAL;
 
-	mutex_lock(&qp->mutex);
-
-	if (attr_mask & IB_QP_CUR_STATE) {
-		cur_state = attr->cur_qp_state;
-	} else {
-		spin_lock_irq(&qp->sq.lock);
-		spin_lock(&qp->rq.lock);
-		cur_state = qp->state;
-		spin_unlock(&qp->rq.lock);
-		spin_unlock_irq(&qp->sq.lock);
-	}
-
-	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
-
-	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
-		mthca_dbg(dev, "Bad QP transition (transport %d) "
-			  "%d->%d with attr 0x%08x\n",
-			  qp->transport, cur_state, new_state,
-			  attr_mask);
-		goto out;
-	}
-
-	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
-		err = 0;
-		goto out;
-	}
-
-	if ((attr_mask & IB_QP_PKEY_INDEX) &&
-	    attr->pkey_index >= dev->limits.pkey_table_len) {
-		mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
-			  attr->pkey_index, dev->limits.pkey_table_len-1);
-		goto out;
-	}
-
-	if ((attr_mask & IB_QP_PORT) &&
-	    (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
-		mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
-		goto out;
-	}
-
-	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
-	    attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
-		mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
-			  attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
-		goto out;
-	}
-
-	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
-	    attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
-		mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
-			  attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
-		goto out;
-	}
-
 	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
 	if (IS_ERR(mailbox)) {
 		err = PTR_ERR(mailbox);
@@ -892,6 +838,98 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 
 out_mailbox:
 	mthca_free_mailbox(dev, mailbox);
+out:
+	return err;
+}
+
+static const struct ib_qp_attr dummy_init_attr = { .port_num = 1 };
+static const int dummy_init_attr_mask[] = {
+	[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
+			IB_QP_PORT		|
+			IB_QP_QKEY),
+	[IB_QPT_UC]  = (IB_QP_PKEY_INDEX	|
+			IB_QP_PORT		|
+			IB_QP_ACCESS_FLAGS),
+	[IB_QPT_RC]  = (IB_QP_PKEY_INDEX	|
+			IB_QP_PORT		|
+			IB_QP_ACCESS_FLAGS),
+	[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
+			IB_QP_QKEY),
+	[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
+			IB_QP_QKEY),
+};
+
+int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
+		    struct ib_udata *udata)
+{
+	struct mthca_dev *dev = to_mdev(ibqp->device);
+	struct mthca_qp *qp = to_mqp(ibqp);
+	enum ib_qp_state cur_state, new_state;
+	int err = -EINVAL;
+
+	mutex_lock(&qp->mutex);
+	if (attr_mask & IB_QP_CUR_STATE) {
+		cur_state = attr->cur_qp_state;
+	} else {
+		spin_lock_irq(&qp->sq.lock);
+		spin_lock(&qp->rq.lock);
+		cur_state = qp->state;
+		spin_unlock(&qp->rq.lock);
+		spin_unlock_irq(&qp->sq.lock);
+	}
+
+	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
+
+	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
+		mthca_dbg(dev, "Bad QP transition (transport %d) "
+			  "%d->%d with attr 0x%08x\n",
+			  qp->transport, cur_state, new_state,
+			  attr_mask);
+		goto out;
+	}
+
+	if ((attr_mask & IB_QP_PKEY_INDEX) &&
+	    attr->pkey_index >= dev->limits.pkey_table_len) {
+		mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
+			  attr->pkey_index, dev->limits.pkey_table_len-1);
+		goto out;
+	}
+
+	if ((attr_mask & IB_QP_PORT) &&
+	    (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
+		mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
+		goto out;
+	}
+
+	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+	    attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
+		mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
+			  attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
+		goto out;
+	}
+
+	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+	    attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
+		mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
+			  attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
+		goto out;
+	}
+
+	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
+		err = 0;
+		goto out;
+	}
+
+	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_ERR) {
+		err = __mthca_modify_qp(ibqp, &dummy_init_attr,
+					dummy_init_attr_mask[ibqp->qp_type],
+					IB_QPS_RESET, IB_QPS_INIT);
+		if (err)
+			goto out;
+		cur_state = IB_QPS_INIT;
+	}
+
+	err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
 
 out:
 	mutex_unlock(&qp->mutex);