author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-11-27 17:20:51 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-11-27 17:20:51 -0500
commit		86e67a07d4dc8cd40454698f2abb972fced06910 (patch)
tree		d6c3fb4d8eef44f9bc1b0655085cb52b0d9e8da9
parent		febb187761b02fce7d61b9c897d0e701f672b5ee (diff)
parent		a316b79c3306c59176d7ae04e4aad12374dfed37 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/iser: Add missing counter increment in iser_data_buf_aligned_len()
  IB/ehca: Fix static rate regression
  mlx4_core: Fix state check in mlx4_qp_modify()
  IB/ipath: Normalize error return codes for posting work requests
  IB/ipath: Fix offset returned to ibv_modify_srq()
  IB/ipath: Fix error path in QP creation
  IB/ipath: Fix offset returned to ibv_resize_cq()
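The mlx4_core entry above fixes a copy-and-paste bounds check in drivers/net/mlx4/qp.c (shown at the end of this diff): cur_state was tested twice and new_state was never validated before both values were used to index the transition-opcode table. A minimal, self-contained C sketch of the corrected pattern follows; the names used here (qp_state, transition_op, qp_modify_check) are illustrative stand-ins, not the kernel's own symbols.

/*
 * Editor's sketch, not kernel code: range-check both state indices
 * before indexing the transition table.
 */
#include <stdio.h>

enum qp_state { QP_RESET, QP_INIT, QP_RTR, QP_RTS, QP_ERR, QP_NUM_STATE };

/* A non-zero entry means the transition is supported. */
static const int transition_op[QP_NUM_STATE][QP_NUM_STATE] = {
	[QP_RESET][QP_INIT] = 1,
	[QP_INIT][QP_RTR]   = 2,
	[QP_RTR][QP_RTS]    = 3,
};

static int qp_modify_check(unsigned int cur_state, unsigned int new_state)
{
	/* Both indices must be validated; checking cur_state twice (the
	 * regression) lets a bad new_state read past the table. */
	if (cur_state >= QP_NUM_STATE || new_state >= QP_NUM_STATE ||
	    !transition_op[cur_state][new_state])
		return -1;	/* the kernel returns -EINVAL here */
	return 0;
}

int main(void)
{
	printf("RESET -> INIT: %d\n", qp_modify_check(QP_RESET, QP_INIT)); /* ok:  0 */
	printf("RTS -> RESET:  %d\n", qp_modify_check(QP_RTS, QP_RESET));  /* bad: -1 */
	printf("out of range:  %d\n", qp_modify_check(QP_RESET, 42));      /* bad: -1 */
	return 0;
}

The corresponding one-line kernel fix appears in the drivers/net/mlx4/qp.c hunk at the bottom of this page.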
-rw-r--r--	drivers/infiniband/hw/ehca/ehca_qp.c		 4
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_cq.c		19
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_qp.c		15
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_srq.c		44
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_verbs.c	 8
-rw-r--r--	drivers/infiniband/ulp/iser/iser_memory.c	 6
-rw-r--r--	drivers/net/mlx4/qp.c				 2
7 files changed, 61 insertions, 37 deletions
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 2e3e6547cb78..dd126681fed0 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -1203,7 +1203,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 		mqpcb->service_level = attr->ah_attr.sl;
 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL, 1);
 
-		if (ehca_calc_ipd(shca, my_qp->init_attr.port_num,
+		if (ehca_calc_ipd(shca, mqpcb->prim_phys_port,
 				  attr->ah_attr.static_rate,
 				  &mqpcb->max_static_rate)) {
 			ret = -EINVAL;
@@ -1302,7 +1302,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 		mqpcb->source_path_bits_al = attr->alt_ah_attr.src_path_bits;
 		mqpcb->service_level_al = attr->alt_ah_attr.sl;
 
-		if (ehca_calc_ipd(shca, my_qp->init_attr.port_num,
+		if (ehca_calc_ipd(shca, mqpcb->alt_phys_port,
 				  attr->alt_ah_attr.static_rate,
 				  &mqpcb->max_static_rate_al)) {
 			ret = -EINVAL;
diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
index 08d8ae148cd0..d1380c7a1703 100644
--- a/drivers/infiniband/hw/ipath/ipath_cq.c
+++ b/drivers/infiniband/hw/ipath/ipath_cq.c
@@ -395,12 +395,9 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
 		goto bail;
 	}
 
-	/*
-	 * Return the address of the WC as the offset to mmap.
-	 * See ipath_mmap() for details.
-	 */
+	/* Check that we can write the offset to mmap. */
 	if (udata && udata->outlen >= sizeof(__u64)) {
-		__u64 offset = (__u64) wc;
+		__u64 offset = 0;
 
 		ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
 		if (ret)
@@ -450,6 +447,18 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
 		struct ipath_mmap_info *ip = cq->ip;
 
 		ipath_update_mmap_info(dev, ip, sz, wc);
+
+		/*
+		 * Return the offset to mmap.
+		 * See ipath_mmap() for details.
+		 */
+		if (udata && udata->outlen >= sizeof(__u64)) {
+			ret = ib_copy_to_udata(udata, &ip->offset,
+					       sizeof(ip->offset));
+			if (ret)
+				goto bail;
+		}
+
 		spin_lock_irq(&dev->pending_lock);
 		if (list_empty(&ip->pending_mmaps))
 			list_add(&ip->pending_mmaps, &dev->pending_mmaps);
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 6a41fdbc8e57..b997ff88401b 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -835,7 +835,8 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 				   init_attr->qp_type);
 		if (err) {
 			ret = ERR_PTR(err);
-			goto bail_rwq;
+			vfree(qp->r_rq.wq);
+			goto bail_qp;
 		}
 		qp->ip = NULL;
 		ipath_reset_qp(qp);
@@ -863,7 +864,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 					       sizeof(offset));
 			if (err) {
 				ret = ERR_PTR(err);
-				goto bail_rwq;
+				goto bail_ip;
 			}
 		} else {
 			u32 s = sizeof(struct ipath_rwq) +
@@ -875,7 +876,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 						   qp->r_rq.wq);
 			if (!qp->ip) {
 				ret = ERR_PTR(-ENOMEM);
-				goto bail_rwq;
+				goto bail_ip;
 			}
 
 			err = ib_copy_to_udata(udata, &(qp->ip->offset),
@@ -907,9 +908,11 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 	goto bail;
 
 bail_ip:
-	kfree(qp->ip);
-bail_rwq:
-	vfree(qp->r_rq.wq);
+	if (qp->ip)
+		kref_put(&qp->ip->ref, ipath_release_mmap_info);
+	else
+		vfree(qp->r_rq.wq);
+	ipath_free_qp(&dev->qp_table, qp);
 bail_qp:
 	kfree(qp);
 bail_swq:
diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c
index 40c36ec19016..2fef36f4b675 100644
--- a/drivers/infiniband/hw/ipath/ipath_srq.c
+++ b/drivers/infiniband/hw/ipath/ipath_srq.c
@@ -59,7 +59,7 @@ int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 
 		if ((unsigned) wr->num_sge > srq->rq.max_sge) {
 			*bad_wr = wr;
-			ret = -ENOMEM;
+			ret = -EINVAL;
 			goto bail;
 		}
 
@@ -211,11 +211,11 @@ int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 		     struct ib_udata *udata)
 {
 	struct ipath_srq *srq = to_isrq(ibsrq);
+	struct ipath_rwq *wq;
 	int ret = 0;
 
 	if (attr_mask & IB_SRQ_MAX_WR) {
 		struct ipath_rwq *owq;
-		struct ipath_rwq *wq;
 		struct ipath_rwqe *p;
 		u32 sz, size, n, head, tail;
 
@@ -236,27 +236,20 @@ int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 			goto bail;
 		}
 
-		/*
-		 * Return the address of the RWQ as the offset to mmap.
-		 * See ipath_mmap() for details.
-		 */
+		/* Check that we can write the offset to mmap. */
 		if (udata && udata->inlen >= sizeof(__u64)) {
 			__u64 offset_addr;
-			__u64 offset = (__u64) wq;
+			__u64 offset = 0;
 
 			ret = ib_copy_from_udata(&offset_addr, udata,
 						 sizeof(offset_addr));
-			if (ret) {
-				vfree(wq);
-				goto bail;
-			}
+			if (ret)
+				goto bail_free;
 			udata->outbuf = (void __user *) offset_addr;
 			ret = ib_copy_to_udata(udata, &offset,
 					       sizeof(offset));
-			if (ret) {
-				vfree(wq);
-				goto bail;
-			}
+			if (ret)
+				goto bail_free;
 		}
 
 		spin_lock_irq(&srq->rq.lock);
262 spin_lock_irq(&srq->rq.lock); 255 spin_lock_irq(&srq->rq.lock);
@@ -277,10 +270,8 @@ int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 		else
 			n -= tail;
 		if (size <= n) {
-			spin_unlock_irq(&srq->rq.lock);
-			vfree(wq);
 			ret = -EINVAL;
-			goto bail;
+			goto bail_unlock;
 		}
 		n = 0;
 		p = wq->wq;
@@ -314,6 +305,18 @@ int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 			u32 s = sizeof(struct ipath_rwq) + size * sz;
 
 			ipath_update_mmap_info(dev, ip, s, wq);
+
+			/*
+			 * Return the offset to mmap.
+			 * See ipath_mmap() for details.
+			 */
+			if (udata && udata->inlen >= sizeof(__u64)) {
+				ret = ib_copy_to_udata(udata, &ip->offset,
+						       sizeof(ip->offset));
+				if (ret)
+					goto bail;
+			}
+
 			spin_lock_irq(&dev->pending_lock);
 			if (list_empty(&ip->pending_mmaps))
 				list_add(&ip->pending_mmaps,
@@ -328,7 +331,12 @@ int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 			srq->limit = attr->srq_limit;
 		spin_unlock_irq(&srq->rq.lock);
 	}
+	goto bail;
 
+bail_unlock:
+	spin_unlock_irq(&srq->rq.lock);
+bail_free:
+	vfree(wq);
 bail:
 	return ret;
 }
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 74f77e7c2c1b..c4c998446c7b 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -302,8 +302,10 @@ static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
 	next = qp->s_head + 1;
 	if (next >= qp->s_size)
 		next = 0;
-	if (next == qp->s_last)
-		goto bail_inval;
+	if (next == qp->s_last) {
+		ret = -ENOMEM;
+		goto bail;
+	}
 
 	wqe = get_swqe_ptr(qp, qp->s_head);
 	wqe->wr = *wr;
@@ -404,7 +406,7 @@ static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
 		if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
 			*bad_wr = wr;
-			ret = -ENOMEM;
+			ret = -EINVAL;
 			goto bail;
 		}
 
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index d68798061795..4a17743a639f 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -310,13 +310,15 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
 		if (i + 1 < data->dma_nents) {
 			next_addr = ib_sg_dma_address(ibdev, sg_next(sg));
 			/* are i, i+1 fragments of the same page? */
-			if (end_addr == next_addr)
+			if (end_addr == next_addr) {
+				cnt++;
 				continue;
-			else if (!IS_4K_ALIGNED(end_addr)) {
+			} else if (!IS_4K_ALIGNED(end_addr)) {
 				ret_len = cnt + 1;
 				break;
 			}
 		}
+		cnt++;
 	}
 	if (i == data->dma_nents)
 		ret_len = cnt; /* loop ended */
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index 42b47639c81c..fa24e6597591 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -113,7 +113,7 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 	struct mlx4_cmd_mailbox *mailbox;
 	int ret = 0;
 
-	if (cur_state >= MLX4_QP_NUM_STATE || cur_state >= MLX4_QP_NUM_STATE ||
+	if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
 	    !op[cur_state][new_state])
 		return -EINVAL;
 