author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-11-14 21:53:49 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-11-14 21:53:49 -0500
commit    4e396db8034cd5566a6b77716c15954b533090a6 (patch)
tree      106edaeff4cac9d1a402eaa33863422248bf68cc /drivers
parent    a052f4473603765eb6b4c19754689977601dc1d1 (diff)
parent    e383d19e90cfbbf8e00512d44194ce175b3f60a2 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  mlx4_core: Fix thinko in QP destroy (incorrect bitmap_free)
  RDMA/cxgb3: Set the max_qp_init_rd_atom attribute in query_device
  IB/ehca: Fix static rate calculation
  IB/ehca: Return physical link information in query_port()
  IB/ipath: Fix race with ACK retry timeout list management
  IB/ipath: Fix memory leak in ipath_resize_cq() if copy_to_user() fails
  mlx4_core: Fix possible bad free in mlx4_buf_free()
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c |  1
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_av.c        | 48
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h   |  1
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_hca.c       | 20
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_iverbs.h    |  3
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c      |  3
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c        | 29
-rw-r--r--  drivers/infiniband/hw/ehca/hipz_hw.h        |  6
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_cq.c      | 11
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_rc.c      |  5
-rw-r--r--  drivers/net/mlx4/alloc.c                    |  7
-rw-r--r--  drivers/net/mlx4/qp.c                       |  2
12 files changed, 89 insertions, 47 deletions
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index f0c777589374..b5436ca92e68 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1000,6 +1000,7 @@ static int iwch_query_device(struct ib_device *ibdev,
 	props->max_sge = dev->attr.max_sge_per_wr;
 	props->max_sge_rd = 1;
 	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
+	props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
 	props->max_cq = dev->attr.max_cqs;
 	props->max_cqe = dev->attr.max_cqes_per_cq;
 	props->max_mr = dev->attr.max_mem_regs;
diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/infiniband/hw/ehca/ehca_av.c
index 97d108634c58..453eb995c1d4 100644
--- a/drivers/infiniband/hw/ehca/ehca_av.c
+++ b/drivers/infiniband/hw/ehca/ehca_av.c
@@ -50,6 +50,38 @@

 static struct kmem_cache *av_cache;

+int ehca_calc_ipd(struct ehca_shca *shca, int port,
+		  enum ib_rate path_rate, u32 *ipd)
+{
+	int path = ib_rate_to_mult(path_rate);
+	int link, ret;
+	struct ib_port_attr pa;
+
+	if (path_rate == IB_RATE_PORT_CURRENT) {
+		*ipd = 0;
+		return 0;
+	}
+
+	if (unlikely(path < 0)) {
+		ehca_err(&shca->ib_device, "Invalid static rate! path_rate=%x",
+			 path_rate);
+		return -EINVAL;
+	}
+
+	ret = ehca_query_port(&shca->ib_device, port, &pa);
+	if (unlikely(ret < 0)) {
+		ehca_err(&shca->ib_device, "Failed to query port ret=%i", ret);
+		return ret;
+	}
+
+	link = ib_width_enum_to_int(pa.active_width) * pa.active_speed;
+
+	/* IPD = round((link / path) - 1) */
+	*ipd = ((link + (path >> 1)) / path) - 1;
+
+	return 0;
+}
+
 struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
 {
 	int ret;
@@ -69,15 +101,13 @@ struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
 	av->av.slid_path_bits = ah_attr->src_path_bits;

 	if (ehca_static_rate < 0) {
-		int ah_mult = ib_rate_to_mult(ah_attr->static_rate);
-		int ehca_mult =
-			ib_rate_to_mult(shca->sport[ah_attr->port_num].rate );
-
-		if (ah_mult >= ehca_mult)
-			av->av.ipd = 0;
-		else
-			av->av.ipd = (ah_mult > 0) ?
-				((ehca_mult - 1) / ah_mult) : 0;
+		u32 ipd;
+		if (ehca_calc_ipd(shca, ah_attr->port_num,
+				  ah_attr->static_rate, &ipd)) {
+			ret = -EINVAL;
+			goto create_ah_exit1;
+		}
+		av->av.ipd = ipd;
 	} else
 		av->av.ipd = ehca_static_rate;

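The replacement logic computes the inter-packet delay as round((link / path) - 1) using only integer arithmetic: adding half the divisor before dividing rounds to the nearest integer. A standalone sketch of the same arithmetic (the width, speed and rate multipliers below are made up, not taken from the driver):

    #include <stdio.h>

    /* Same trick as ehca_calc_ipd(): round-to-nearest division, minus one.
     * link = port width multiplier * speed, path = static-rate multiplier,
     * both in units of 2.5 Gb/s. */
    static unsigned int calc_ipd(int link, int path)
    {
        return ((link + (path >> 1)) / path) - 1;
    }

    int main(void)
    {
        /* hypothetical 12X SDR link (12 * 1) capped at a 10 Gb/s rate (4) */
        printf("ipd = %u\n", calc_ipd(12, 4));  /* (12 + 2) / 4 - 1 = 2 */
        return 0;
    }

An IPD of 2 means two delay units are inserted per packet, so the effective rate is link / (IPD + 1), i.e. 30 Gb/s / 3 = 10 Gb/s in this hypothetical case.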
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 2d660ae189e5..87f12d4312a7 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -95,7 +95,6 @@ struct ehca_sma_attr {
 struct ehca_sport {
 	struct ib_cq *ibcq_aqp1;
 	struct ib_qp *ibqp_aqp1;
-	enum ib_rate rate;
 	enum ib_port_state port_state;
 	struct ehca_sma_attr saved_attr;
 };
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index 15806d140461..5bd7b591987e 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -151,7 +151,6 @@ int ehca_query_port(struct ib_device *ibdev,
 	}

 	memset(props, 0, sizeof(struct ib_port_attr));
-	props->state = rblock->state;

 	switch (rblock->max_mtu) {
 	case 0x1:
@@ -188,11 +187,20 @@ int ehca_query_port(struct ib_device *ibdev,
 	props->subnet_timeout = rblock->subnet_timeout;
 	props->init_type_reply = rblock->init_type_reply;

-	props->active_width = IB_WIDTH_12X;
-	props->active_speed = 0x1;
-
-	/* at the moment (logical) link state is always LINK_UP */
-	props->phys_state = 0x5;
+	if (rblock->state && rblock->phys_width) {
+		props->phys_state = rblock->phys_pstate;
+		props->state = rblock->phys_state;
+		props->active_width = rblock->phys_width;
+		props->active_speed = rblock->phys_speed;
+	} else {
+		/* old firmware releases don't report physical
+		 * port info, so use default values
+		 */
+		props->phys_state = 5;
+		props->state = rblock->state;
+		props->active_width = IB_WIDTH_12X;
+		props->active_speed = 0x1;
+	}

 query_port1:
 	ehca_free_fw_ctrlblock(rblock);
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index dce503bb7d6b..5485799cdc8d 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -189,6 +189,9 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

 void ehca_poll_eqs(unsigned long data);

+int ehca_calc_ipd(struct ehca_shca *shca, int port,
+		  enum ib_rate path_rate, u32 *ipd);
+
 #ifdef CONFIG_PPC_64K_PAGES
 void *ehca_alloc_fw_ctrlblock(gfp_t flags);
 void ehca_free_fw_ctrlblock(void *ptr);
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index c6cd38c5321f..90d4334179bf 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -327,9 +327,6 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
 	shca->hw_level = ehca_hw_level;
 	ehca_gen_dbg(" ... hardware level=%x", shca->hw_level);

-	shca->sport[0].rate = IB_RATE_30_GBPS;
-	shca->sport[1].rate = IB_RATE_30_GBPS;
-
 	shca->hca_cap = rblock->hca_cap_indicators;
 	ehca_gen_dbg(" ... HCA capabilities:");
 	for (i = 0; i < ARRAY_SIZE(hca_cap_descr); i++)
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index de182648b282..2e3e6547cb78 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -1196,10 +1196,6 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_QKEY, 1);
 	}
 	if (attr_mask & IB_QP_AV) {
-		int ah_mult = ib_rate_to_mult(attr->ah_attr.static_rate);
-		int ehca_mult = ib_rate_to_mult(shca->sport[my_qp->
-					       init_attr.port_num].rate);
-
 		mqpcb->dlid = attr->ah_attr.dlid;
 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID, 1);
 		mqpcb->source_path_bits = attr->ah_attr.src_path_bits;
@@ -1207,11 +1203,12 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 		mqpcb->service_level = attr->ah_attr.sl;
 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL, 1);

-		if (ah_mult < ehca_mult)
-			mqpcb->max_static_rate = (ah_mult > 0) ?
-				((ehca_mult - 1) / ah_mult) : 0;
-		else
-			mqpcb->max_static_rate = 0;
+		if (ehca_calc_ipd(shca, my_qp->init_attr.port_num,
+				  attr->ah_attr.static_rate,
+				  &mqpcb->max_static_rate)) {
+			ret = -EINVAL;
+			goto modify_qp_exit2;
+		}
 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1);

 		/*
@@ -1280,10 +1277,6 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 			(MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP, 1);
 	}
 	if (attr_mask & IB_QP_ALT_PATH) {
-		int ah_mult = ib_rate_to_mult(attr->alt_ah_attr.static_rate);
-		int ehca_mult = ib_rate_to_mult(
-			shca->sport[my_qp->init_attr.port_num].rate);
-
 		if (attr->alt_port_num < 1
 		    || attr->alt_port_num > shca->num_ports) {
 			ret = -EINVAL;
@@ -1309,10 +1302,12 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 		mqpcb->source_path_bits_al = attr->alt_ah_attr.src_path_bits;
 		mqpcb->service_level_al = attr->alt_ah_attr.sl;

-		if (ah_mult > 0 && ah_mult < ehca_mult)
-			mqpcb->max_static_rate_al = (ehca_mult - 1) / ah_mult;
-		else
-			mqpcb->max_static_rate_al = 0;
+		if (ehca_calc_ipd(shca, my_qp->init_attr.port_num,
+				  attr->alt_ah_attr.static_rate,
+				  &mqpcb->max_static_rate_al)) {
+			ret = -EINVAL;
+			goto modify_qp_exit2;
+		}

 		/* OpenIB doesn't support alternate retry counts - copy them */
 		mqpcb->retry_count_al = mqpcb->retry_count;
diff --git a/drivers/infiniband/hw/ehca/hipz_hw.h b/drivers/infiniband/hw/ehca/hipz_hw.h
index d9739e554515..485b8400359e 100644
--- a/drivers/infiniband/hw/ehca/hipz_hw.h
+++ b/drivers/infiniband/hw/ehca/hipz_hw.h
@@ -402,7 +402,11 @@ struct hipz_query_port {
 	u64 max_msg_sz;
 	u32 max_mtu;
 	u32 vl_cap;
-	u8 reserved2[1900];
+	u32 phys_pstate;
+	u32 phys_state;
+	u32 phys_speed;
+	u32 phys_width;
+	u8 reserved2[1884];
 	u64 guid_entries[255];
 } __attribute__ ((packed));

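The four new u32 fields take 16 bytes out of the reserved area (1900 - 16 = 1884), so the packed firmware query block keeps its overall size and the offsets of the fields that follow. A compile-time check of that bookkeeping, using a toy model of just the changed tail rather than the real hipz_query_port layout:

    #include <assert.h>
    #include <stdint.h>

    /* Toy model of only the tail the patch touches, not the real struct. */
    struct old_tail {
        uint8_t reserved2[1900];
    } __attribute__ ((packed));

    struct new_tail {
        uint32_t phys_pstate;
        uint32_t phys_state;
        uint32_t phys_speed;
        uint32_t phys_width;
        uint8_t  reserved2[1884];
    } __attribute__ ((packed));

    /* Four u32s (16 bytes) must come out of reserved2, nothing more. */
    static_assert(sizeof(struct old_tail) == sizeof(struct new_tail),
                  "firmware control block changed size");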
diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
index 645ed71fd797..08d8ae148cd0 100644
--- a/drivers/infiniband/hw/ipath/ipath_cq.c
+++ b/drivers/infiniband/hw/ipath/ipath_cq.c
@@ -404,7 +404,7 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)

 		ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
 		if (ret)
-			goto bail;
+			goto bail_free;
 	}

 	spin_lock_irq(&cq->lock);
@@ -424,10 +424,8 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
 	else
 		n = head - tail;
 	if (unlikely((u32)cqe < n)) {
-		spin_unlock_irq(&cq->lock);
-		vfree(wc);
 		ret = -EOVERFLOW;
-		goto bail;
+		goto bail_unlock;
 	}
 	for (n = 0; tail != head; n++) {
 		if (cq->ip)
@@ -459,7 +457,12 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
 	}

 	ret = 0;
+	goto bail;

+bail_unlock:
+	spin_unlock_irq(&cq->lock);
+bail_free:
+	vfree(wc);
 bail:
 	return ret;
 }
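With the stacked labels, every failure after the vmalloc() releases exactly what it holds: bail_unlock drops the lock and falls through to bail_free, which frees the buffer, while the success path jumps past both. A userspace sketch of the same layered-cleanup pattern, with malloc() and a pthread mutex standing in for vmalloc() and spin_lock_irq() (the function and its failure triggers are invented for illustration):

    #include <errno.h>
    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Each error path unwinds only what it owes: the late failure drops
     * the lock *and* frees the buffer, the early one only frees, and
     * success hands the buffer to the caller. */
    static int resize(size_t n, int fail_early, int fail_late, void **out)
    {
        void *wc = malloc(n);          /* stands in for vmalloc() */
        int ret;

        if (!wc)
            return -ENOMEM;

        if (fail_early) {              /* like the ib_copy_to_udata() failure */
            ret = -EINVAL;
            goto bail_free;
        }

        pthread_mutex_lock(&lock);     /* stands in for spin_lock_irq() */
        if (fail_late) {               /* like the "new CQ too small" check */
            ret = -EOVERFLOW;
            goto bail_unlock;
        }
        pthread_mutex_unlock(&lock);

        *out = wc;                     /* success: caller now owns wc */
        ret = 0;
        goto bail;

    bail_unlock:
        pthread_mutex_unlock(&lock);
    bail_free:
        free(wc);
    bail:
        return ret;
    }

    int main(void)
    {
        void *buf = NULL;

        resize(64, 1, 0, &buf);        /* early failure: nothing leaked */
        resize(64, 0, 1, &buf);        /* late failure: unlock + free   */
        if (resize(64, 0, 0, &buf) == 0)
            free(buf);                 /* success path */
        return 0;
    }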
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 5c29b2bfea17..120a61b03bc4 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -959,8 +959,9 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
 		/* If this is a partial ACK, reset the retransmit timer. */
 		if (qp->s_last != qp->s_tail) {
 			spin_lock(&dev->pending_lock);
-			list_add_tail(&qp->timerwait,
-				      &dev->pending[dev->pending_index]);
+			if (list_empty(&qp->timerwait))
+				list_add_tail(&qp->timerwait,
+					      &dev->pending[dev->pending_index]);
 			spin_unlock(&dev->pending_lock);
 			/*
 			 * If we get a partial ACK for a resent operation,
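The guard only queues the QP when its timerwait node is not already linked onto a pending list; adding the same node a second time would corrupt the list. A compilable miniature of the idea, with hand-rolled stand-ins for the kernel's list helpers:

    #include <stdbool.h>
    #include <stdio.h>

    /* Tiny stand-ins for list_head, list_empty() and list_add_tail(). */
    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
    static bool list_empty(const struct list_head *h) { return h->next == h; }

    static void list_add_tail(struct list_head *n, struct list_head *head)
    {
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
    }

    struct qp { struct list_head timerwait; };

    int main(void)
    {
        struct list_head pending;
        struct qp qp;

        INIT_LIST_HEAD(&pending);
        INIT_LIST_HEAD(&qp.timerwait);  /* empty node == "not queued" */

        /* The patch's guard: queue the QP only if it is not already on
         * the timeout list, even if this runs twice in a row. */
        for (int i = 0; i < 2; i++)
            if (list_empty(&qp.timerwait))
                list_add_tail(&qp.timerwait, &pending);

        printf("queued exactly once: %s\n",
               pending.next == &qp.timerwait && qp.timerwait.next == &pending
               ? "yes" : "no");
        return 0;
    }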
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index f8d63d39f592..b226e019bc8b 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -171,9 +171,10 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
 				  buf->u.direct.map);
 	else {
 		for (i = 0; i < buf->nbufs; ++i)
-			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
-					  buf->u.page_list[i].buf,
-					  buf->u.page_list[i].map);
+			if (buf->u.page_list[i].buf)
+				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+						  buf->u.page_list[i].buf,
+						  buf->u.page_list[i].map);
 		kfree(buf->u.page_list);
 	}
 }
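Skipping NULL entries lets the free routine run safely over a page list that was only partially populated, as happens when allocation fails part-way, instead of handing never-allocated buffers to dma_free_coherent(). A userspace sketch of the pattern; free() stands in for dma_free_coherent() and the struct and names are made up:

    #include <stdlib.h>

    /* Freeing a partially populated page list: skip entries that were
     * never allocated.  Unlike free(), the real dma_free_coherent()
     * must not be handed a NULL buffer. */
    struct buf {
        int nbufs;
        void **pages;
    };

    static void buf_free(struct buf *b)
    {
        for (int i = 0; i < b->nbufs; ++i)
            if (b->pages[i])            /* the check the patch adds */
                free(b->pages[i]);
        free(b->pages);
    }

    int main(void)
    {
        struct buf b = { .nbufs = 4 };

        b.pages = calloc(b.nbufs, sizeof(*b.pages));
        if (!b.pages)
            return 1;
        b.pages[0] = malloc(64);   /* pretend allocation failed after page 0 */
        buf_free(&b);              /* must not touch the NULL entries */
        return 0;
    }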
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index cc4b1be18219..42b47639c81c 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -240,7 +240,7 @@ void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
 	mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
 	mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);

-	if (qp->qpn < dev->caps.sqp_start + 8)
+	if (qp->qpn >= dev->caps.sqp_start + 8)
 		mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
 }
 EXPORT_SYMBOL_GPL(mlx4_qp_free);
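The corrected test returns a QP number to the allocator bitmap only if it was allocated from that bitmap in the first place; the eight special QPs starting at dev->caps.sqp_start never were, and the old comparison freed exactly those. A small demonstration of the flipped condition (SQP_START is an invented stand-in for the driver's real value):

    #include <stdio.h>

    /* Only QP numbers that came from the allocator bitmap go back to it;
     * the eight special QPs at sqp_start never did. */
    #define SQP_START 64

    int main(void)
    {
        for (int qpn = SQP_START; qpn < SQP_START + 10; qpn++)
            printf("qpn %3d: old test frees it: %d, fixed test frees it: %d\n",
                   qpn,
                   qpn <  SQP_START + 8,    /* the thinko */
                   qpn >= SQP_START + 8);   /* the fix */
        return 0;
    }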