path: root/drivers/net/mlx4
author	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-14 16:53:22 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-14 16:53:22 -0400
commit	cf5046323ea254be72535648a9d090b18b8510f3 (patch)
tree	d44894722bd965b2f28a54c4dc4157f22b618c34 /drivers/net/mlx4
parent	ae937debe178b4327fd67d604ee83a20f22aa0de (diff)
parent	8d34ff34016959d464fd5582ea6a8226fe57ab0e (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  mlx4_core: Don't double-free IRQs when falling back from MSI-X to INTx
  IB/mthca: Don't double-free IRQs when falling back from MSI-X to INTx
  IB/mlx4: Add strong ordering to local inval and fast reg work requests
  IB/ehca: Remove superfluous bitmasks from QP control block
  RDMA/cxgb3: Limit fast register size based on T3 limitations
  RDMA/cxgb3: Report correct port state and MTU
  mlx4_core: Add module parameter for number of MTTs per segment
  IB/mthca: Add module parameter for number of MTTs per segment
  RDMA/nes: Fix off-by-one bugs in reset_adapter_ne020() and init_serdes()
  infiniband: Remove void casts
  IB/ehca: Increment version number
  IB/ehca: Remove unnecessary memory operations for userspace queue pairs
  IB/ehca: Fall back to vmalloc() for big allocations
  IB/ehca: Replace vmalloc() with kmalloc() for queue allocation
Diffstat (limited to 'drivers/net/mlx4')
-rw-r--r--	drivers/net/mlx4/eq.c	4
-rw-r--r--	drivers/net/mlx4/main.c	14
-rw-r--r--	drivers/net/mlx4/mr.c	6
-rw-r--r--	drivers/net/mlx4/profile.c	2
4 files changed, 19 insertions, 7 deletions
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 8830dcb92ec8..ce064e324200 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -497,8 +497,10 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
 	if (eq_table->have_irq)
 		free_irq(dev->pdev->irq, dev);
 	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
-		if (eq_table->eq[i].have_irq)
+		if (eq_table->eq[i].have_irq) {
 			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
+			eq_table->eq[i].have_irq = 0;
+		}
 
 	kfree(eq_table->irq_names);
 }
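The eq.c change is the "don't double-free IRQs" fix from the merge: once an event queue's IRQ has been freed, its have_irq flag is cleared, so a later call to mlx4_free_irqs() (for example after MSI-X setup fails and the driver retries in INTx mode) skips that entry instead of freeing the same IRQ twice. The following standalone C sketch only illustrates the flag-guarded pattern; the fake_eq struct, the IRQ numbers, and fake_free_irq() are invented for illustration and are not part of the driver.

	/* Sketch (not driver code): clearing the flag makes teardown idempotent. */
	#include <stdio.h>

	struct fake_eq {
		int irq;
		int have_irq;	/* set only if the IRQ was actually requested */
	};

	static void fake_free_irq(int irq)
	{
		printf("free_irq(%d)\n", irq);	/* freeing the same IRQ twice would be a bug */
	}

	static void free_eq_irqs(struct fake_eq *eq, int n)
	{
		for (int i = 0; i < n; ++i)
			if (eq[i].have_irq) {
				fake_free_irq(eq[i].irq);
				eq[i].have_irq = 0;	/* the fix: never free it again */
			}
	}

	int main(void)
	{
		struct fake_eq eq[2] = { { 32, 1 }, { 33, 1 } };

		free_eq_irqs(eq, 2);	/* e.g. tearing down a failed MSI-X setup */
		free_eq_irqs(eq, 2);	/* second pass is now a no-op */
		return 0;
	}

Run as written, each free_irq() is printed exactly once even though the teardown helper is invoked twice.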
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 30bea9689694..018348c01193 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -100,6 +100,10 @@ module_param_named(use_prio, use_prio, bool, 0444);
 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
 		 "(0/1, default 0)");
 
+static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
+module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
+MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
+
 int mlx4_check_port_params(struct mlx4_dev *dev,
 			   enum mlx4_port_type *port_type)
 {
@@ -203,12 +207,13 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev->caps.max_cqes           = dev_cap->max_cq_sz - 1;
 	dev->caps.reserved_cqs       = dev_cap->reserved_cqs;
 	dev->caps.reserved_eqs       = dev_cap->reserved_eqs;
+	dev->caps.mtts_per_seg       = 1 << log_mtts_per_seg;
 	dev->caps.reserved_mtts      = DIV_ROUND_UP(dev_cap->reserved_mtts,
-						    MLX4_MTT_ENTRY_PER_SEG);
+						    dev->caps.mtts_per_seg);
 	dev->caps.reserved_mrws      = dev_cap->reserved_mrws;
 	dev->caps.reserved_uars      = dev_cap->reserved_uars;
 	dev->caps.reserved_pds       = dev_cap->reserved_pds;
-	dev->caps.mtt_entry_sz       = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
+	dev->caps.mtt_entry_sz       = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
 	dev->caps.max_msg_sz         = dev_cap->max_msg_sz;
 	dev->caps.page_size_cap      = ~(u32) (dev_cap->min_page_sz - 1);
 	dev->caps.flags              = dev_cap->flags;
@@ -1304,6 +1309,11 @@ static int __init mlx4_verify_params(void)
 		return -1;
 	}
 
+	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
+		printk(KERN_WARNING "mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
+		return -1;
+	}
+
 	return 0;
 }
 
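The main.c hunks add the log_mtts_per_seg module parameter (validated to the range 1-5 in mlx4_verify_params()) and derive dev->caps.mtts_per_seg from it instead of hard-coding MLX4_MTT_ENTRY_PER_SEG. Loading the module with, say, log_mtts_per_seg=3 would select 8 MTT entries per segment. The userspace sketch below reproduces the same arithmetic; the reserved_mtts and mtt_entry_sz numbers are hypothetical examples, not values reported by real firmware.

	/* Sketch of the new per-segment arithmetic in mlx4_dev_cap(). */
	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		int log_mtts_per_seg = 3;	/* e.g. modprobe mlx4_core log_mtts_per_seg=3 */
		int mtts_per_seg = 1 << log_mtts_per_seg;

		int reserved_mtts = 100;	/* hypothetical firmware value */
		int mtt_entry_sz = 8;		/* hypothetical per-entry size in bytes */

		printf("mtts_per_seg  = %d\n", mtts_per_seg);
		printf("reserved segs = %d\n", DIV_ROUND_UP(reserved_mtts, mtts_per_seg));
		printf("segment size  = %d bytes\n", mtts_per_seg * mtt_entry_sz);
		return 0;
	}

With these inputs it reports 8 entries per segment, 13 reserved segments, and a 64-byte segment.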
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 0caf74cae8bc..3b8973d19933 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -209,7 +209,7 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
 	} else
 		mtt->page_shift = page_shift;
 
-	for (mtt->order = 0, i = MLX4_MTT_ENTRY_PER_SEG; i < npages; i <<= 1)
+	for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
 		++mtt->order;
 
 	mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
@@ -350,7 +350,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
 		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
 						   MLX4_MPT_PD_FLAG_RAE);
 		mpt_entry->mtt_sz    = cpu_to_be32((1 << mr->mtt.order) *
-						   MLX4_MTT_ENTRY_PER_SEG);
+						   dev->caps.mtts_per_seg);
 	} else {
 		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
 	}
@@ -391,7 +391,7 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 	    (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
 		return -EINVAL;
 
-	if (start_index & (MLX4_MTT_ENTRY_PER_SEG - 1))
+	if (start_index & (dev->caps.mtts_per_seg - 1))
 		return -EINVAL;
 
 	mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
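In mlx4_mtt_init() the loop changed above computes mtt->order, the log2 of the number of MTT segments needed so that mtts_per_seg << order covers npages entries, now using the configurable segment size. A small sketch of that computation, with illustrative numbers only:

	/* Sketch of the mtt->order loop from mlx4_mtt_init(). */
	#include <stdio.h>

	static int mtt_order(int mtts_per_seg, int npages)
	{
		int order = 0;

		for (int i = mtts_per_seg; i < npages; i <<= 1)
			++order;
		return order;
	}

	int main(void)
	{
		/* With 8 entries per segment, 100 pages need 2^4 = 16 segments. */
		printf("order = %d\n", mtt_order(8, 100));
		return 0;
	}

With 8 entries per segment and 100 pages it prints order = 4, i.e. 16 segments holding up to 128 entries.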
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index cebdf3243ca1..bd22df95adf9 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -98,7 +98,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
 	profile[MLX4_RES_EQ].size     = dev_cap->eqc_entry_sz;
 	profile[MLX4_RES_DMPT].size   = dev_cap->dmpt_entry_sz;
 	profile[MLX4_RES_CMPT].size   = dev_cap->cmpt_entry_sz;
-	profile[MLX4_RES_MTT].size    = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
+	profile[MLX4_RES_MTT].size    = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
 	profile[MLX4_RES_MCG].size    = MLX4_MGM_ENTRY_SIZE;
 
 	profile[MLX4_RES_QP].num      = request->num_qp;