about summary refs log tree commit diff stats
path: root/drivers/net/mlx4/main.c
diff options
context:
space:
mode:
authorYevgeny Petrilin <yevgenyp@mellanox.co.il>2008-10-22 13:25:29 -0400
committerRoland Dreier <rolandd@cisco.com>2008-10-22 13:25:29 -0400
commit93fc9e1bb6507dde945c2eab68c93e1066ac3691 (patch)
treeaa495ec31b7372580f9ec50acead1d170fd70aab /drivers/net/mlx4/main.c
parenta3cdcbfa8fb1fccfe48d359da86e99546610c562 (diff)
mlx4_core: Support multiple pre-reserved QP regions
For ethernet support, we need to reserve QPs for the ethernet and fibre channel driver. The QPs are reserved at the end of the QP table. (This way we assure that they are aligned to their size) We need to consider these reserved ranges in bitmap creation, so we extend the mlx4 bitmap utility functions to allow reserved ranges at both the bottom and the top of the range. Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.co.il> Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/net/mlx4/main.c')
-rw-r--r--drivers/net/mlx4/main.c62
1 file changed, 56 insertions(+), 6 deletions(-)
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 1252a919de2e..560e1962212e 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -85,6 +85,19 @@ static struct mlx4_profile default_profile = {
85 .num_mtt = 1 << 20, 85 .num_mtt = 1 << 20,
86}; 86};
87 87
88static int log_num_mac = 2;
89module_param_named(log_num_mac, log_num_mac, int, 0444);
90MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
91
92static int log_num_vlan;
93module_param_named(log_num_vlan, log_num_vlan, int, 0444);
94MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
95
96static int use_prio;
97module_param_named(use_prio, use_prio, bool, 0444);
98MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
99 "(0/1, default 0)");
100
88static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 101static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
89{ 102{
90 int err; 103 int err;
@@ -134,7 +147,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
134 dev->caps.max_rq_sg = dev_cap->max_rq_sg; 147 dev->caps.max_rq_sg = dev_cap->max_rq_sg;
135 dev->caps.max_wqes = dev_cap->max_qp_sz; 148 dev->caps.max_wqes = dev_cap->max_qp_sz;
136 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp; 149 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
137 dev->caps.reserved_qps = dev_cap->reserved_qps;
138 dev->caps.max_srq_wqes = dev_cap->max_srq_sz; 150 dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
139 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1; 151 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
140 dev->caps.reserved_srqs = dev_cap->reserved_srqs; 152 dev->caps.reserved_srqs = dev_cap->reserved_srqs;
@@ -163,6 +175,39 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
163 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 175 dev->caps.stat_rate_support = dev_cap->stat_rate_support;
164 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 176 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
165 177
178 dev->caps.log_num_macs = log_num_mac;
179 dev->caps.log_num_vlans = log_num_vlan;
180 dev->caps.log_num_prios = use_prio ? 3 : 0;
181
182 for (i = 1; i <= dev->caps.num_ports; ++i) {
183 if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
184 dev->caps.log_num_macs = dev_cap->log_max_macs[i];
185 mlx4_warn(dev, "Requested number of MACs is too much "
186 "for port %d, reducing to %d.\n",
187 i, 1 << dev->caps.log_num_macs);
188 }
189 if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
190 dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
191 mlx4_warn(dev, "Requested number of VLANs is too much "
192 "for port %d, reducing to %d.\n",
193 i, 1 << dev->caps.log_num_vlans);
194 }
195 }
196
197 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
198 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
199 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
200 (1 << dev->caps.log_num_macs) *
201 (1 << dev->caps.log_num_vlans) *
202 (1 << dev->caps.log_num_prios) *
203 dev->caps.num_ports;
204 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
205
206 dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
207 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
208 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
209 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
210
166 return 0; 211 return 0;
167} 212}
168 213
@@ -211,7 +256,8 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
211 ((u64) (MLX4_CMPT_TYPE_QP * 256 ((u64) (MLX4_CMPT_TYPE_QP *
212 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 257 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
213 cmpt_entry_sz, dev->caps.num_qps, 258 cmpt_entry_sz, dev->caps.num_qps,
214 dev->caps.reserved_qps, 0, 0); 259 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
260 0, 0);
215 if (err) 261 if (err)
216 goto err; 262 goto err;
217 263
@@ -336,7 +382,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
336 init_hca->qpc_base, 382 init_hca->qpc_base,
337 dev_cap->qpc_entry_sz, 383 dev_cap->qpc_entry_sz,
338 dev->caps.num_qps, 384 dev->caps.num_qps,
339 dev->caps.reserved_qps, 0, 0); 385 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
386 0, 0);
340 if (err) { 387 if (err) {
341 mlx4_err(dev, "Failed to map QP context memory, aborting.\n"); 388 mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
342 goto err_unmap_dmpt; 389 goto err_unmap_dmpt;
@@ -346,7 +393,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
346 init_hca->auxc_base, 393 init_hca->auxc_base,
347 dev_cap->aux_entry_sz, 394 dev_cap->aux_entry_sz,
348 dev->caps.num_qps, 395 dev->caps.num_qps,
349 dev->caps.reserved_qps, 0, 0); 396 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
397 0, 0);
350 if (err) { 398 if (err) {
351 mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n"); 399 mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
352 goto err_unmap_qp; 400 goto err_unmap_qp;
@@ -356,7 +404,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
356 init_hca->altc_base, 404 init_hca->altc_base,
357 dev_cap->altc_entry_sz, 405 dev_cap->altc_entry_sz,
358 dev->caps.num_qps, 406 dev->caps.num_qps,
359 dev->caps.reserved_qps, 0, 0); 407 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
408 0, 0);
360 if (err) { 409 if (err) {
361 mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n"); 410 mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
362 goto err_unmap_auxc; 411 goto err_unmap_auxc;
@@ -366,7 +415,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
366 init_hca->rdmarc_base, 415 init_hca->rdmarc_base,
367 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, 416 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
368 dev->caps.num_qps, 417 dev->caps.num_qps,
369 dev->caps.reserved_qps, 0, 0); 418 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
419 0, 0);
370 if (err) { 420 if (err) {
371 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); 421 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
372 goto err_unmap_altc; 422 goto err_unmap_altc;