diff options
author | Jack Morgenstein <jackm@dev.mellanox.co.il> | 2007-08-01 05:28:20 -0400 |
---|---|---|
committer | Roland Dreier <rolandd@cisco.com> | 2007-10-09 22:59:15 -0400 |
commit | 5b0bf5e25efea77103b0ac7c8057cd56c778ef41 (patch) | |
tree | 60fc637ca840c2a88563edc495d01f2ad75a5b69 /drivers/net/mlx4/main.c | |
parent | 04d29b0ede242000b24cfc34cc78fbd164c47e1a (diff) |
mlx4_core: Support ICM tables in coherent memory
Enable having ICM tables in coherent memory, and use coherent memory
for the dMPT table. This will allow writing MPT entries for MRs both
via the SW2HW_MPT command and also directly by the driver for FMR
remapping without needing to flush or worry about cacheline boundaries.
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Michael S. Tsirkin <mst@dev.mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/net/mlx4/main.c')
-rw-r--r-- | drivers/net/mlx4/main.c | 40 |
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index 9e590e11c1cf..07c2847a7cc8 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c | |||
@@ -168,7 +168,7 @@ static int __devinit mlx4_load_fw(struct mlx4_dev *dev) | |||
168 | int err; | 168 | int err; |
169 | 169 | ||
170 | priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages, | 170 | priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages, |
171 | GFP_HIGHUSER | __GFP_NOWARN); | 171 | GFP_HIGHUSER | __GFP_NOWARN, 0); |
172 | if (!priv->fw.fw_icm) { | 172 | if (!priv->fw.fw_icm) { |
173 | mlx4_err(dev, "Couldn't allocate FW area, aborting.\n"); | 173 | mlx4_err(dev, "Couldn't allocate FW area, aborting.\n"); |
174 | return -ENOMEM; | 174 | return -ENOMEM; |
@@ -192,7 +192,7 @@ err_unmap_fa: | |||
192 | mlx4_UNMAP_FA(dev); | 192 | mlx4_UNMAP_FA(dev); |
193 | 193 | ||
194 | err_free: | 194 | err_free: |
195 | mlx4_free_icm(dev, priv->fw.fw_icm); | 195 | mlx4_free_icm(dev, priv->fw.fw_icm, 0); |
196 | return err; | 196 | return err; |
197 | } | 197 | } |
198 | 198 | ||
@@ -207,7 +207,7 @@ static int __devinit mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, | |||
207 | ((u64) (MLX4_CMPT_TYPE_QP * | 207 | ((u64) (MLX4_CMPT_TYPE_QP * |
208 | cmpt_entry_sz) << MLX4_CMPT_SHIFT), | 208 | cmpt_entry_sz) << MLX4_CMPT_SHIFT), |
209 | cmpt_entry_sz, dev->caps.num_qps, | 209 | cmpt_entry_sz, dev->caps.num_qps, |
210 | dev->caps.reserved_qps, 0); | 210 | dev->caps.reserved_qps, 0, 0); |
211 | if (err) | 211 | if (err) |
212 | goto err; | 212 | goto err; |
213 | 213 | ||
@@ -216,7 +216,7 @@ static int __devinit mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, | |||
216 | ((u64) (MLX4_CMPT_TYPE_SRQ * | 216 | ((u64) (MLX4_CMPT_TYPE_SRQ * |
217 | cmpt_entry_sz) << MLX4_CMPT_SHIFT), | 217 | cmpt_entry_sz) << MLX4_CMPT_SHIFT), |
218 | cmpt_entry_sz, dev->caps.num_srqs, | 218 | cmpt_entry_sz, dev->caps.num_srqs, |
219 | dev->caps.reserved_srqs, 0); | 219 | dev->caps.reserved_srqs, 0, 0); |
220 | if (err) | 220 | if (err) |
221 | goto err_qp; | 221 | goto err_qp; |
222 | 222 | ||
@@ -225,7 +225,7 @@ static int __devinit mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, | |||
225 | ((u64) (MLX4_CMPT_TYPE_CQ * | 225 | ((u64) (MLX4_CMPT_TYPE_CQ * |
226 | cmpt_entry_sz) << MLX4_CMPT_SHIFT), | 226 | cmpt_entry_sz) << MLX4_CMPT_SHIFT), |
227 | cmpt_entry_sz, dev->caps.num_cqs, | 227 | cmpt_entry_sz, dev->caps.num_cqs, |
228 | dev->caps.reserved_cqs, 0); | 228 | dev->caps.reserved_cqs, 0, 0); |
229 | if (err) | 229 | if (err) |
230 | goto err_srq; | 230 | goto err_srq; |
231 | 231 | ||
@@ -236,7 +236,7 @@ static int __devinit mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, | |||
236 | cmpt_entry_sz, | 236 | cmpt_entry_sz, |
237 | roundup_pow_of_two(MLX4_NUM_EQ + | 237 | roundup_pow_of_two(MLX4_NUM_EQ + |
238 | dev->caps.reserved_eqs), | 238 | dev->caps.reserved_eqs), |
239 | MLX4_NUM_EQ + dev->caps.reserved_eqs, 0); | 239 | MLX4_NUM_EQ + dev->caps.reserved_eqs, 0, 0); |
240 | if (err) | 240 | if (err) |
241 | goto err_cq; | 241 | goto err_cq; |
242 | 242 | ||
@@ -275,7 +275,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev, | |||
275 | (unsigned long long) aux_pages << 2); | 275 | (unsigned long long) aux_pages << 2); |
276 | 276 | ||
277 | priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages, | 277 | priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages, |
278 | GFP_HIGHUSER | __GFP_NOWARN); | 278 | GFP_HIGHUSER | __GFP_NOWARN, 0); |
279 | if (!priv->fw.aux_icm) { | 279 | if (!priv->fw.aux_icm) { |
280 | mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n"); | 280 | mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n"); |
281 | return -ENOMEM; | 281 | return -ENOMEM; |
@@ -303,7 +303,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev, | |||
303 | init_hca->mtt_base, | 303 | init_hca->mtt_base, |
304 | dev->caps.mtt_entry_sz, | 304 | dev->caps.mtt_entry_sz, |
305 | dev->caps.num_mtt_segs, | 305 | dev->caps.num_mtt_segs, |
306 | dev->caps.reserved_mtts, 1); | 306 | dev->caps.reserved_mtts, 1, 0); |
307 | if (err) { | 307 | if (err) { |
308 | mlx4_err(dev, "Failed to map MTT context memory, aborting.\n"); | 308 | mlx4_err(dev, "Failed to map MTT context memory, aborting.\n"); |
309 | goto err_unmap_eq; | 309 | goto err_unmap_eq; |
@@ -313,7 +313,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev, | |||
313 | init_hca->dmpt_base, | 313 | init_hca->dmpt_base, |
314 | dev_cap->dmpt_entry_sz, | 314 | dev_cap->dmpt_entry_sz, |
315 | dev->caps.num_mpts, | 315 | dev->caps.num_mpts, |
316 | dev->caps.reserved_mrws, 1); | 316 | dev->caps.reserved_mrws, 1, 1); |
317 | if (err) { | 317 | if (err) { |
318 | mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n"); | 318 | mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n"); |
319 | goto err_unmap_mtt; | 319 | goto err_unmap_mtt; |
@@ -323,7 +323,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev, | |||
323 | init_hca->qpc_base, | 323 | init_hca->qpc_base, |
324 | dev_cap->qpc_entry_sz, | 324 | dev_cap->qpc_entry_sz, |
325 | dev->caps.num_qps, | 325 | dev->caps.num_qps, |
326 | dev->caps.reserved_qps, 0); | 326 | dev->caps.reserved_qps, 0, 0); |
327 | if (err) { | 327 | if (err) { |
328 | mlx4_err(dev, "Failed to map QP context memory, aborting.\n"); | 328 | mlx4_err(dev, "Failed to map QP context memory, aborting.\n"); |
329 | goto err_unmap_dmpt; | 329 | goto err_unmap_dmpt; |
@@ -333,7 +333,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev, | |||
333 | init_hca->auxc_base, | 333 | init_hca->auxc_base, |
334 | dev_cap->aux_entry_sz, | 334 | dev_cap->aux_entry_sz, |
335 | dev->caps.num_qps, | 335 | dev->caps.num_qps, |
336 | dev->caps.reserved_qps, 0); | 336 | dev->caps.reserved_qps, 0, 0); |
337 | if (err) { | 337 | if (err) { |
338 | mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n"); | 338 | mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n"); |
339 | goto err_unmap_qp; | 339 | goto err_unmap_qp; |
@@ -343,7 +343,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev, | |||
343 | init_hca->altc_base, | 343 | init_hca->altc_base, |
344 | dev_cap->altc_entry_sz, | 344 | dev_cap->altc_entry_sz, |
345 | dev->caps.num_qps, | 345 | dev->caps.num_qps, |
346 | dev->caps.reserved_qps, 0); | 346 | dev->caps.reserved_qps, 0, 0); |
347 | if (err) { | 347 | if (err) { |
348 | mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n"); | 348 | mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n"); |
349 | goto err_unmap_auxc; | 349 | goto err_unmap_auxc; |
@@ -353,7 +353,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev, | |||
353 | init_hca->rdmarc_base, | 353 | init_hca->rdmarc_base, |
354 | dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, | 354 | dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, |
355 | dev->caps.num_qps, | 355 | dev->caps.num_qps, |
356 | dev->caps.reserved_qps, 0); | 356 | dev->caps.reserved_qps, 0, 0); |
357 | if (err) { | 357 | if (err) { |
358 | mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); | 358 | mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); |
359 | goto err_unmap_altc; | 359 | goto err_unmap_altc; |
@@ -363,7 +363,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev, | |||
363 | init_hca->cqc_base, | 363 | init_hca->cqc_base, |
364 | dev_cap->cqc_entry_sz, | 364 | dev_cap->cqc_entry_sz, |
365 | dev->caps.num_cqs, | 365 | dev->caps.num_cqs, |
366 | dev->caps.reserved_cqs, 0); | 366 | dev->caps.reserved_cqs, 0, 0); |
367 | if (err) { | 367 | if (err) { |
368 | mlx4_err(dev, "Failed to map CQ context memory, aborting.\n"); | 368 | mlx4_err(dev, "Failed to map CQ context memory, aborting.\n"); |
369 | goto err_unmap_rdmarc; | 369 | goto err_unmap_rdmarc; |
@@ -373,7 +373,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev, | |||
373 | init_hca->srqc_base, | 373 | init_hca->srqc_base, |
374 | dev_cap->srq_entry_sz, | 374 | dev_cap->srq_entry_sz, |
375 | dev->caps.num_srqs, | 375 | dev->caps.num_srqs, |
376 | dev->caps.reserved_srqs, 0); | 376 | dev->caps.reserved_srqs, 0, 0); |
377 | if (err) { | 377 | if (err) { |
378 | mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n"); | 378 | mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n"); |
379 | goto err_unmap_cq; | 379 | goto err_unmap_cq; |
@@ -388,7 +388,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev, | |||
388 | init_hca->mc_base, MLX4_MGM_ENTRY_SIZE, | 388 | init_hca->mc_base, MLX4_MGM_ENTRY_SIZE, |
389 | dev->caps.num_mgms + dev->caps.num_amgms, | 389 | dev->caps.num_mgms + dev->caps.num_amgms, |
390 | dev->caps.num_mgms + dev->caps.num_amgms, | 390 | dev->caps.num_mgms + dev->caps.num_amgms, |
391 | 0); | 391 | 0, 0); |
392 | if (err) { | 392 | if (err) { |
393 | mlx4_err(dev, "Failed to map MCG context memory, aborting.\n"); | 393 | mlx4_err(dev, "Failed to map MCG context memory, aborting.\n"); |
394 | goto err_unmap_srq; | 394 | goto err_unmap_srq; |
@@ -433,7 +433,7 @@ err_unmap_aux: | |||
433 | mlx4_UNMAP_ICM_AUX(dev); | 433 | mlx4_UNMAP_ICM_AUX(dev); |
434 | 434 | ||
435 | err_free_aux: | 435 | err_free_aux: |
436 | mlx4_free_icm(dev, priv->fw.aux_icm); | 436 | mlx4_free_icm(dev, priv->fw.aux_icm, 0); |
437 | 437 | ||
438 | return err; | 438 | return err; |
439 | } | 439 | } |
@@ -458,7 +458,7 @@ static void mlx4_free_icms(struct mlx4_dev *dev) | |||
458 | mlx4_unmap_eq_icm(dev); | 458 | mlx4_unmap_eq_icm(dev); |
459 | 459 | ||
460 | mlx4_UNMAP_ICM_AUX(dev); | 460 | mlx4_UNMAP_ICM_AUX(dev); |
461 | mlx4_free_icm(dev, priv->fw.aux_icm); | 461 | mlx4_free_icm(dev, priv->fw.aux_icm, 0); |
462 | } | 462 | } |
463 | 463 | ||
464 | static void mlx4_close_hca(struct mlx4_dev *dev) | 464 | static void mlx4_close_hca(struct mlx4_dev *dev) |
@@ -466,7 +466,7 @@ static void mlx4_close_hca(struct mlx4_dev *dev) | |||
466 | mlx4_CLOSE_HCA(dev, 0); | 466 | mlx4_CLOSE_HCA(dev, 0); |
467 | mlx4_free_icms(dev); | 467 | mlx4_free_icms(dev); |
468 | mlx4_UNMAP_FA(dev); | 468 | mlx4_UNMAP_FA(dev); |
469 | mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm); | 469 | mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); |
470 | } | 470 | } |
471 | 471 | ||
472 | static int __devinit mlx4_init_hca(struct mlx4_dev *dev) | 472 | static int __devinit mlx4_init_hca(struct mlx4_dev *dev) |
@@ -537,7 +537,7 @@ err_free_icm: | |||
537 | 537 | ||
538 | err_stop_fw: | 538 | err_stop_fw: |
539 | mlx4_UNMAP_FA(dev); | 539 | mlx4_UNMAP_FA(dev); |
540 | mlx4_free_icm(dev, priv->fw.fw_icm); | 540 | mlx4_free_icm(dev, priv->fw.fw_icm, 0); |
541 | 541 | ||
542 | return err; | 542 | return err; |
543 | } | 543 | } |