Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_main.c')

 drivers/infiniband/hw/mthca/mthca_main.c | 40 +++++++++++++++++++++++---------------
 1 file changed, 25 insertions(+), 15 deletions(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 44bc6cc734ab..0d9b7d06bbc2 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -379,7 +379,7 @@ static int mthca_load_fw(struct mthca_dev *mdev)
 
 	mdev->fw.arbel.fw_icm =
 		mthca_alloc_icm(mdev, mdev->fw.arbel.fw_pages,
-				GFP_HIGHUSER | __GFP_NOWARN);
+				GFP_HIGHUSER | __GFP_NOWARN, 0);
 	if (!mdev->fw.arbel.fw_icm) {
 		mthca_err(mdev, "Couldn't allocate FW area, aborting.\n");
 		return -ENOMEM;
@@ -412,7 +412,7 @@ err_unmap_fa:
 	mthca_UNMAP_FA(mdev, &status);
 
 err_free:
-	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
+	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
 	return err;
 }
 
@@ -441,7 +441,7 @@ static int mthca_init_icm(struct mthca_dev *mdev,
 		  (unsigned long long) aux_pages << 2);
 
 	mdev->fw.arbel.aux_icm = mthca_alloc_icm(mdev, aux_pages,
-						 GFP_HIGHUSER | __GFP_NOWARN);
+						 GFP_HIGHUSER | __GFP_NOWARN, 0);
 	if (!mdev->fw.arbel.aux_icm) {
 		mthca_err(mdev, "Couldn't allocate aux memory, aborting.\n");
 		return -ENOMEM;
@@ -464,10 +464,15 @@ static int mthca_init_icm(struct mthca_dev *mdev,
 		goto err_unmap_aux;
 	}
 
+	/* CPU writes to non-reserved MTTs, while HCA might DMA to reserved mtts */
+	mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * MTHCA_MTT_SEG_SIZE,
+					   dma_get_cache_alignment()) / MTHCA_MTT_SEG_SIZE;
+
 	mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base,
							 MTHCA_MTT_SEG_SIZE,
							 mdev->limits.num_mtt_segs,
-							 mdev->limits.reserved_mtts, 1);
+							 mdev->limits.reserved_mtts,
+							 1, 0);
 	if (!mdev->mr_table.mtt_table) {
 		mthca_err(mdev, "Failed to map MTT context memory, aborting.\n");
 		err = -ENOMEM;
@@ -477,7 +482,8 @@ static int mthca_init_icm(struct mthca_dev *mdev,
 	mdev->mr_table.mpt_table = mthca_alloc_icm_table(mdev, init_hca->mpt_base,
							 dev_lim->mpt_entry_sz,
							 mdev->limits.num_mpts,
-							 mdev->limits.reserved_mrws, 1);
+							 mdev->limits.reserved_mrws,
+							 1, 1);
 	if (!mdev->mr_table.mpt_table) {
 		mthca_err(mdev, "Failed to map MPT context memory, aborting.\n");
 		err = -ENOMEM;
@@ -487,7 +493,8 @@ static int mthca_init_icm(struct mthca_dev *mdev,
 	mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base,
							dev_lim->qpc_entry_sz,
							mdev->limits.num_qps,
-							mdev->limits.reserved_qps, 0);
+							mdev->limits.reserved_qps,
+							0, 0);
 	if (!mdev->qp_table.qp_table) {
 		mthca_err(mdev, "Failed to map QP context memory, aborting.\n");
 		err = -ENOMEM;
@@ -497,7 +504,8 @@ static int mthca_init_icm(struct mthca_dev *mdev,
 	mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base,
							 dev_lim->eqpc_entry_sz,
							 mdev->limits.num_qps,
-							 mdev->limits.reserved_qps, 0);
+							 mdev->limits.reserved_qps,
+							 0, 0);
 	if (!mdev->qp_table.eqp_table) {
 		mthca_err(mdev, "Failed to map EQP context memory, aborting.\n");
 		err = -ENOMEM;
@@ -507,7 +515,7 @@ static int mthca_init_icm(struct mthca_dev *mdev,
 	mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base,
							 MTHCA_RDB_ENTRY_SIZE,
							 mdev->limits.num_qps <<
-							 mdev->qp_table.rdb_shift,
+							 mdev->qp_table.rdb_shift, 0,
							 0, 0);
 	if (!mdev->qp_table.rdb_table) {
 		mthca_err(mdev, "Failed to map RDB context memory, aborting\n");
@@ -518,7 +526,8 @@ static int mthca_init_icm(struct mthca_dev *mdev,
 	mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
						     dev_lim->cqc_entry_sz,
						     mdev->limits.num_cqs,
-						     mdev->limits.reserved_cqs, 0);
+						     mdev->limits.reserved_cqs,
+						     0, 0);
 	if (!mdev->cq_table.table) {
 		mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
 		err = -ENOMEM;
@@ -530,7 +539,8 @@ static int mthca_init_icm(struct mthca_dev *mdev,
 		mthca_alloc_icm_table(mdev, init_hca->srqc_base,
				      dev_lim->srq_entry_sz,
				      mdev->limits.num_srqs,
-				      mdev->limits.reserved_srqs, 0);
+				      mdev->limits.reserved_srqs,
+				      0, 0);
 	if (!mdev->srq_table.table) {
 		mthca_err(mdev, "Failed to map SRQ context memory, "
			  "aborting.\n");
@@ -550,7 +560,7 @@ static int mthca_init_icm(struct mthca_dev *mdev,
					   mdev->limits.num_amgms,
					   mdev->limits.num_mgms +
					   mdev->limits.num_amgms,
-					   0);
+					   0, 0);
 	if (!mdev->mcg_table.table) {
 		mthca_err(mdev, "Failed to map MCG context memory, aborting.\n");
 		err = -ENOMEM;
@@ -588,7 +598,7 @@ err_unmap_aux:
 	mthca_UNMAP_ICM_AUX(mdev, &status);
 
 err_free_aux:
-	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
+	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
 
 	return err;
 }
@@ -609,7 +619,7 @@ static void mthca_free_icms(struct mthca_dev *mdev)
 	mthca_unmap_eq_icm(mdev);
 
 	mthca_UNMAP_ICM_AUX(mdev, &status);
-	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
+	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
 }
 
 static int mthca_init_arbel(struct mthca_dev *mdev)
@@ -693,7 +703,7 @@ err_free_icm:
 
 err_stop_fw:
 	mthca_UNMAP_FA(mdev, &status);
-	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
+	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
 
 err_disable:
 	if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
@@ -712,7 +722,7 @@ static void mthca_close_hca(struct mthca_dev *mdev)
 	mthca_free_icms(mdev);
 
 	mthca_UNMAP_FA(mdev, &status);
-	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
+	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
 
 	if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
 		mthca_DISABLE_LAM(mdev, &status);
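
For reference, the new trailing argument that every mthca_alloc_icm(), mthca_free_icm() and mthca_alloc_icm_table() call gains above is a coherent-DMA allocation flag. Below is a minimal sketch of the prototypes implied by these call sites; the declarations presumably sit in the driver's mthca_memfree.h, and the parameter names (coherent, use_coherent) are assumptions inferred from the diff, not taken from it:

/*
 * Sketch of the presumed post-patch prototypes (mthca_memfree.h).
 * struct mthca_dev, struct mthca_icm and struct mthca_icm_table come from
 * the existing driver headers; the names of the added trailing flags are
 * assumptions based on the call sites in the diff above.
 */
struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
				  gfp_t gfp_mask, int coherent);
void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent);

struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
					      u64 virt, int obj_size,
					      int nobj, int reserved,
					      int use_lowmem, int use_coherent);

In mthca_main.c the flag is passed as 0 (non-coherent) at every site except the MPT table allocation, which now requests lowmem and coherent memory (1, 1).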