author     Michael S. Tsirkin <mst@mellanox.co.il>    2007-02-10 16:15:08 -0500
committer  Roland Dreier <rolandd@cisco.com>          2007-02-12 19:16:29 -0500
commit     391e4dea7189eef32b0c2d121e7e047110c1b83c (patch)
tree       99cfb7f912837fb6f37ae290c9f1345d218eab06 /drivers/infiniband/hw/mthca/mthca_main.c
parent     1d1f19cfce7687b557cebdc41bf8a5eeba8a9882 (diff)
IB/mthca: Fix access to MTT and MPT tables on non-cache-coherent CPUs
We allocate the MTT table with alloc_pages() and then do pci_map_sg(),
so we must call pci_dma_sync_sg() after the CPU writes to the MTT
table.  This works since the device will never write MTTs on mem-free
HCAs, once we get rid of the use of the WRITE_MTT firmware command.
This change is needed to make that work, and is an improvement for
now, since it gives FMRs a chance at working.

For MPTs, both the device and CPU might write there, so we must
allocate DMA coherent memory for these.

Signed-off-by: Michael S. Tsirkin <mst@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
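As a hedged illustration (not part of the patch), the streaming-DMA rule
the message describes looks roughly like the sketch below.  The function
and parameter names are hypothetical, and it uses the
pci_dma_sync_sg_for_cpu()/pci_dma_sync_sg_for_device() pair that the
older pci_dma_sync_sg() had been split into by this point:

	#include <linux/pci.h>
	#include <asm/byteorder.h>

	/*
	 * Sketch: update one entry of a table that was allocated with
	 * alloc_pages() and mapped with pci_map_sg().  On a
	 * non-cache-coherent CPU, the sync calls are what make the CPU
	 * write visible to the HCA instead of leaving it in a dirty
	 * cache line.
	 */
	static void update_mtt_entry(struct pci_dev *pdev,
				     struct scatterlist *sg, int nents,
				     __be64 *entry, u64 new_value)
	{
		/* Hand ownership to the CPU before touching the table. */
		pci_dma_sync_sg_for_cpu(pdev, sg, nents,
					PCI_DMA_BIDIRECTIONAL);

		*entry = cpu_to_be64(new_value);

		/* Flush the write so the device reads fresh data. */
		pci_dma_sync_sg_for_device(pdev, sg, nents,
					   PCI_DMA_BIDIRECTIONAL);
	}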
Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_main.c')
-rw-r--r--	drivers/infiniband/hw/mthca/mthca_main.c | 36 +++++++++++++++++++++---------------
1 file changed, 21 insertions(+), 15 deletions(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 9a9dd32885a0..0d9b7d06bbc2 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -379,7 +379,7 @@ static int mthca_load_fw(struct mthca_dev *mdev)
 
 	mdev->fw.arbel.fw_icm =
 		mthca_alloc_icm(mdev, mdev->fw.arbel.fw_pages,
-				GFP_HIGHUSER | __GFP_NOWARN);
+				GFP_HIGHUSER | __GFP_NOWARN, 0);
 	if (!mdev->fw.arbel.fw_icm) {
 		mthca_err(mdev, "Couldn't allocate FW area, aborting.\n");
 		return -ENOMEM;
@@ -412,7 +412,7 @@ err_unmap_fa:
 	mthca_UNMAP_FA(mdev, &status);
 
 err_free:
-	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
+	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
 	return err;
 }
 
@@ -441,7 +441,7 @@ static int mthca_init_icm(struct mthca_dev *mdev,
 		  (unsigned long long) aux_pages << 2);
 
 	mdev->fw.arbel.aux_icm = mthca_alloc_icm(mdev, aux_pages,
-						 GFP_HIGHUSER | __GFP_NOWARN);
+						 GFP_HIGHUSER | __GFP_NOWARN, 0);
 	if (!mdev->fw.arbel.aux_icm) {
 		mthca_err(mdev, "Couldn't allocate aux memory, aborting.\n");
 		return -ENOMEM;
@@ -471,7 +471,8 @@ static int mthca_init_icm(struct mthca_dev *mdev,
 	mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base,
 							 MTHCA_MTT_SEG_SIZE,
 							 mdev->limits.num_mtt_segs,
-							 mdev->limits.reserved_mtts, 1);
+							 mdev->limits.reserved_mtts,
+							 1, 0);
 	if (!mdev->mr_table.mtt_table) {
 		mthca_err(mdev, "Failed to map MTT context memory, aborting.\n");
 		err = -ENOMEM;
@@ -481,7 +482,8 @@ static int mthca_init_icm(struct mthca_dev *mdev,
 	mdev->mr_table.mpt_table = mthca_alloc_icm_table(mdev, init_hca->mpt_base,
 							 dev_lim->mpt_entry_sz,
 							 mdev->limits.num_mpts,
-							 mdev->limits.reserved_mrws, 1);
+							 mdev->limits.reserved_mrws,
+							 1, 1);
 	if (!mdev->mr_table.mpt_table) {
 		mthca_err(mdev, "Failed to map MPT context memory, aborting.\n");
 		err = -ENOMEM;
@@ -491,7 +493,8 @@ static int mthca_init_icm(struct mthca_dev *mdev,
 	mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base,
 							dev_lim->qpc_entry_sz,
 							mdev->limits.num_qps,
-							mdev->limits.reserved_qps, 0);
+							mdev->limits.reserved_qps,
+							0, 0);
 	if (!mdev->qp_table.qp_table) {
 		mthca_err(mdev, "Failed to map QP context memory, aborting.\n");
 		err = -ENOMEM;
@@ -501,7 +504,8 @@ static int mthca_init_icm(struct mthca_dev *mdev,
 	mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base,
 							 dev_lim->eqpc_entry_sz,
 							 mdev->limits.num_qps,
-							 mdev->limits.reserved_qps, 0);
+							 mdev->limits.reserved_qps,
+							 0, 0);
 	if (!mdev->qp_table.eqp_table) {
 		mthca_err(mdev, "Failed to map EQP context memory, aborting.\n");
 		err = -ENOMEM;
@@ -511,7 +515,7 @@ static int mthca_init_icm(struct mthca_dev *mdev,
 	mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base,
 							 MTHCA_RDB_ENTRY_SIZE,
 							 mdev->limits.num_qps <<
-							 mdev->qp_table.rdb_shift,
+							 mdev->qp_table.rdb_shift, 0,
 							 0, 0);
 	if (!mdev->qp_table.rdb_table) {
 		mthca_err(mdev, "Failed to map RDB context memory, aborting\n");
@@ -522,7 +526,8 @@ static int mthca_init_icm(struct mthca_dev *mdev,
 	mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
 						     dev_lim->cqc_entry_sz,
 						     mdev->limits.num_cqs,
-						     mdev->limits.reserved_cqs, 0);
+						     mdev->limits.reserved_cqs,
+						     0, 0);
 	if (!mdev->cq_table.table) {
 		mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
 		err = -ENOMEM;
@@ -534,7 +539,8 @@ static int mthca_init_icm(struct mthca_dev *mdev,
 		mthca_alloc_icm_table(mdev, init_hca->srqc_base,
 				      dev_lim->srq_entry_sz,
 				      mdev->limits.num_srqs,
-				      mdev->limits.reserved_srqs, 0);
+				      mdev->limits.reserved_srqs,
+				      0, 0);
 	if (!mdev->srq_table.table) {
 		mthca_err(mdev, "Failed to map SRQ context memory, "
 			  "aborting.\n");
@@ -554,7 +560,7 @@ static int mthca_init_icm(struct mthca_dev *mdev,
 					      mdev->limits.num_amgms,
 					      mdev->limits.num_mgms +
 					      mdev->limits.num_amgms,
-					      0);
+					      0, 0);
 	if (!mdev->mcg_table.table) {
 		mthca_err(mdev, "Failed to map MCG context memory, aborting.\n");
 		err = -ENOMEM;
@@ -592,7 +598,7 @@ err_unmap_aux:
 	mthca_UNMAP_ICM_AUX(mdev, &status);
 
 err_free_aux:
-	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
+	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
 
 	return err;
 }
@@ -613,7 +619,7 @@ static void mthca_free_icms(struct mthca_dev *mdev)
 	mthca_unmap_eq_icm(mdev);
 
 	mthca_UNMAP_ICM_AUX(mdev, &status);
-	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
+	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
 }
 
 static int mthca_init_arbel(struct mthca_dev *mdev)
@@ -697,7 +703,7 @@ err_free_icm:
 
 err_stop_fw:
 	mthca_UNMAP_FA(mdev, &status);
-	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
+	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
 
 err_disable:
 	if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
@@ -716,7 +722,7 @@ static void mthca_close_hca(struct mthca_dev *mdev)
 	mthca_free_icms(mdev);
 
 	mthca_UNMAP_FA(mdev, &status);
-	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
+	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
 
 	if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
 		mthca_DISABLE_LAM(mdev, &status);
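
The new trailing argument visible in every hunk above appears to be a
coherency flag: 0 for tables the device never writes behind the CPU's
back, and 1 (the second 1 in the MPT call's "1, 1") only for the MPT
table, which both the CPU and the device may write.  As a hedged sketch
(hypothetical names, not this patch's code), a coherent allocation for
such a table amounts to using the consistent-memory API of the era:

	#include <linux/pci.h>

	/*
	 * Sketch: one page of DMA-coherent memory for a table that both
	 * the CPU and the HCA write.  Unlike the streaming mapping used
	 * for MTTs, coherent memory needs no pci_dma_sync_*() calls
	 * around CPU accesses.
	 */
	static void *alloc_coherent_table(struct pci_dev *pdev,
					  dma_addr_t *dma_handle)
	{
		return pci_alloc_consistent(pdev, PAGE_SIZE, dma_handle);
	}

	static void free_coherent_table(struct pci_dev *pdev, void *table,
					dma_addr_t dma_handle)
	{
		pci_free_consistent(pdev, PAGE_SIZE, table, dma_handle);
	}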