author    Roland Dreier <roland@topspin.com>  2005-06-27 17:36:45 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-06-27 18:11:46 -0400
commit    ed878458eeff9754d66f1b0325df6ebbfcdce668 (patch)
tree      eab302706f069a7922e1d953b5f33b61bdc868a4 /drivers/infiniband/hw
parent    80fd8238734c852a8ed1ea39f8444a2df33bd161 (diff)
[PATCH] IB/mthca: Align FW command mailboxes to 4K
Future versions of Mellanox HCA firmware will require command mailboxes to be
aligned to 4K.  Support this by using a pci_pool to allocate all mailboxes.
This has the added benefit of shrinking the source and text of mthca.

Signed-off-by: Roland Dreier <roland@topspin.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
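For readers skimming the diff, the new calling convention can be summarized by a short
sketch (this sketch is not part of the commit; "dev" and "cq" stand for the caller's
existing objects and error handling is abbreviated).  Callers no longer kmalloc an
over-aligned buffer or pci_map_single() their own memory; they allocate a
struct mthca_mailbox, build the command data in mailbox->buf, and the command helpers
hand mailbox->dma to the firmware:

	struct mthca_mailbox *mailbox;
	struct mthca_cq_context *cq_context;
	u8 status;
	int err;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);   /* 4K-aligned buffer from the pci_pool */
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;                        /* CPU address of the mailbox */
	/* ... fill in *cq_context ... */

	err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status); /* helper passes mailbox->dma to FW */

	mthca_free_mailbox(dev, mailbox);                 /* return the buffer to the pool */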
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c  510
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.h    46
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c     34
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h     1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c     37
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mcg.c    63
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mr.c     46
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c     14
8 files changed, 329 insertions, 422 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 78d4891720e..1557a522d83 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -444,11 +444,20 @@ int mthca_cmd_init(struct mthca_dev *dev)
444 return -ENOMEM; 444 return -ENOMEM;
445 } 445 }
446 446
447 dev->cmd.pool = pci_pool_create("mthca_cmd", dev->pdev,
448 MTHCA_MAILBOX_SIZE,
449 MTHCA_MAILBOX_SIZE, 0);
450 if (!dev->cmd.pool) {
451 iounmap(dev->hcr);
452 return -ENOMEM;
453 }
454
447 return 0; 455 return 0;
448} 456}
449 457
450void mthca_cmd_cleanup(struct mthca_dev *dev) 458void mthca_cmd_cleanup(struct mthca_dev *dev)
451{ 459{
460 pci_pool_destroy(dev->cmd.pool);
452 iounmap(dev->hcr); 461 iounmap(dev->hcr);
453} 462}
454 463
@@ -510,6 +519,33 @@ void mthca_cmd_use_polling(struct mthca_dev *dev)
510 up(&dev->cmd.poll_sem); 519 up(&dev->cmd.poll_sem);
511} 520}
512 521
522struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
523 unsigned int gfp_mask)
524{
525 struct mthca_mailbox *mailbox;
526
527 mailbox = kmalloc(sizeof *mailbox, gfp_mask);
528 if (!mailbox)
529 return ERR_PTR(-ENOMEM);
530
531 mailbox->buf = pci_pool_alloc(dev->cmd.pool, gfp_mask, &mailbox->dma);
532 if (!mailbox->buf) {
533 kfree(mailbox);
534 return ERR_PTR(-ENOMEM);
535 }
536
537 return mailbox;
538}
539
540void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox)
541{
542 if (!mailbox)
543 return;
544
545 pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
546 kfree(mailbox);
547}
548
513int mthca_SYS_EN(struct mthca_dev *dev, u8 *status) 549int mthca_SYS_EN(struct mthca_dev *dev, u8 *status)
514{ 550{
515 u64 out; 551 u64 out;
@@ -534,20 +570,20 @@ int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status)
534static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm, 570static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
535 u64 virt, u8 *status) 571 u64 virt, u8 *status)
536{ 572{
537 u32 *inbox; 573 struct mthca_mailbox *mailbox;
538 dma_addr_t indma;
539 struct mthca_icm_iter iter; 574 struct mthca_icm_iter iter;
575 __be64 *pages;
540 int lg; 576 int lg;
541 int nent = 0; 577 int nent = 0;
542 int i; 578 int i;
543 int err = 0; 579 int err = 0;
544 int ts = 0, tc = 0; 580 int ts = 0, tc = 0;
545 581
546 inbox = pci_alloc_consistent(dev->pdev, PAGE_SIZE, &indma); 582 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
547 if (!inbox) 583 if (IS_ERR(mailbox))
548 return -ENOMEM; 584 return PTR_ERR(mailbox);
549 585 memset(mailbox->buf, 0, MTHCA_MAILBOX_SIZE);
550 memset(inbox, 0, PAGE_SIZE); 586 pages = mailbox->buf;
551 587
552 for (mthca_icm_first(icm, &iter); 588 for (mthca_icm_first(icm, &iter);
553 !mthca_icm_last(&iter); 589 !mthca_icm_last(&iter);
@@ -567,19 +603,17 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
567 } 603 }
568 for (i = 0; i < mthca_icm_size(&iter) / (1 << lg); ++i, ++nent) { 604 for (i = 0; i < mthca_icm_size(&iter) / (1 << lg); ++i, ++nent) {
569 if (virt != -1) { 605 if (virt != -1) {
570 *((__be64 *) (inbox + nent * 4)) = 606 pages[nent * 2] = cpu_to_be64(virt);
571 cpu_to_be64(virt);
572 virt += 1 << lg; 607 virt += 1 << lg;
573 } 608 }
574 609
575 *((__be64 *) (inbox + nent * 4 + 2)) = 610 pages[nent * 2 + 1] = cpu_to_be64((mthca_icm_addr(&iter) +
576 cpu_to_be64((mthca_icm_addr(&iter) + 611 (i << lg)) | (lg - 12));
577 (i << lg)) | (lg - 12));
578 ts += 1 << (lg - 10); 612 ts += 1 << (lg - 10);
579 ++tc; 613 ++tc;
580 614
581 if (nent == PAGE_SIZE / 16) { 615 if (nent == MTHCA_MAILBOX_SIZE / 16) {
582 err = mthca_cmd(dev, indma, nent, 0, op, 616 err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
583 CMD_TIME_CLASS_B, status); 617 CMD_TIME_CLASS_B, status);
584 if (err || *status) 618 if (err || *status)
585 goto out; 619 goto out;
@@ -589,7 +623,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
589 } 623 }
590 624
591 if (nent) 625 if (nent)
592 err = mthca_cmd(dev, indma, nent, 0, op, 626 err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
593 CMD_TIME_CLASS_B, status); 627 CMD_TIME_CLASS_B, status);
594 628
595 switch (op) { 629 switch (op) {
@@ -606,7 +640,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
606 } 640 }
607 641
608out: 642out:
609 pci_free_consistent(dev->pdev, PAGE_SIZE, inbox, indma); 643 mthca_free_mailbox(dev, mailbox);
610 return err; 644 return err;
611} 645}
612 646
@@ -627,8 +661,8 @@ int mthca_RUN_FW(struct mthca_dev *dev, u8 *status)
627 661
628int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status) 662int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
629{ 663{
664 struct mthca_mailbox *mailbox;
630 u32 *outbox; 665 u32 *outbox;
631 dma_addr_t outdma;
632 int err = 0; 666 int err = 0;
633 u8 lg; 667 u8 lg;
634 668
@@ -646,12 +680,12 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
646#define QUERY_FW_EQ_ARM_BASE_OFFSET 0x40 680#define QUERY_FW_EQ_ARM_BASE_OFFSET 0x40
647#define QUERY_FW_EQ_SET_CI_BASE_OFFSET 0x48 681#define QUERY_FW_EQ_SET_CI_BASE_OFFSET 0x48
648 682
649 outbox = pci_alloc_consistent(dev->pdev, QUERY_FW_OUT_SIZE, &outdma); 683 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
650 if (!outbox) { 684 if (IS_ERR(mailbox))
651 return -ENOMEM; 685 return PTR_ERR(mailbox);
652 } 686 outbox = mailbox->buf;
653 687
654 err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_FW, 688 err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_FW,
655 CMD_TIME_CLASS_A, status); 689 CMD_TIME_CLASS_A, status);
656 690
657 if (err) 691 if (err)
@@ -702,15 +736,15 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
702 } 736 }
703 737
704out: 738out:
705 pci_free_consistent(dev->pdev, QUERY_FW_OUT_SIZE, outbox, outdma); 739 mthca_free_mailbox(dev, mailbox);
706 return err; 740 return err;
707} 741}
708 742
709int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status) 743int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status)
710{ 744{
745 struct mthca_mailbox *mailbox;
711 u8 info; 746 u8 info;
712 u32 *outbox; 747 u32 *outbox;
713 dma_addr_t outdma;
714 int err = 0; 748 int err = 0;
715 749
716#define ENABLE_LAM_OUT_SIZE 0x100 750#define ENABLE_LAM_OUT_SIZE 0x100
@@ -721,11 +755,12 @@ int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status)
721#define ENABLE_LAM_INFO_HIDDEN_FLAG (1 << 4) 755#define ENABLE_LAM_INFO_HIDDEN_FLAG (1 << 4)
722#define ENABLE_LAM_INFO_ECC_MASK 0x3 756#define ENABLE_LAM_INFO_ECC_MASK 0x3
723 757
724 outbox = pci_alloc_consistent(dev->pdev, ENABLE_LAM_OUT_SIZE, &outdma); 758 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
725 if (!outbox) 759 if (IS_ERR(mailbox))
726 return -ENOMEM; 760 return PTR_ERR(mailbox);
761 outbox = mailbox->buf;
727 762
728 err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_ENABLE_LAM, 763 err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_ENABLE_LAM,
729 CMD_TIME_CLASS_C, status); 764 CMD_TIME_CLASS_C, status);
730 765
731 if (err) 766 if (err)
@@ -754,7 +789,7 @@ int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status)
754 (unsigned long long) dev->ddr_end); 789 (unsigned long long) dev->ddr_end);
755 790
756out: 791out:
757 pci_free_consistent(dev->pdev, ENABLE_LAM_OUT_SIZE, outbox, outdma); 792 mthca_free_mailbox(dev, mailbox);
758 return err; 793 return err;
759} 794}
760 795
@@ -765,9 +800,9 @@ int mthca_DISABLE_LAM(struct mthca_dev *dev, u8 *status)
765 800
766int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status) 801int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status)
767{ 802{
803 struct mthca_mailbox *mailbox;
768 u8 info; 804 u8 info;
769 u32 *outbox; 805 u32 *outbox;
770 dma_addr_t outdma;
771 int err = 0; 806 int err = 0;
772 807
773#define QUERY_DDR_OUT_SIZE 0x100 808#define QUERY_DDR_OUT_SIZE 0x100
@@ -778,11 +813,12 @@ int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status)
778#define QUERY_DDR_INFO_HIDDEN_FLAG (1 << 4) 813#define QUERY_DDR_INFO_HIDDEN_FLAG (1 << 4)
779#define QUERY_DDR_INFO_ECC_MASK 0x3 814#define QUERY_DDR_INFO_ECC_MASK 0x3
780 815
781 outbox = pci_alloc_consistent(dev->pdev, QUERY_DDR_OUT_SIZE, &outdma); 816 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
782 if (!outbox) 817 if (IS_ERR(mailbox))
783 return -ENOMEM; 818 return PTR_ERR(mailbox);
819 outbox = mailbox->buf;
784 820
785 err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_DDR, 821 err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DDR,
786 CMD_TIME_CLASS_A, status); 822 CMD_TIME_CLASS_A, status);
787 823
788 if (err) 824 if (err)
@@ -808,15 +844,15 @@ int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status)
808 (unsigned long long) dev->ddr_end); 844 (unsigned long long) dev->ddr_end);
809 845
810out: 846out:
811 pci_free_consistent(dev->pdev, QUERY_DDR_OUT_SIZE, outbox, outdma); 847 mthca_free_mailbox(dev, mailbox);
812 return err; 848 return err;
813} 849}
814 850
815int mthca_QUERY_DEV_LIM(struct mthca_dev *dev, 851int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
816 struct mthca_dev_lim *dev_lim, u8 *status) 852 struct mthca_dev_lim *dev_lim, u8 *status)
817{ 853{
854 struct mthca_mailbox *mailbox;
818 u32 *outbox; 855 u32 *outbox;
819 dma_addr_t outdma;
820 u8 field; 856 u8 field;
821 u16 size; 857 u16 size;
822 int err; 858 int err;
@@ -881,11 +917,12 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
881#define QUERY_DEV_LIM_LAMR_OFFSET 0x9f 917#define QUERY_DEV_LIM_LAMR_OFFSET 0x9f
882#define QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET 0xa0 918#define QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET 0xa0
883 919
884 outbox = pci_alloc_consistent(dev->pdev, QUERY_DEV_LIM_OUT_SIZE, &outdma); 920 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
885 if (!outbox) 921 if (IS_ERR(mailbox))
886 return -ENOMEM; 922 return PTR_ERR(mailbox);
923 outbox = mailbox->buf;
887 924
888 err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_DEV_LIM, 925 err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DEV_LIM,
889 CMD_TIME_CLASS_A, status); 926 CMD_TIME_CLASS_A, status);
890 927
891 if (err) 928 if (err)
@@ -1041,15 +1078,15 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
1041 } 1078 }
1042 1079
1043out: 1080out:
1044 pci_free_consistent(dev->pdev, QUERY_DEV_LIM_OUT_SIZE, outbox, outdma); 1081 mthca_free_mailbox(dev, mailbox);
1045 return err; 1082 return err;
1046} 1083}
1047 1084
1048int mthca_QUERY_ADAPTER(struct mthca_dev *dev, 1085int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
1049 struct mthca_adapter *adapter, u8 *status) 1086 struct mthca_adapter *adapter, u8 *status)
1050{ 1087{
1088 struct mthca_mailbox *mailbox;
1051 u32 *outbox; 1089 u32 *outbox;
1052 dma_addr_t outdma;
1053 int err; 1090 int err;
1054 1091
1055#define QUERY_ADAPTER_OUT_SIZE 0x100 1092#define QUERY_ADAPTER_OUT_SIZE 0x100
@@ -1058,23 +1095,24 @@ int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
1058#define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08 1095#define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08
1059#define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10 1096#define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10
1060 1097
1061 outbox = pci_alloc_consistent(dev->pdev, QUERY_ADAPTER_OUT_SIZE, &outdma); 1098 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1062 if (!outbox) 1099 if (IS_ERR(mailbox))
1063 return -ENOMEM; 1100 return PTR_ERR(mailbox);
1101 outbox = mailbox->buf;
1064 1102
1065 err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_ADAPTER, 1103 err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_ADAPTER,
1066 CMD_TIME_CLASS_A, status); 1104 CMD_TIME_CLASS_A, status);
1067 1105
1068 if (err) 1106 if (err)
1069 goto out; 1107 goto out;
1070 1108
1071 MTHCA_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET); 1109 MTHCA_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET);
1072 MTHCA_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET); 1110 MTHCA_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET);
1073 MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET); 1111 MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET);
1074 MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); 1112 MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
1075 1113
1076out: 1114out:
1077 pci_free_consistent(dev->pdev, QUERY_DEV_LIM_OUT_SIZE, outbox, outdma); 1115 mthca_free_mailbox(dev, mailbox);
1078 return err; 1116 return err;
1079} 1117}
1080 1118
@@ -1082,8 +1120,8 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
1082 struct mthca_init_hca_param *param, 1120 struct mthca_init_hca_param *param,
1083 u8 *status) 1121 u8 *status)
1084{ 1122{
1123 struct mthca_mailbox *mailbox;
1085 u32 *inbox; 1124 u32 *inbox;
1086 dma_addr_t indma;
1087 int err; 1125 int err;
1088 1126
1089#define INIT_HCA_IN_SIZE 0x200 1127#define INIT_HCA_IN_SIZE 0x200
@@ -1123,9 +1161,10 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
1123#define INIT_HCA_UAR_SCATCH_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x10) 1161#define INIT_HCA_UAR_SCATCH_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x10)
1124#define INIT_HCA_UAR_CTX_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x18) 1162#define INIT_HCA_UAR_CTX_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x18)
1125 1163
1126 inbox = pci_alloc_consistent(dev->pdev, INIT_HCA_IN_SIZE, &indma); 1164 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1127 if (!inbox) 1165 if (IS_ERR(mailbox))
1128 return -ENOMEM; 1166 return PTR_ERR(mailbox);
1167 inbox = mailbox->buf;
1129 1168
1130 memset(inbox, 0, INIT_HCA_IN_SIZE); 1169 memset(inbox, 0, INIT_HCA_IN_SIZE);
1131 1170
@@ -1188,10 +1227,9 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
1188 MTHCA_PUT(inbox, param->uarc_base, INIT_HCA_UAR_CTX_BASE_OFFSET); 1227 MTHCA_PUT(inbox, param->uarc_base, INIT_HCA_UAR_CTX_BASE_OFFSET);
1189 } 1228 }
1190 1229
1191 err = mthca_cmd(dev, indma, 0, 0, CMD_INIT_HCA, 1230 err = mthca_cmd(dev, mailbox->dma, 0, 0, CMD_INIT_HCA, HZ, status);
1192 HZ, status);
1193 1231
1194 pci_free_consistent(dev->pdev, INIT_HCA_IN_SIZE, inbox, indma); 1232 mthca_free_mailbox(dev, mailbox);
1195 return err; 1233 return err;
1196} 1234}
1197 1235
@@ -1199,8 +1237,8 @@ int mthca_INIT_IB(struct mthca_dev *dev,
1199 struct mthca_init_ib_param *param, 1237 struct mthca_init_ib_param *param,
1200 int port, u8 *status) 1238 int port, u8 *status)
1201{ 1239{
1240 struct mthca_mailbox *mailbox;
1202 u32 *inbox; 1241 u32 *inbox;
1203 dma_addr_t indma;
1204 int err; 1242 int err;
1205 u32 flags; 1243 u32 flags;
1206 1244
@@ -1220,9 +1258,10 @@ int mthca_INIT_IB(struct mthca_dev *dev,
1220#define INIT_IB_NODE_GUID_OFFSET 0x18 1258#define INIT_IB_NODE_GUID_OFFSET 0x18
1221#define INIT_IB_SI_GUID_OFFSET 0x20 1259#define INIT_IB_SI_GUID_OFFSET 0x20
1222 1260
1223 inbox = pci_alloc_consistent(dev->pdev, INIT_IB_IN_SIZE, &indma); 1261 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1224 if (!inbox) 1262 if (IS_ERR(mailbox))
1225 return -ENOMEM; 1263 return PTR_ERR(mailbox);
1264 inbox = mailbox->buf;
1226 1265
1227 memset(inbox, 0, INIT_IB_IN_SIZE); 1266 memset(inbox, 0, INIT_IB_IN_SIZE);
1228 1267
@@ -1242,10 +1281,10 @@ int mthca_INIT_IB(struct mthca_dev *dev,
1242 MTHCA_PUT(inbox, param->node_guid, INIT_IB_NODE_GUID_OFFSET); 1281 MTHCA_PUT(inbox, param->node_guid, INIT_IB_NODE_GUID_OFFSET);
1243 MTHCA_PUT(inbox, param->si_guid, INIT_IB_SI_GUID_OFFSET); 1282 MTHCA_PUT(inbox, param->si_guid, INIT_IB_SI_GUID_OFFSET);
1244 1283
1245 err = mthca_cmd(dev, indma, port, 0, CMD_INIT_IB, 1284 err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_INIT_IB,
1246 CMD_TIME_CLASS_A, status); 1285 CMD_TIME_CLASS_A, status);
1247 1286
1248 pci_free_consistent(dev->pdev, INIT_HCA_IN_SIZE, inbox, indma); 1287 mthca_free_mailbox(dev, mailbox);
1249 return err; 1288 return err;
1250} 1289}
1251 1290
@@ -1262,8 +1301,8 @@ int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status)
1262int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param, 1301int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
1263 int port, u8 *status) 1302 int port, u8 *status)
1264{ 1303{
1304 struct mthca_mailbox *mailbox;
1265 u32 *inbox; 1305 u32 *inbox;
1266 dma_addr_t indma;
1267 int err; 1306 int err;
1268 u32 flags = 0; 1307 u32 flags = 0;
1269 1308
@@ -1274,9 +1313,10 @@ int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
1274#define SET_IB_CAP_MASK_OFFSET 0x04 1313#define SET_IB_CAP_MASK_OFFSET 0x04
1275#define SET_IB_SI_GUID_OFFSET 0x08 1314#define SET_IB_SI_GUID_OFFSET 0x08
1276 1315
1277 inbox = pci_alloc_consistent(dev->pdev, SET_IB_IN_SIZE, &indma); 1316 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1278 if (!inbox) 1317 if (IS_ERR(mailbox))
1279 return -ENOMEM; 1318 return PTR_ERR(mailbox);
1319 inbox = mailbox->buf;
1280 1320
1281 memset(inbox, 0, SET_IB_IN_SIZE); 1321 memset(inbox, 0, SET_IB_IN_SIZE);
1282 1322
@@ -1287,10 +1327,10 @@ int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
1287 MTHCA_PUT(inbox, param->cap_mask, SET_IB_CAP_MASK_OFFSET); 1327 MTHCA_PUT(inbox, param->cap_mask, SET_IB_CAP_MASK_OFFSET);
1288 MTHCA_PUT(inbox, param->si_guid, SET_IB_SI_GUID_OFFSET); 1328 MTHCA_PUT(inbox, param->si_guid, SET_IB_SI_GUID_OFFSET);
1289 1329
1290 err = mthca_cmd(dev, indma, port, 0, CMD_SET_IB, 1330 err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_SET_IB,
1291 CMD_TIME_CLASS_B, status); 1331 CMD_TIME_CLASS_B, status);
1292 1332
1293 pci_free_consistent(dev->pdev, INIT_HCA_IN_SIZE, inbox, indma); 1333 mthca_free_mailbox(dev, mailbox);
1294 return err; 1334 return err;
1295} 1335}
1296 1336
@@ -1301,20 +1341,22 @@ int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *st
1301 1341
1302int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status) 1342int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status)
1303{ 1343{
1344 struct mthca_mailbox *mailbox;
1304 u64 *inbox; 1345 u64 *inbox;
1305 dma_addr_t indma;
1306 int err; 1346 int err;
1307 1347
1308 inbox = pci_alloc_consistent(dev->pdev, 16, &indma); 1348 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1309 if (!inbox) 1349 if (IS_ERR(mailbox))
1310 return -ENOMEM; 1350 return PTR_ERR(mailbox);
1351 inbox = mailbox->buf;
1311 1352
1312 inbox[0] = cpu_to_be64(virt); 1353 inbox[0] = cpu_to_be64(virt);
1313 inbox[1] = cpu_to_be64(dma_addr); 1354 inbox[1] = cpu_to_be64(dma_addr);
1314 1355
1315 err = mthca_cmd(dev, indma, 1, 0, CMD_MAP_ICM, CMD_TIME_CLASS_B, status); 1356 err = mthca_cmd(dev, mailbox->dma, 1, 0, CMD_MAP_ICM,
1357 CMD_TIME_CLASS_B, status);
1316 1358
1317 pci_free_consistent(dev->pdev, 16, inbox, indma); 1359 mthca_free_mailbox(dev, mailbox);
1318 1360
1319 if (!err) 1361 if (!err)
1320 mthca_dbg(dev, "Mapped page at %llx to %llx for ICM.\n", 1362 mthca_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
@@ -1359,69 +1401,26 @@ int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
1359 return 0; 1401 return 0;
1360} 1402}
1361 1403
1362int mthca_SW2HW_MPT(struct mthca_dev *dev, void *mpt_entry, 1404int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1363 int mpt_index, u8 *status) 1405 int mpt_index, u8 *status)
1364{ 1406{
1365 dma_addr_t indma; 1407 return mthca_cmd(dev, mailbox->dma, mpt_index, 0, CMD_SW2HW_MPT,
1366 int err; 1408 CMD_TIME_CLASS_B, status);
1367
1368 indma = pci_map_single(dev->pdev, mpt_entry,
1369 MTHCA_MPT_ENTRY_SIZE,
1370 PCI_DMA_TODEVICE);
1371 if (pci_dma_mapping_error(indma))
1372 return -ENOMEM;
1373
1374 err = mthca_cmd(dev, indma, mpt_index, 0, CMD_SW2HW_MPT,
1375 CMD_TIME_CLASS_B, status);
1376
1377 pci_unmap_single(dev->pdev, indma,
1378 MTHCA_MPT_ENTRY_SIZE, PCI_DMA_TODEVICE);
1379 return err;
1380} 1409}
1381 1410
1382int mthca_HW2SW_MPT(struct mthca_dev *dev, void *mpt_entry, 1411int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1383 int mpt_index, u8 *status) 1412 int mpt_index, u8 *status)
1384{ 1413{
1385 dma_addr_t outdma = 0; 1414 return mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
1386 int err; 1415 !mailbox, CMD_HW2SW_MPT,
1387 1416 CMD_TIME_CLASS_B, status);
1388 if (mpt_entry) {
1389 outdma = pci_map_single(dev->pdev, mpt_entry,
1390 MTHCA_MPT_ENTRY_SIZE,
1391 PCI_DMA_FROMDEVICE);
1392 if (pci_dma_mapping_error(outdma))
1393 return -ENOMEM;
1394 }
1395
1396 err = mthca_cmd_box(dev, 0, outdma, mpt_index, !mpt_entry,
1397 CMD_HW2SW_MPT,
1398 CMD_TIME_CLASS_B, status);
1399
1400 if (mpt_entry)
1401 pci_unmap_single(dev->pdev, outdma,
1402 MTHCA_MPT_ENTRY_SIZE,
1403 PCI_DMA_FROMDEVICE);
1404 return err;
1405} 1417}
1406 1418
1407int mthca_WRITE_MTT(struct mthca_dev *dev, u64 *mtt_entry, 1419int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1408 int num_mtt, u8 *status) 1420 int num_mtt, u8 *status)
1409{ 1421{
1410 dma_addr_t indma; 1422 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
1411 int err; 1423 CMD_TIME_CLASS_B, status);
1412
1413 indma = pci_map_single(dev->pdev, mtt_entry,
1414 (num_mtt + 2) * 8,
1415 PCI_DMA_TODEVICE);
1416 if (pci_dma_mapping_error(indma))
1417 return -ENOMEM;
1418
1419 err = mthca_cmd(dev, indma, num_mtt, 0, CMD_WRITE_MTT,
1420 CMD_TIME_CLASS_B, status);
1421
1422 pci_unmap_single(dev->pdev, indma,
1423 (num_mtt + 2) * 8, PCI_DMA_TODEVICE);
1424 return err;
1425} 1424}
1426 1425
1427int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status) 1426int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status)
@@ -1439,92 +1438,38 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
1439 0, CMD_MAP_EQ, CMD_TIME_CLASS_B, status); 1438 0, CMD_MAP_EQ, CMD_TIME_CLASS_B, status);
1440} 1439}
1441 1440
1442int mthca_SW2HW_EQ(struct mthca_dev *dev, void *eq_context, 1441int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1443 int eq_num, u8 *status) 1442 int eq_num, u8 *status)
1444{ 1443{
1445 dma_addr_t indma; 1444 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
1446 int err; 1445 CMD_TIME_CLASS_A, status);
1447
1448 indma = pci_map_single(dev->pdev, eq_context,
1449 MTHCA_EQ_CONTEXT_SIZE,
1450 PCI_DMA_TODEVICE);
1451 if (pci_dma_mapping_error(indma))
1452 return -ENOMEM;
1453
1454 err = mthca_cmd(dev, indma, eq_num, 0, CMD_SW2HW_EQ,
1455 CMD_TIME_CLASS_A, status);
1456
1457 pci_unmap_single(dev->pdev, indma,
1458 MTHCA_EQ_CONTEXT_SIZE, PCI_DMA_TODEVICE);
1459 return err;
1460} 1446}
1461 1447
1462int mthca_HW2SW_EQ(struct mthca_dev *dev, void *eq_context, 1448int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1463 int eq_num, u8 *status) 1449 int eq_num, u8 *status)
1464{ 1450{
1465 dma_addr_t outdma = 0; 1451 return mthca_cmd_box(dev, 0, mailbox->dma, eq_num, 0,
1466 int err; 1452 CMD_HW2SW_EQ,
1467 1453 CMD_TIME_CLASS_A, status);
1468 outdma = pci_map_single(dev->pdev, eq_context,
1469 MTHCA_EQ_CONTEXT_SIZE,
1470 PCI_DMA_FROMDEVICE);
1471 if (pci_dma_mapping_error(outdma))
1472 return -ENOMEM;
1473
1474 err = mthca_cmd_box(dev, 0, outdma, eq_num, 0,
1475 CMD_HW2SW_EQ,
1476 CMD_TIME_CLASS_A, status);
1477
1478 pci_unmap_single(dev->pdev, outdma,
1479 MTHCA_EQ_CONTEXT_SIZE,
1480 PCI_DMA_FROMDEVICE);
1481 return err;
1482} 1454}
1483 1455
1484int mthca_SW2HW_CQ(struct mthca_dev *dev, void *cq_context, 1456int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1485 int cq_num, u8 *status) 1457 int cq_num, u8 *status)
1486{ 1458{
1487 dma_addr_t indma; 1459 return mthca_cmd(dev, mailbox->dma, cq_num, 0, CMD_SW2HW_CQ,
1488 int err;
1489
1490 indma = pci_map_single(dev->pdev, cq_context,
1491 MTHCA_CQ_CONTEXT_SIZE,
1492 PCI_DMA_TODEVICE);
1493 if (pci_dma_mapping_error(indma))
1494 return -ENOMEM;
1495
1496 err = mthca_cmd(dev, indma, cq_num, 0, CMD_SW2HW_CQ,
1497 CMD_TIME_CLASS_A, status); 1460 CMD_TIME_CLASS_A, status);
1498
1499 pci_unmap_single(dev->pdev, indma,
1500 MTHCA_CQ_CONTEXT_SIZE, PCI_DMA_TODEVICE);
1501 return err;
1502} 1461}
1503 1462
1504int mthca_HW2SW_CQ(struct mthca_dev *dev, void *cq_context, 1463int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1505 int cq_num, u8 *status) 1464 int cq_num, u8 *status)
1506{ 1465{
1507 dma_addr_t outdma = 0; 1466 return mthca_cmd_box(dev, 0, mailbox->dma, cq_num, 0,
1508 int err; 1467 CMD_HW2SW_CQ,
1509 1468 CMD_TIME_CLASS_A, status);
1510 outdma = pci_map_single(dev->pdev, cq_context,
1511 MTHCA_CQ_CONTEXT_SIZE,
1512 PCI_DMA_FROMDEVICE);
1513 if (pci_dma_mapping_error(outdma))
1514 return -ENOMEM;
1515
1516 err = mthca_cmd_box(dev, 0, outdma, cq_num, 0,
1517 CMD_HW2SW_CQ,
1518 CMD_TIME_CLASS_A, status);
1519
1520 pci_unmap_single(dev->pdev, outdma,
1521 MTHCA_CQ_CONTEXT_SIZE,
1522 PCI_DMA_FROMDEVICE);
1523 return err;
1524} 1469}
1525 1470
1526int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, 1471int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
1527 int is_ee, void *qp_context, u32 optmask, 1472 int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
1528 u8 *status) 1473 u8 *status)
1529{ 1474{
1530 static const u16 op[] = { 1475 static const u16 op[] = {
@@ -1541,36 +1486,34 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
1541 [MTHCA_TRANS_ANY2RST] = CMD_ERR2RST_QPEE 1486 [MTHCA_TRANS_ANY2RST] = CMD_ERR2RST_QPEE
1542 }; 1487 };
1543 u8 op_mod = 0; 1488 u8 op_mod = 0;
1544 1489 int my_mailbox = 0;
1545 dma_addr_t indma;
1546 int err; 1490 int err;
1547 1491
1548 if (trans < 0 || trans >= ARRAY_SIZE(op)) 1492 if (trans < 0 || trans >= ARRAY_SIZE(op))
1549 return -EINVAL; 1493 return -EINVAL;
1550 1494
1551 if (trans == MTHCA_TRANS_ANY2RST) { 1495 if (trans == MTHCA_TRANS_ANY2RST) {
1552 indma = 0;
1553 op_mod = 3; /* don't write outbox, any->reset */ 1496 op_mod = 3; /* don't write outbox, any->reset */
1554 1497
1555 /* For debugging */ 1498 /* For debugging */
1556 qp_context = pci_alloc_consistent(dev->pdev, MTHCA_QP_CONTEXT_SIZE, 1499 if (!mailbox) {
1557 &indma); 1500 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1558 op_mod = 2; /* write outbox, any->reset */ 1501 if (!IS_ERR(mailbox)) {
1502 my_mailbox = 1;
1503 op_mod = 2; /* write outbox, any->reset */
1504 } else
1505 mailbox = NULL;
1506 }
1559 } else { 1507 } else {
1560 indma = pci_map_single(dev->pdev, qp_context,
1561 MTHCA_QP_CONTEXT_SIZE,
1562 PCI_DMA_TODEVICE);
1563 if (pci_dma_mapping_error(indma))
1564 return -ENOMEM;
1565
1566 if (0) { 1508 if (0) {
1567 int i; 1509 int i;
1568 mthca_dbg(dev, "Dumping QP context:\n"); 1510 mthca_dbg(dev, "Dumping QP context:\n");
1569 printk(" opt param mask: %08x\n", be32_to_cpup(qp_context)); 1511 printk(" opt param mask: %08x\n", be32_to_cpup(mailbox->buf));
1570 for (i = 0; i < 0x100 / 4; ++i) { 1512 for (i = 0; i < 0x100 / 4; ++i) {
1571 if (i % 8 == 0) 1513 if (i % 8 == 0)
1572 printk(" [%02x] ", i * 4); 1514 printk(" [%02x] ", i * 4);
1573 printk(" %08x", be32_to_cpu(((u32 *) qp_context)[i + 2])); 1515 printk(" %08x",
1516 be32_to_cpu(((u32 *) mailbox->buf)[i + 2]));
1574 if ((i + 1) % 8 == 0) 1517 if ((i + 1) % 8 == 0)
1575 printk("\n"); 1518 printk("\n");
1576 } 1519 }
@@ -1578,55 +1521,39 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
1578 } 1521 }
1579 1522
1580 if (trans == MTHCA_TRANS_ANY2RST) { 1523 if (trans == MTHCA_TRANS_ANY2RST) {
1581 err = mthca_cmd_box(dev, 0, indma, (!!is_ee << 24) | num, 1524 err = mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
1582 op_mod, op[trans], CMD_TIME_CLASS_C, status); 1525 (!!is_ee << 24) | num, op_mod,
1526 op[trans], CMD_TIME_CLASS_C, status);
1583 1527
1584 if (0) { 1528 if (0 && mailbox) {
1585 int i; 1529 int i;
1586 mthca_dbg(dev, "Dumping QP context:\n"); 1530 mthca_dbg(dev, "Dumping QP context:\n");
1587 printk(" %08x\n", be32_to_cpup(qp_context)); 1531 printk(" %08x\n", be32_to_cpup(mailbox->buf));
1588 for (i = 0; i < 0x100 / 4; ++i) { 1532 for (i = 0; i < 0x100 / 4; ++i) {
1589 if (i % 8 == 0) 1533 if (i % 8 == 0)
1590 printk("[%02x] ", i * 4); 1534 printk("[%02x] ", i * 4);
1591 printk(" %08x", be32_to_cpu(((u32 *) qp_context)[i + 2])); 1535 printk(" %08x",
1536 be32_to_cpu(((u32 *) mailbox->buf)[i + 2]));
1592 if ((i + 1) % 8 == 0) 1537 if ((i + 1) % 8 == 0)
1593 printk("\n"); 1538 printk("\n");
1594 } 1539 }
1595 } 1540 }
1596 1541
1597 } else 1542 } else
1598 err = mthca_cmd(dev, indma, (!!is_ee << 24) | num, 1543 err = mthca_cmd(dev, mailbox->dma, (!!is_ee << 24) | num,
1599 op_mod, op[trans], CMD_TIME_CLASS_C, status); 1544 op_mod, op[trans], CMD_TIME_CLASS_C, status);
1600 1545
1601 if (trans != MTHCA_TRANS_ANY2RST) 1546 if (my_mailbox)
1602 pci_unmap_single(dev->pdev, indma, 1547 mthca_free_mailbox(dev, mailbox);
1603 MTHCA_QP_CONTEXT_SIZE, PCI_DMA_TODEVICE); 1548
1604 else
1605 pci_free_consistent(dev->pdev, MTHCA_QP_CONTEXT_SIZE,
1606 qp_context, indma);
1607 return err; 1549 return err;
1608} 1550}
1609 1551
1610int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee, 1552int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee,
1611 void *qp_context, u8 *status) 1553 struct mthca_mailbox *mailbox, u8 *status)
1612{ 1554{
1613 dma_addr_t outdma = 0; 1555 return mthca_cmd_box(dev, 0, mailbox->dma, (!!is_ee << 24) | num, 0,
1614 int err; 1556 CMD_QUERY_QPEE, CMD_TIME_CLASS_A, status);
1615
1616 outdma = pci_map_single(dev->pdev, qp_context,
1617 MTHCA_QP_CONTEXT_SIZE,
1618 PCI_DMA_FROMDEVICE);
1619 if (pci_dma_mapping_error(outdma))
1620 return -ENOMEM;
1621
1622 err = mthca_cmd_box(dev, 0, outdma, (!!is_ee << 24) | num, 0,
1623 CMD_QUERY_QPEE,
1624 CMD_TIME_CLASS_A, status);
1625
1626 pci_unmap_single(dev->pdev, outdma,
1627 MTHCA_QP_CONTEXT_SIZE,
1628 PCI_DMA_FROMDEVICE);
1629 return err;
1630} 1557}
1631 1558
1632int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn, 1559int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
@@ -1656,11 +1583,11 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
1656} 1583}
1657 1584
1658int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey, 1585int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
1659 int port, struct ib_wc* in_wc, struct ib_grh* in_grh, 1586 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
1660 void *in_mad, void *response_mad, u8 *status) 1587 void *in_mad, void *response_mad, u8 *status)
1661{ 1588{
1662 void *box; 1589 struct mthca_mailbox *inmailbox, *outmailbox;
1663 dma_addr_t dma; 1590 void *inbox;
1664 int err; 1591 int err;
1665 u32 in_modifier = port; 1592 u32 in_modifier = port;
1666 u8 op_modifier = 0; 1593 u8 op_modifier = 0;
@@ -1674,11 +1601,18 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
1674#define MAD_IFC_PKEY_OFFSET 0x10e 1601#define MAD_IFC_PKEY_OFFSET 0x10e
1675#define MAD_IFC_GRH_OFFSET 0x140 1602#define MAD_IFC_GRH_OFFSET 0x140
1676 1603
1677 box = pci_alloc_consistent(dev->pdev, MAD_IFC_BOX_SIZE, &dma); 1604 inmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1678 if (!box) 1605 if (IS_ERR(inmailbox))
1679 return -ENOMEM; 1606 return PTR_ERR(inmailbox);
1607 inbox = inmailbox->buf;
1608
1609 outmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1610 if (IS_ERR(outmailbox)) {
1611 mthca_free_mailbox(dev, inmailbox);
1612 return PTR_ERR(outmailbox);
1613 }
1680 1614
1681 memcpy(box, in_mad, 256); 1615 memcpy(inbox, in_mad, 256);
1682 1616
1683 /* 1617 /*
1684 * Key check traps can't be generated unless we have in_wc to 1618 * Key check traps can't be generated unless we have in_wc to
@@ -1692,97 +1626,65 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
1692 if (in_wc) { 1626 if (in_wc) {
1693 u8 val; 1627 u8 val;
1694 1628
1695 memset(box + 256, 0, 256); 1629 memset(inbox + 256, 0, 256);
1696 1630
1697 MTHCA_PUT(box, in_wc->qp_num, MAD_IFC_MY_QPN_OFFSET); 1631 MTHCA_PUT(inbox, in_wc->qp_num, MAD_IFC_MY_QPN_OFFSET);
1698 MTHCA_PUT(box, in_wc->src_qp, MAD_IFC_RQPN_OFFSET); 1632 MTHCA_PUT(inbox, in_wc->src_qp, MAD_IFC_RQPN_OFFSET);
1699 1633
1700 val = in_wc->sl << 4; 1634 val = in_wc->sl << 4;
1701 MTHCA_PUT(box, val, MAD_IFC_SL_OFFSET); 1635 MTHCA_PUT(inbox, val, MAD_IFC_SL_OFFSET);
1702 1636
1703 val = in_wc->dlid_path_bits | 1637 val = in_wc->dlid_path_bits |
1704 (in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0); 1638 (in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
1705 MTHCA_PUT(box, val, MAD_IFC_GRH_OFFSET); 1639 MTHCA_PUT(inbox, val, MAD_IFC_GRH_OFFSET);
1706 1640
1707 MTHCA_PUT(box, in_wc->slid, MAD_IFC_RLID_OFFSET); 1641 MTHCA_PUT(inbox, in_wc->slid, MAD_IFC_RLID_OFFSET);
1708 MTHCA_PUT(box, in_wc->pkey_index, MAD_IFC_PKEY_OFFSET); 1642 MTHCA_PUT(inbox, in_wc->pkey_index, MAD_IFC_PKEY_OFFSET);
1709 1643
1710 if (in_grh) 1644 if (in_grh)
1711 memcpy((u8 *) box + MAD_IFC_GRH_OFFSET, in_grh, 40); 1645 memcpy(inbox + MAD_IFC_GRH_OFFSET, in_grh, 40);
1712 1646
1713 op_modifier |= 0x10; 1647 op_modifier |= 0x10;
1714 1648
1715 in_modifier |= in_wc->slid << 16; 1649 in_modifier |= in_wc->slid << 16;
1716 } 1650 }
1717 1651
1718 err = mthca_cmd_box(dev, dma, dma + 512, in_modifier, op_modifier, 1652 err = mthca_cmd_box(dev, inmailbox->dma, outmailbox->dma,
1653 in_modifier, op_modifier,
1719 CMD_MAD_IFC, CMD_TIME_CLASS_C, status); 1654 CMD_MAD_IFC, CMD_TIME_CLASS_C, status);
1720 1655
1721 if (!err && !*status) 1656 if (!err && !*status)
1722 memcpy(response_mad, box + 512, 256); 1657 memcpy(response_mad, outmailbox->buf, 256);
1723 1658
1724 pci_free_consistent(dev->pdev, MAD_IFC_BOX_SIZE, box, dma); 1659 mthca_free_mailbox(dev, inmailbox);
1660 mthca_free_mailbox(dev, outmailbox);
1725 return err; 1661 return err;
1726} 1662}
1727 1663
1728int mthca_READ_MGM(struct mthca_dev *dev, int index, void *mgm, 1664int mthca_READ_MGM(struct mthca_dev *dev, int index,
1729 u8 *status) 1665 struct mthca_mailbox *mailbox, u8 *status)
1730{ 1666{
1731 dma_addr_t outdma = 0; 1667 return mthca_cmd_box(dev, 0, mailbox->dma, index, 0,
1732 int err; 1668 CMD_READ_MGM, CMD_TIME_CLASS_A, status);
1733
1734 outdma = pci_map_single(dev->pdev, mgm,
1735 MTHCA_MGM_ENTRY_SIZE,
1736 PCI_DMA_FROMDEVICE);
1737 if (pci_dma_mapping_error(outdma))
1738 return -ENOMEM;
1739
1740 err = mthca_cmd_box(dev, 0, outdma, index, 0,
1741 CMD_READ_MGM,
1742 CMD_TIME_CLASS_A, status);
1743
1744 pci_unmap_single(dev->pdev, outdma,
1745 MTHCA_MGM_ENTRY_SIZE,
1746 PCI_DMA_FROMDEVICE);
1747 return err;
1748} 1669}
1749 1670
1750int mthca_WRITE_MGM(struct mthca_dev *dev, int index, void *mgm, 1671int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
1751 u8 *status) 1672 struct mthca_mailbox *mailbox, u8 *status)
1752{ 1673{
1753 dma_addr_t indma; 1674 return mthca_cmd(dev, mailbox->dma, index, 0, CMD_WRITE_MGM,
1754 int err; 1675 CMD_TIME_CLASS_A, status);
1755
1756 indma = pci_map_single(dev->pdev, mgm,
1757 MTHCA_MGM_ENTRY_SIZE,
1758 PCI_DMA_TODEVICE);
1759 if (pci_dma_mapping_error(indma))
1760 return -ENOMEM;
1761
1762 err = mthca_cmd(dev, indma, index, 0, CMD_WRITE_MGM,
1763 CMD_TIME_CLASS_A, status);
1764
1765 pci_unmap_single(dev->pdev, indma,
1766 MTHCA_MGM_ENTRY_SIZE, PCI_DMA_TODEVICE);
1767 return err;
1768} 1676}
1769 1677
1770int mthca_MGID_HASH(struct mthca_dev *dev, void *gid, u16 *hash, 1678int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1771 u8 *status) 1679 u16 *hash, u8 *status)
1772{ 1680{
1773 dma_addr_t indma;
1774 u64 imm; 1681 u64 imm;
1775 int err; 1682 int err;
1776 1683
1777 indma = pci_map_single(dev->pdev, gid, 16, PCI_DMA_TODEVICE); 1684 err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH,
1778 if (pci_dma_mapping_error(indma))
1779 return -ENOMEM;
1780
1781 err = mthca_cmd_imm(dev, indma, &imm, 0, 0, CMD_MGID_HASH,
1782 CMD_TIME_CLASS_A, status); 1685 CMD_TIME_CLASS_A, status);
1783 *hash = imm;
1784 1686
1785 pci_unmap_single(dev->pdev, indma, 16, PCI_DMA_TODEVICE); 1687 *hash = imm;
1786 return err; 1688 return err;
1787} 1689}
1788 1690
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h b/drivers/infiniband/hw/mthca/mthca_cmd.h
index a8d4231e886..ed517f175dd 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.h
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.h
@@ -37,8 +37,7 @@
37 37
38#include <ib_verbs.h> 38#include <ib_verbs.h>
39 39
40#define MTHCA_CMD_MAILBOX_ALIGN 16UL 40#define MTHCA_MAILBOX_SIZE 4096
41#define MTHCA_CMD_MAILBOX_EXTRA (MTHCA_CMD_MAILBOX_ALIGN - 1)
42 41
43enum { 42enum {
44 /* command completed successfully: */ 43 /* command completed successfully: */
@@ -112,6 +111,11 @@ enum {
112 DEV_LIM_FLAG_UD_MULTI = 1 << 21, 111 DEV_LIM_FLAG_UD_MULTI = 1 << 21,
113}; 112};
114 113
114struct mthca_mailbox {
115 dma_addr_t dma;
116 void *buf;
117};
118
115struct mthca_dev_lim { 119struct mthca_dev_lim {
116 int max_srq_sz; 120 int max_srq_sz;
117 int max_qp_sz; 121 int max_qp_sz;
@@ -242,6 +246,10 @@ void mthca_cmd_use_polling(struct mthca_dev *dev);
242void mthca_cmd_event(struct mthca_dev *dev, u16 token, 246void mthca_cmd_event(struct mthca_dev *dev, u16 token,
243 u8 status, u64 out_param); 247 u8 status, u64 out_param);
244 248
249struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
250 unsigned int gfp_mask);
251void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox);
252
245int mthca_SYS_EN(struct mthca_dev *dev, u8 *status); 253int mthca_SYS_EN(struct mthca_dev *dev, u8 *status);
246int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status); 254int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status);
247int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status); 255int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status);
@@ -272,41 +280,39 @@ int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status);
272int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status); 280int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status);
273int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages, 281int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
274 u8 *status); 282 u8 *status);
275int mthca_SW2HW_MPT(struct mthca_dev *dev, void *mpt_entry, 283int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
276 int mpt_index, u8 *status); 284 int mpt_index, u8 *status);
277int mthca_HW2SW_MPT(struct mthca_dev *dev, void *mpt_entry, 285int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
278 int mpt_index, u8 *status); 286 int mpt_index, u8 *status);
279int mthca_WRITE_MTT(struct mthca_dev *dev, u64 *mtt_entry, 287int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
280 int num_mtt, u8 *status); 288 int num_mtt, u8 *status);
281int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status); 289int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status);
282int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap, 290int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
283 int eq_num, u8 *status); 291 int eq_num, u8 *status);
284int mthca_SW2HW_EQ(struct mthca_dev *dev, void *eq_context, 292int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
285 int eq_num, u8 *status); 293 int eq_num, u8 *status);
286int mthca_HW2SW_EQ(struct mthca_dev *dev, void *eq_context, 294int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
287 int eq_num, u8 *status); 295 int eq_num, u8 *status);
288int mthca_SW2HW_CQ(struct mthca_dev *dev, void *cq_context, 296int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
289 int cq_num, u8 *status); 297 int cq_num, u8 *status);
290int mthca_HW2SW_CQ(struct mthca_dev *dev, void *cq_context, 298int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
291 int cq_num, u8 *status); 299 int cq_num, u8 *status);
292int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, 300int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
293 int is_ee, void *qp_context, u32 optmask, 301 int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
294 u8 *status); 302 u8 *status);
295int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee, 303int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee,
296 void *qp_context, u8 *status); 304 struct mthca_mailbox *mailbox, u8 *status);
297int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn, 305int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
298 u8 *status); 306 u8 *status);
299int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey, 307int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
300 int port, struct ib_wc* in_wc, struct ib_grh* in_grh, 308 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
301 void *in_mad, void *response_mad, u8 *status); 309 void *in_mad, void *response_mad, u8 *status);
302int mthca_READ_MGM(struct mthca_dev *dev, int index, void *mgm, 310int mthca_READ_MGM(struct mthca_dev *dev, int index,
303 u8 *status); 311 struct mthca_mailbox *mailbox, u8 *status);
304int mthca_WRITE_MGM(struct mthca_dev *dev, int index, void *mgm, 312int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
305 u8 *status); 313 struct mthca_mailbox *mailbox, u8 *status);
306int mthca_MGID_HASH(struct mthca_dev *dev, void *gid, u16 *hash, 314int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
307 u8 *status); 315 u16 *hash, u8 *status);
308int mthca_NOP(struct mthca_dev *dev, u8 *status); 316int mthca_NOP(struct mthca_dev *dev, u8 *status);
309 317
310#define MAILBOX_ALIGN(x) ((void *) ALIGN((unsigned long) (x), MTHCA_CMD_MAILBOX_ALIGN))
311
312#endif /* MTHCA_CMD_H */ 318#endif /* MTHCA_CMD_H */
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 505d059216e..766e9031ec4 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -745,7 +745,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
745 struct mthca_cq *cq) 745 struct mthca_cq *cq)
746{ 746{
747 int size = nent * MTHCA_CQ_ENTRY_SIZE; 747 int size = nent * MTHCA_CQ_ENTRY_SIZE;
748 void *mailbox = NULL; 748 struct mthca_mailbox *mailbox;
749 struct mthca_cq_context *cq_context; 749 struct mthca_cq_context *cq_context;
750 int err = -ENOMEM; 750 int err = -ENOMEM;
751 u8 status; 751 u8 status;
@@ -779,12 +779,11 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
779 goto err_out_ci; 779 goto err_out_ci;
780 } 780 }
781 781
782 mailbox = kmalloc(sizeof (struct mthca_cq_context) + MTHCA_CMD_MAILBOX_EXTRA, 782 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
783 GFP_KERNEL); 783 if (IS_ERR(mailbox))
784 if (!mailbox) 784 goto err_out_arm;
785 goto err_out_mailbox;
786 785
787 cq_context = MAILBOX_ALIGN(mailbox); 786 cq_context = mailbox->buf;
788 787
789 err = mthca_alloc_cq_buf(dev, size, cq); 788 err = mthca_alloc_cq_buf(dev, size, cq);
790 if (err) 789 if (err)
@@ -815,7 +814,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
815 cq_context->state_db = cpu_to_be32(cq->arm_db_index); 814 cq_context->state_db = cpu_to_be32(cq->arm_db_index);
816 } 815 }
817 816
818 err = mthca_SW2HW_CQ(dev, cq_context, cq->cqn, &status); 817 err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status);
819 if (err) { 818 if (err) {
820 mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err); 819 mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err);
821 goto err_out_free_mr; 820 goto err_out_free_mr;
@@ -839,7 +838,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
839 838
840 cq->cons_index = 0; 839 cq->cons_index = 0;
841 840
842 kfree(mailbox); 841 mthca_free_mailbox(dev, mailbox);
843 842
844 return 0; 843 return 0;
845 844
@@ -848,8 +847,9 @@ err_out_free_mr:
848 mthca_free_cq_buf(dev, cq); 847 mthca_free_cq_buf(dev, cq);
849 848
850err_out_mailbox: 849err_out_mailbox:
851 kfree(mailbox); 850 mthca_free_mailbox(dev, mailbox);
852 851
852err_out_arm:
853 if (mthca_is_memfree(dev)) 853 if (mthca_is_memfree(dev))
854 mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index); 854 mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);
855 855
@@ -869,28 +869,26 @@ err_out:
869void mthca_free_cq(struct mthca_dev *dev, 869void mthca_free_cq(struct mthca_dev *dev,
870 struct mthca_cq *cq) 870 struct mthca_cq *cq)
871{ 871{
872 void *mailbox; 872 struct mthca_mailbox *mailbox;
873 int err; 873 int err;
874 u8 status; 874 u8 status;
875 875
876 might_sleep(); 876 might_sleep();
877 877
878 mailbox = kmalloc(sizeof (struct mthca_cq_context) + MTHCA_CMD_MAILBOX_EXTRA, 878 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
879 GFP_KERNEL); 879 if (IS_ERR(mailbox)) {
880 if (!mailbox) {
881 mthca_warn(dev, "No memory for mailbox to free CQ.\n"); 880 mthca_warn(dev, "No memory for mailbox to free CQ.\n");
882 return; 881 return;
883 } 882 }
884 883
885 err = mthca_HW2SW_CQ(dev, MAILBOX_ALIGN(mailbox), cq->cqn, &status); 884 err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn, &status);
886 if (err) 885 if (err)
887 mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err); 886 mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err);
888 else if (status) 887 else if (status)
889 mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", 888 mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status);
890 status);
891 889
892 if (0) { 890 if (0) {
893 u32 *ctx = MAILBOX_ALIGN(mailbox); 891 u32 *ctx = mailbox->buf;
894 int j; 892 int j;
895 893
896 printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n", 894 printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n",
@@ -922,7 +920,7 @@ void mthca_free_cq(struct mthca_dev *dev,
922 920
923 mthca_table_put(dev, dev->cq_table.table, cq->cqn); 921 mthca_table_put(dev, dev->cq_table.table, cq->cqn);
924 mthca_free(&dev->cq_table.alloc, cq->cqn); 922 mthca_free(&dev->cq_table.alloc, cq->cqn);
925 kfree(mailbox); 923 mthca_free_mailbox(dev, mailbox);
926} 924}
927 925
928int __devinit mthca_init_cq_table(struct mthca_dev *dev) 926int __devinit mthca_init_cq_table(struct mthca_dev *dev)
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index e8cf4d68d11..50b2aab114a 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -99,6 +99,7 @@ enum {
99}; 99};
100 100
101struct mthca_cmd { 101struct mthca_cmd {
102 struct pci_pool *pool;
102 int use_events; 103 int use_events;
103 struct semaphore hcr_sem; 104 struct semaphore hcr_sem;
104 struct semaphore poll_sem; 105 struct semaphore poll_sem;
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 970cba24e79..cbcf2b4722e 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -469,7 +469,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
469 PAGE_SIZE; 469 PAGE_SIZE;
470 u64 *dma_list = NULL; 470 u64 *dma_list = NULL;
471 dma_addr_t t; 471 dma_addr_t t;
472 void *mailbox = NULL; 472 struct mthca_mailbox *mailbox;
473 struct mthca_eq_context *eq_context; 473 struct mthca_eq_context *eq_context;
474 int err = -ENOMEM; 474 int err = -ENOMEM;
475 int i; 475 int i;
@@ -494,17 +494,16 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
494 if (!dma_list) 494 if (!dma_list)
495 goto err_out_free; 495 goto err_out_free;
496 496
497 mailbox = kmalloc(sizeof *eq_context + MTHCA_CMD_MAILBOX_EXTRA, 497 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
498 GFP_KERNEL); 498 if (IS_ERR(mailbox))
499 if (!mailbox)
500 goto err_out_free; 499 goto err_out_free;
501 eq_context = MAILBOX_ALIGN(mailbox); 500 eq_context = mailbox->buf;
502 501
503 for (i = 0; i < npages; ++i) { 502 for (i = 0; i < npages; ++i) {
504 eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev, 503 eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
505 PAGE_SIZE, &t, GFP_KERNEL); 504 PAGE_SIZE, &t, GFP_KERNEL);
506 if (!eq->page_list[i].buf) 505 if (!eq->page_list[i].buf)
507 goto err_out_free; 506 goto err_out_free_pages;
508 507
509 dma_list[i] = t; 508 dma_list[i] = t;
510 pci_unmap_addr_set(&eq->page_list[i], mapping, t); 509 pci_unmap_addr_set(&eq->page_list[i], mapping, t);
@@ -517,7 +516,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
517 516
518 eq->eqn = mthca_alloc(&dev->eq_table.alloc); 517 eq->eqn = mthca_alloc(&dev->eq_table.alloc);
519 if (eq->eqn == -1) 518 if (eq->eqn == -1)
520 goto err_out_free; 519 goto err_out_free_pages;
521 520
522 err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num, 521 err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
523 dma_list, PAGE_SHIFT, npages, 522 dma_list, PAGE_SHIFT, npages,
@@ -548,7 +547,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
548 eq_context->intr = intr; 547 eq_context->intr = intr;
549 eq_context->lkey = cpu_to_be32(eq->mr.ibmr.lkey); 548 eq_context->lkey = cpu_to_be32(eq->mr.ibmr.lkey);
550 549
551 err = mthca_SW2HW_EQ(dev, eq_context, eq->eqn, &status); 550 err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn, &status);
552 if (err) { 551 if (err) {
553 mthca_warn(dev, "SW2HW_EQ failed (%d)\n", err); 552 mthca_warn(dev, "SW2HW_EQ failed (%d)\n", err);
554 goto err_out_free_mr; 553 goto err_out_free_mr;
@@ -561,7 +560,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
561 } 560 }
562 561
563 kfree(dma_list); 562 kfree(dma_list);
564 kfree(mailbox); 563 mthca_free_mailbox(dev, mailbox);
565 564
566 eq->eqn_mask = swab32(1 << eq->eqn); 565 eq->eqn_mask = swab32(1 << eq->eqn);
567 eq->cons_index = 0; 566 eq->cons_index = 0;
@@ -579,7 +578,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
579 err_out_free_eq: 578 err_out_free_eq:
580 mthca_free(&dev->eq_table.alloc, eq->eqn); 579 mthca_free(&dev->eq_table.alloc, eq->eqn);
581 580
582 err_out_free: 581 err_out_free_pages:
583 for (i = 0; i < npages; ++i) 582 for (i = 0; i < npages; ++i)
584 if (eq->page_list[i].buf) 583 if (eq->page_list[i].buf)
585 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, 584 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
@@ -587,9 +586,11 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
587 pci_unmap_addr(&eq->page_list[i], 586 pci_unmap_addr(&eq->page_list[i],
588 mapping)); 587 mapping));
589 588
589 mthca_free_mailbox(dev, mailbox);
590
591 err_out_free:
590 kfree(eq->page_list); 592 kfree(eq->page_list);
591 kfree(dma_list); 593 kfree(dma_list);
592 kfree(mailbox);
593 594
594 err_out: 595 err_out:
595 return err; 596 return err;
@@ -598,20 +599,18 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
598static void mthca_free_eq(struct mthca_dev *dev, 599static void mthca_free_eq(struct mthca_dev *dev,
599 struct mthca_eq *eq) 600 struct mthca_eq *eq)
600{ 601{
601 void *mailbox = NULL; 602 struct mthca_mailbox *mailbox;
602 int err; 603 int err;
603 u8 status; 604 u8 status;
604 int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) / 605 int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
605 PAGE_SIZE; 606 PAGE_SIZE;
606 int i; 607 int i;
607 608
608 mailbox = kmalloc(sizeof (struct mthca_eq_context) + MTHCA_CMD_MAILBOX_EXTRA, 609 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
609 GFP_KERNEL); 610 if (IS_ERR(mailbox))
610 if (!mailbox)
611 return; 611 return;
612 612
613 err = mthca_HW2SW_EQ(dev, MAILBOX_ALIGN(mailbox), 613 err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn, &status);
614 eq->eqn, &status);
615 if (err) 614 if (err)
616 mthca_warn(dev, "HW2SW_EQ failed (%d)\n", err); 615 mthca_warn(dev, "HW2SW_EQ failed (%d)\n", err);
617 if (status) 616 if (status)
@@ -624,7 +623,7 @@ static void mthca_free_eq(struct mthca_dev *dev,
624 for (i = 0; i < sizeof (struct mthca_eq_context) / 4; ++i) { 623 for (i = 0; i < sizeof (struct mthca_eq_context) / 4; ++i) {
625 if (i % 4 == 0) 624 if (i % 4 == 0)
626 printk("[%02x] ", i * 4); 625 printk("[%02x] ", i * 4);
627 printk(" %08x", be32_to_cpup(MAILBOX_ALIGN(mailbox) + i * 4)); 626 printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
628 if ((i + 1) % 4 == 0) 627 if ((i + 1) % 4 == 0)
629 printk("\n"); 628 printk("\n");
630 } 629 }
@@ -637,7 +636,7 @@ static void mthca_free_eq(struct mthca_dev *dev,
637 pci_unmap_addr(&eq->page_list[i], mapping)); 636 pci_unmap_addr(&eq->page_list[i], mapping));
638 637
639 kfree(eq->page_list); 638 kfree(eq->page_list);
640 kfree(mailbox); 639 mthca_free_mailbox(dev, mailbox);
641} 640}
642 641
643static void mthca_free_irqs(struct mthca_dev *dev) 642static void mthca_free_irqs(struct mthca_dev *dev)
diff --git a/drivers/infiniband/hw/mthca/mthca_mcg.c b/drivers/infiniband/hw/mthca/mthca_mcg.c
index 70a6553a588..5be7d949dbf 100644
--- a/drivers/infiniband/hw/mthca/mthca_mcg.c
+++ b/drivers/infiniband/hw/mthca/mthca_mcg.c
@@ -66,22 +66,23 @@ static const u8 zero_gid[16]; /* automatically initialized to 0 */
66 * entry in hash chain and *mgm holds end of hash chain. 66 * entry in hash chain and *mgm holds end of hash chain.
67 */ 67 */
68static int find_mgm(struct mthca_dev *dev, 68static int find_mgm(struct mthca_dev *dev,
69 u8 *gid, struct mthca_mgm *mgm, 69 u8 *gid, struct mthca_mailbox *mgm_mailbox,
70 u16 *hash, int *prev, int *index) 70 u16 *hash, int *prev, int *index)
71{ 71{
72 void *mailbox; 72 struct mthca_mailbox *mailbox;
73 struct mthca_mgm *mgm = mgm_mailbox->buf;
73 u8 *mgid; 74 u8 *mgid;
74 int err; 75 int err;
75 u8 status; 76 u8 status;
76 77
77 mailbox = kmalloc(16 + MTHCA_CMD_MAILBOX_EXTRA, GFP_KERNEL); 78 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
78 if (!mailbox) 79 if (IS_ERR(mailbox))
79 return -ENOMEM; 80 return -ENOMEM;
80 mgid = MAILBOX_ALIGN(mailbox); 81 mgid = mailbox->buf;
81 82
82 memcpy(mgid, gid, 16); 83 memcpy(mgid, gid, 16);
83 84
84 err = mthca_MGID_HASH(dev, mgid, hash, &status); 85 err = mthca_MGID_HASH(dev, mailbox, hash, &status);
85 if (err) 86 if (err)
86 goto out; 87 goto out;
87 if (status) { 88 if (status) {
@@ -103,7 +104,7 @@ static int find_mgm(struct mthca_dev *dev,
103 *prev = -1; 104 *prev = -1;
104 105
105 do { 106 do {
106 err = mthca_READ_MGM(dev, *index, mgm, &status); 107 err = mthca_READ_MGM(dev, *index, mgm_mailbox, &status);
107 if (err) 108 if (err)
108 goto out; 109 goto out;
109 if (status) { 110 if (status) {
@@ -129,14 +130,14 @@ static int find_mgm(struct mthca_dev *dev,
129 *index = -1; 130 *index = -1;
130 131
131 out: 132 out:
132 kfree(mailbox); 133 mthca_free_mailbox(dev, mailbox);
133 return err; 134 return err;
134} 135}
135 136
136int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) 137int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
137{ 138{
138 struct mthca_dev *dev = to_mdev(ibqp->device); 139 struct mthca_dev *dev = to_mdev(ibqp->device);
139 void *mailbox; 140 struct mthca_mailbox *mailbox;
140 struct mthca_mgm *mgm; 141 struct mthca_mgm *mgm;
141 u16 hash; 142 u16 hash;
142 int index, prev; 143 int index, prev;
@@ -145,15 +146,15 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
145 int err; 146 int err;
146 u8 status; 147 u8 status;
147 148
148 mailbox = kmalloc(sizeof *mgm + MTHCA_CMD_MAILBOX_EXTRA, GFP_KERNEL); 149 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
149 if (!mailbox) 150 if (IS_ERR(mailbox))
150 return -ENOMEM; 151 return PTR_ERR(mailbox);
151 mgm = MAILBOX_ALIGN(mailbox); 152 mgm = mailbox->buf;
152 153
153 if (down_interruptible(&dev->mcg_table.sem)) 154 if (down_interruptible(&dev->mcg_table.sem))
154 return -EINTR; 155 return -EINTR;
155 156
156 err = find_mgm(dev, gid->raw, mgm, &hash, &prev, &index); 157 err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index);
157 if (err) 158 if (err)
158 goto out; 159 goto out;
159 160
@@ -170,7 +171,7 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
170 goto out; 171 goto out;
171 } 172 }
172 173
173 err = mthca_READ_MGM(dev, index, mgm, &status); 174 err = mthca_READ_MGM(dev, index, mailbox, &status);
174 if (err) 175 if (err)
175 goto out; 176 goto out;
176 if (status) { 177 if (status) {
@@ -195,7 +196,7 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
195 goto out; 196 goto out;
196 } 197 }
197 198
198 err = mthca_WRITE_MGM(dev, index, mgm, &status); 199 err = mthca_WRITE_MGM(dev, index, mailbox, &status);
199 if (err) 200 if (err)
200 goto out; 201 goto out;
201 if (status) { 202 if (status) {
@@ -206,7 +207,7 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
206 if (!link) 207 if (!link)
207 goto out; 208 goto out;
208 209
209 err = mthca_READ_MGM(dev, prev, mgm, &status); 210 err = mthca_READ_MGM(dev, prev, mailbox, &status);
210 if (err) 211 if (err)
211 goto out; 212 goto out;
212 if (status) { 213 if (status) {
@@ -217,7 +218,7 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
217 218
218 mgm->next_gid_index = cpu_to_be32(index << 5); 219 mgm->next_gid_index = cpu_to_be32(index << 5);
219 220
220 err = mthca_WRITE_MGM(dev, prev, mgm, &status); 221 err = mthca_WRITE_MGM(dev, prev, mailbox, &status);
221 if (err) 222 if (err)
222 goto out; 223 goto out;
223 if (status) { 224 if (status) {
@@ -227,14 +228,14 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
227 228
228 out: 229 out:
229 up(&dev->mcg_table.sem); 230 up(&dev->mcg_table.sem);
230 kfree(mailbox); 231 mthca_free_mailbox(dev, mailbox);
231 return err; 232 return err;
232} 233}
233 234
234int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) 235int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
235{ 236{
236 struct mthca_dev *dev = to_mdev(ibqp->device); 237 struct mthca_dev *dev = to_mdev(ibqp->device);
237 void *mailbox; 238 struct mthca_mailbox *mailbox;
238 struct mthca_mgm *mgm; 239 struct mthca_mgm *mgm;
239 u16 hash; 240 u16 hash;
240 int prev, index; 241 int prev, index;
@@ -242,15 +243,15 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
242 int err; 243 int err;
243 u8 status; 244 u8 status;
244 245
245 mailbox = kmalloc(sizeof *mgm + MTHCA_CMD_MAILBOX_EXTRA, GFP_KERNEL); 246 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
246 if (!mailbox) 247 if (IS_ERR(mailbox))
247 return -ENOMEM; 248 return PTR_ERR(mailbox);
248 mgm = MAILBOX_ALIGN(mailbox); 249 mgm = mailbox->buf;
249 250
250 if (down_interruptible(&dev->mcg_table.sem)) 251 if (down_interruptible(&dev->mcg_table.sem))
251 return -EINTR; 252 return -EINTR;
252 253
253 err = find_mgm(dev, gid->raw, mgm, &hash, &prev, &index); 254 err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index);
254 if (err) 255 if (err)
255 goto out; 256 goto out;
256 257
@@ -285,7 +286,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
285 mgm->qp[loc] = mgm->qp[i - 1]; 286 mgm->qp[loc] = mgm->qp[i - 1];
286 mgm->qp[i - 1] = 0; 287 mgm->qp[i - 1] = 0;
287 288
288 err = mthca_WRITE_MGM(dev, index, mgm, &status); 289 err = mthca_WRITE_MGM(dev, index, mailbox, &status);
289 if (err) 290 if (err)
290 goto out; 291 goto out;
291 if (status) { 292 if (status) {
@@ -304,7 +305,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
304 if (be32_to_cpu(mgm->next_gid_index) >> 5) { 305 if (be32_to_cpu(mgm->next_gid_index) >> 5) {
305 err = mthca_READ_MGM(dev, 306 err = mthca_READ_MGM(dev,
306 be32_to_cpu(mgm->next_gid_index) >> 5, 307 be32_to_cpu(mgm->next_gid_index) >> 5,
307 mgm, &status); 308 mailbox, &status);
308 if (err) 309 if (err)
309 goto out; 310 goto out;
310 if (status) { 311 if (status) {
@@ -316,7 +317,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
316 } else 317 } else
317 memset(mgm->gid, 0, 16); 318 memset(mgm->gid, 0, 16);
318 319
319 err = mthca_WRITE_MGM(dev, index, mgm, &status); 320 err = mthca_WRITE_MGM(dev, index, mailbox, &status);
320 if (err) 321 if (err)
321 goto out; 322 goto out;
322 if (status) { 323 if (status) {
@@ -327,7 +328,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
327 } else { 328 } else {
328 /* Remove entry from AMGM */ 329 /* Remove entry from AMGM */
329 index = be32_to_cpu(mgm->next_gid_index) >> 5; 330 index = be32_to_cpu(mgm->next_gid_index) >> 5;
330 err = mthca_READ_MGM(dev, prev, mgm, &status); 331 err = mthca_READ_MGM(dev, prev, mailbox, &status);
331 if (err) 332 if (err)
332 goto out; 333 goto out;
333 if (status) { 334 if (status) {
@@ -338,7 +339,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
338 339
339 mgm->next_gid_index = cpu_to_be32(index << 5); 340 mgm->next_gid_index = cpu_to_be32(index << 5);
340 341
341 err = mthca_WRITE_MGM(dev, prev, mgm, &status); 342 err = mthca_WRITE_MGM(dev, prev, mailbox, &status);
342 if (err) 343 if (err)
343 goto out; 344 goto out;
344 if (status) { 345 if (status) {
@@ -350,7 +351,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
350 351
351 out: 352 out:
352 up(&dev->mcg_table.sem); 353 up(&dev->mcg_table.sem);
353 kfree(mailbox); 354 mthca_free_mailbox(dev, mailbox);
354 return err; 355 return err;
355} 356}
356 357
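
Every caller converted in mthca_mcg.c follows the same life cycle: allocate a mailbox from the command pool, use mailbox->buf as the 4K-aligned command payload, and free the mailbox on the way out. The following is a minimal sketch of that pattern, not driver code: the example_* name, the next-index out-parameter, and the -EINVAL mapping of a nonzero firmware status are illustrative assumptions, and it presumes the struct mthca_mgm definition that is local to mthca_mcg.c.

	#include "mthca_dev.h"
	#include "mthca_cmd.h"

	static int example_read_next_index(struct mthca_dev *dev, int index, int *next)
	{
		struct mthca_mailbox *mailbox;
		struct mthca_mgm *mgm;
		u8 status;
		int err;

		mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);	/* allocation failure comes back ERR_PTR-encoded */

		mgm = mailbox->buf;			/* 4K-aligned, pool-backed DMA buffer */

		err = mthca_READ_MGM(dev, index, mailbox, &status);
		if (!err && status)
			err = -EINVAL;			/* assumed mapping of a nonzero FW status */
		if (!err)
			*next = be32_to_cpu(mgm->next_gid_index) >> 5;

		mthca_free_mailbox(dev, mailbox);	/* freed on success and failure alike */
		return err;
	}

The same IS_ERR()/PTR_ERR() convention replaces the old NULL check on kmalloc() throughout the files below.
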
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 877654ae42d..cbe50feaf68 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -246,21 +246,23 @@ void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt)
246int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt, 246int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
247 int start_index, u64 *buffer_list, int list_len) 247 int start_index, u64 *buffer_list, int list_len)
248{ 248{
249 struct mthca_mailbox *mailbox;
249 u64 *mtt_entry; 250 u64 *mtt_entry;
250 int err = 0; 251 int err = 0;
251 u8 status; 252 u8 status;
252 int i; 253 int i;
253 254
254 mtt_entry = (u64 *) __get_free_page(GFP_KERNEL); 255 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
255 if (!mtt_entry) 256 if (IS_ERR(mailbox))
256 return -ENOMEM; 257 return PTR_ERR(mailbox);
258 mtt_entry = mailbox->buf;
257 259
258 while (list_len > 0) { 260 while (list_len > 0) {
259 mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base + 261 mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base +
260 mtt->first_seg * MTHCA_MTT_SEG_SIZE + 262 mtt->first_seg * MTHCA_MTT_SEG_SIZE +
261 start_index * 8); 263 start_index * 8);
262 mtt_entry[1] = 0; 264 mtt_entry[1] = 0;
263 for (i = 0; i < list_len && i < PAGE_SIZE / 8 - 2; ++i) 265 for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i)
264 mtt_entry[i + 2] = cpu_to_be64(buffer_list[i] | 266 mtt_entry[i + 2] = cpu_to_be64(buffer_list[i] |
265 MTHCA_MTT_FLAG_PRESENT); 267 MTHCA_MTT_FLAG_PRESENT);
266 268
@@ -271,7 +273,7 @@ int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
271 if (i & 1) 273 if (i & 1)
272 mtt_entry[i + 2] = 0; 274 mtt_entry[i + 2] = 0;
273 275
274 err = mthca_WRITE_MTT(dev, mtt_entry, (i + 1) & ~1, &status); 276 err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1, &status);
275 if (err) { 277 if (err) {
276 mthca_warn(dev, "WRITE_MTT failed (%d)\n", err); 278 mthca_warn(dev, "WRITE_MTT failed (%d)\n", err);
277 goto out; 279 goto out;
@@ -289,7 +291,7 @@ int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
289 } 291 }
290 292
291out: 293out:
292 free_page((unsigned long) mtt_entry); 294 mthca_free_mailbox(dev, mailbox);
293 return err; 295 return err;
294} 296}
295 297
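
The write_mtt loop above now limits each chunk by mailbox size rather than page size: a WRITE_MTT mailbox carries two leading 8-byte words (the MTT base address for the chunk and a cleared second word) followed by the entries themselves. Assuming MTHCA_MAILBOX_SIZE is 4096, in line with the 4K alignment the new firmware expects, that is 510 entries per command. A small sketch of the arithmetic, with a hypothetical helper name:

	#include "mthca_cmd.h"	/* for MTHCA_MAILBOX_SIZE */

	/* How many WRITE_MTT commands a buffer list of list_len entries needs. */
	static int example_write_mtt_commands(int list_len)
	{
		const int per_cmd = MTHCA_MAILBOX_SIZE / 8 - 2;	/* 510 for a 4K mailbox */

		return (list_len + per_cmd - 1) / per_cmd;	/* round up */
	}

mthca_write_mtt() additionally rounds each chunk's entry count up to an even number ((i + 1) & ~1), zeroing the trailing slot when the chunk is odd-sized.
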
@@ -332,7 +334,7 @@ static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key)
332int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift, 334int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
333 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr) 335 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
334{ 336{
335 void *mailbox; 337 struct mthca_mailbox *mailbox;
336 struct mthca_mpt_entry *mpt_entry; 338 struct mthca_mpt_entry *mpt_entry;
337 u32 key; 339 u32 key;
338 int i; 340 int i;
@@ -354,13 +356,12 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
354 goto err_out_mpt_free; 356 goto err_out_mpt_free;
355 } 357 }
356 358
357 mailbox = kmalloc(sizeof *mpt_entry + MTHCA_CMD_MAILBOX_EXTRA, 359 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
358 GFP_KERNEL); 360 if (IS_ERR(mailbox)) {
359 if (!mailbox) { 361 err = PTR_ERR(mailbox);
360 err = -ENOMEM;
361 goto err_out_table; 362 goto err_out_table;
362 } 363 }
363 mpt_entry = MAILBOX_ALIGN(mailbox); 364 mpt_entry = mailbox->buf;
364 365
365 mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS | 366 mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS |
366 MTHCA_MPT_FLAG_MIO | 367 MTHCA_MPT_FLAG_MIO |
@@ -394,7 +395,7 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
394 } 395 }
395 } 396 }
396 397
397 err = mthca_SW2HW_MPT(dev, mpt_entry, 398 err = mthca_SW2HW_MPT(dev, mailbox,
398 key & (dev->limits.num_mpts - 1), 399 key & (dev->limits.num_mpts - 1),
399 &status); 400 &status);
400 if (err) { 401 if (err) {
@@ -407,11 +408,11 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
407 goto err_out_mailbox; 408 goto err_out_mailbox;
408 } 409 }
409 410
410 kfree(mailbox); 411 mthca_free_mailbox(dev, mailbox);
411 return err; 412 return err;
412 413
413err_out_mailbox: 414err_out_mailbox:
414 kfree(mailbox); 415 mthca_free_mailbox(dev, mailbox);
415 416
416err_out_table: 417err_out_table:
417 mthca_table_put(dev, dev->mr_table.mpt_table, key); 418 mthca_table_put(dev, dev->mr_table.mpt_table, key);
@@ -487,7 +488,7 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
487 u32 access, struct mthca_fmr *mr) 488 u32 access, struct mthca_fmr *mr)
488{ 489{
489 struct mthca_mpt_entry *mpt_entry; 490 struct mthca_mpt_entry *mpt_entry;
490 void *mailbox; 491 struct mthca_mailbox *mailbox;
491 u64 mtt_seg; 492 u64 mtt_seg;
492 u32 key, idx; 493 u32 key, idx;
493 u8 status; 494 u8 status;
@@ -538,12 +539,11 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
538 } else 539 } else
539 mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg; 540 mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg;
540 541
541 mailbox = kmalloc(sizeof *mpt_entry + MTHCA_CMD_MAILBOX_EXTRA, 542 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
542 GFP_KERNEL); 543 if (IS_ERR(mailbox))
543 if (!mailbox)
544 goto err_out_free_mtt; 544 goto err_out_free_mtt;
545 545
546 mpt_entry = MAILBOX_ALIGN(mailbox); 546 mpt_entry = mailbox->buf;
547 547
548 mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS | 548 mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS |
549 MTHCA_MPT_FLAG_MIO | 549 MTHCA_MPT_FLAG_MIO |
@@ -568,7 +568,7 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
568 } 568 }
569 } 569 }
570 570
571 err = mthca_SW2HW_MPT(dev, mpt_entry, 571 err = mthca_SW2HW_MPT(dev, mailbox,
572 key & (dev->limits.num_mpts - 1), 572 key & (dev->limits.num_mpts - 1),
573 &status); 573 &status);
574 if (err) { 574 if (err) {
@@ -582,11 +582,11 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
582 goto err_out_mailbox_free; 582 goto err_out_mailbox_free;
583 } 583 }
584 584
585 kfree(mailbox); 585 mthca_free_mailbox(dev, mailbox);
586 return 0; 586 return 0;
587 587
588err_out_mailbox_free: 588err_out_mailbox_free:
589 kfree(mailbox); 589 mthca_free_mailbox(dev, mailbox);
590 590
591err_out_free_mtt: 591err_out_free_mtt:
592 mthca_free_mtt(dev, mr->mtt); 592 mthca_free_mtt(dev, mr->mtt);
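
In the MR and FMR paths the mailbox is released through the existing unwind labels, so both the failure and success legs of SW2HW_MPT end in mthca_free_mailbox(). A condensed sketch of that unwind shape; the example_* name, the mostly-cleared MPT entry, and the -EINVAL status mapping are illustrative assumptions, and the mthca_mpt_entry definition is assumed visible as it is in mthca_mr.c:

	static int example_sw2hw_mpt(struct mthca_dev *dev, u32 key)
	{
		struct mthca_mailbox *mailbox;
		struct mthca_mpt_entry *mpt_entry;
		u8 status;
		int err;

		mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);

		mpt_entry = mailbox->buf;
		memset(mpt_entry, 0, sizeof *mpt_entry);
		mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS | MTHCA_MPT_FLAG_MIO);

		err = mthca_SW2HW_MPT(dev, mailbox,
				      key & (dev->limits.num_mpts - 1), &status);
		if (err)
			goto err_out_mailbox;
		if (status) {
			err = -EINVAL;		/* assumed mapping of a nonzero FW status */
			goto err_out_mailbox;
		}

		mthca_free_mailbox(dev, mailbox);
		return 0;

	err_out_mailbox:
		mthca_free_mailbox(dev, mailbox);
		return err;
	}
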
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index a92e870dfb9..163a8ef4186 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -589,7 +589,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
589 struct mthca_dev *dev = to_mdev(ibqp->device); 589 struct mthca_dev *dev = to_mdev(ibqp->device);
590 struct mthca_qp *qp = to_mqp(ibqp); 590 struct mthca_qp *qp = to_mqp(ibqp);
591 enum ib_qp_state cur_state, new_state; 591 enum ib_qp_state cur_state, new_state;
592 void *mailbox = NULL; 592 struct mthca_mailbox *mailbox;
593 struct mthca_qp_param *qp_param; 593 struct mthca_qp_param *qp_param;
594 struct mthca_qp_context *qp_context; 594 struct mthca_qp_context *qp_context;
595 u32 req_param, opt_param; 595 u32 req_param, opt_param;
@@ -646,10 +646,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
646 return -EINVAL; 646 return -EINVAL;
647 } 647 }
648 648
649 mailbox = kmalloc(sizeof (*qp_param) + MTHCA_CMD_MAILBOX_EXTRA, GFP_KERNEL); 649 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
650 if (!mailbox) 650 if (IS_ERR(mailbox))
651 return -ENOMEM; 651 return PTR_ERR(mailbox);
652 qp_param = MAILBOX_ALIGN(mailbox); 652 qp_param = mailbox->buf;
653 qp_context = &qp_param->context; 653 qp_context = &qp_param->context;
654 memset(qp_param, 0, sizeof *qp_param); 654 memset(qp_param, 0, sizeof *qp_param);
655 655
@@ -872,7 +872,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
872 } 872 }
873 873
874 err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans, 874 err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
875 qp->qpn, 0, qp_param, 0, &status); 875 qp->qpn, 0, mailbox, 0, &status);
876 if (status) { 876 if (status) {
877 mthca_warn(dev, "modify QP %d returned status %02x.\n", 877 mthca_warn(dev, "modify QP %d returned status %02x.\n",
878 state_table[cur_state][new_state].trans, status); 878 state_table[cur_state][new_state].trans, status);
@@ -882,7 +882,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
882 if (!err) 882 if (!err)
883 qp->state = new_state; 883 qp->state = new_state;
884 884
885 kfree(mailbox); 885 mthca_free_mailbox(dev, mailbox);
886 886
887 if (is_sqp(dev, qp)) 887 if (is_sqp(dev, qp))
888 store_attrs(to_msqp(qp), attr, attr_mask); 888 store_attrs(to_msqp(qp), attr, attr_mask);
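
The modify-QP conversion is the same idea with a larger payload: the struct mthca_qp_param block is built directly in mailbox->buf, and mthca_MODIFY_QP() now receives the mailbox rather than the raw parameter pointer. A compressed, assumption-laden sketch of the call shape (the transition argument, the omitted context fill-in, and the status handling are simplified):

	static int example_modify_qp(struct mthca_dev *dev, struct mthca_qp *qp, int trans)
	{
		struct mthca_mailbox *mailbox;
		struct mthca_qp_param *qp_param;
		u8 status;
		int err;

		mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);

		qp_param = mailbox->buf;
		memset(qp_param, 0, sizeof *qp_param);
		/* ... fill qp_param->context as mthca_modify_qp() does above ... */

		err = mthca_MODIFY_QP(dev, trans, qp->qpn, 0, mailbox, 0, &status);
		if (!err && status)
			err = -EINVAL;		/* assumed mapping of a nonzero FW status */

		mthca_free_mailbox(dev, mailbox);
		return err;
	}
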