path: root/drivers/infiniband/hw/mthca/mthca_cmd.c
author    Roland Dreier <roland@topspin.com>    2005-06-27 17:36:45 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>    2005-06-27 18:11:46 -0400
commit    ed878458eeff9754d66f1b0325df6ebbfcdce668 (patch)
tree      eab302706f069a7922e1d953b5f33b61bdc868a4 /drivers/infiniband/hw/mthca/mthca_cmd.c
parent    80fd8238734c852a8ed1ea39f8444a2df33bd161 (diff)
[PATCH] IB/mthca: Align FW command mailboxes to 4K
Future versions of Mellanox HCA firmware will require command mailboxes to be
aligned to 4K.  Support this by using a pci_pool to allocate all mailboxes.
This has the added benefit of shrinking the source and text of mthca.

Signed-off-by: Roland Dreier <roland@topspin.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
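In rough outline, the scheme the patch moves to looks like the sketch below: a pci_pool created with both its size and its alignment set to the mailbox size hands out buffers that satisfy the 4K alignment rule, and each command carries one pool buffer together with its bus address in a small wrapper struct. This is a simplified illustration only; the helper names cmd_pool_create(), mailbox_get() and mailbox_put() are made up for the sketch, and MTHCA_MAILBOX_SIZE is assumed here to be 4096. The real code added by the patch is mthca_alloc_mailbox()/mthca_free_mailbox() in mthca_cmd.c, with struct mthca_mailbox declared in the driver's command header.

/*
 * Sketch of the mailbox scheme this patch introduces (simplified, for
 * illustration only).  MTHCA_MAILBOX_SIZE is assumed to be 4096, so passing
 * it as both the size and the alignment of the pci_pool yields buffers
 * aligned to 4K, as newer firmware requires.
 */
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/err.h>

#define MTHCA_MAILBOX_SIZE 4096

struct mthca_mailbox {
        void       *buf;        /* CPU address of the 4K-aligned buffer */
        dma_addr_t  dma;        /* bus address passed in the FW command */
};

/* Created once at init; every pci_pool_alloc() from it is 4K-aligned. */
static struct pci_pool *cmd_pool_create(struct pci_dev *pdev)
{
        return pci_pool_create("mthca_cmd", pdev,
                               MTHCA_MAILBOX_SIZE, MTHCA_MAILBOX_SIZE, 0);
}

/* Per command: take a mailbox from the pool, use mailbox->dma, then free it. */
static struct mthca_mailbox *mailbox_get(struct pci_pool *pool,
                                         unsigned int gfp_mask)
{
        struct mthca_mailbox *mailbox = kmalloc(sizeof *mailbox, gfp_mask);

        if (!mailbox)
                return ERR_PTR(-ENOMEM);

        mailbox->buf = pci_pool_alloc(pool, gfp_mask, &mailbox->dma);
        if (!mailbox->buf) {
                kfree(mailbox);
                return ERR_PTR(-ENOMEM);
        }
        return mailbox;
}

static void mailbox_put(struct pci_pool *pool, struct mthca_mailbox *mailbox)
{
        pci_pool_free(pool, mailbox->buf, mailbox->dma);
        kfree(mailbox);
}

Because every command now borrows a preallocated, correctly aligned buffer from the pool, the per-command pci_alloc_consistent()/pci_map_single() boilerplate scattered through mthca_cmd.c can be deleted, which is where the shrinkage mentioned above comes from.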
Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_cmd.c')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c  510
1 files changed, 206 insertions, 304 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 78d4891720e9..1557a522d831 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -444,11 +444,20 @@ int mthca_cmd_init(struct mthca_dev *dev)
                 return -ENOMEM;
         }
 
+        dev->cmd.pool = pci_pool_create("mthca_cmd", dev->pdev,
+                                        MTHCA_MAILBOX_SIZE,
+                                        MTHCA_MAILBOX_SIZE, 0);
+        if (!dev->cmd.pool) {
+                iounmap(dev->hcr);
+                return -ENOMEM;
+        }
+
         return 0;
 }
 
 void mthca_cmd_cleanup(struct mthca_dev *dev)
 {
+        pci_pool_destroy(dev->cmd.pool);
         iounmap(dev->hcr);
 }
 
@@ -510,6 +519,33 @@ void mthca_cmd_use_polling(struct mthca_dev *dev)
         up(&dev->cmd.poll_sem);
 }
 
+struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
+                                          unsigned int gfp_mask)
+{
+        struct mthca_mailbox *mailbox;
+
+        mailbox = kmalloc(sizeof *mailbox, gfp_mask);
+        if (!mailbox)
+                return ERR_PTR(-ENOMEM);
+
+        mailbox->buf = pci_pool_alloc(dev->cmd.pool, gfp_mask, &mailbox->dma);
+        if (!mailbox->buf) {
+                kfree(mailbox);
+                return ERR_PTR(-ENOMEM);
+        }
+
+        return mailbox;
+}
+
+void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox)
+{
+        if (!mailbox)
+                return;
+
+        pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
+        kfree(mailbox);
+}
+
 int mthca_SYS_EN(struct mthca_dev *dev, u8 *status)
 {
         u64 out;
@@ -534,20 +570,20 @@ int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status)
 static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
                          u64 virt, u8 *status)
 {
-        u32 *inbox;
-        dma_addr_t indma;
+        struct mthca_mailbox *mailbox;
         struct mthca_icm_iter iter;
+        __be64 *pages;
         int lg;
         int nent = 0;
         int i;
         int err = 0;
         int ts = 0, tc = 0;
 
-        inbox = pci_alloc_consistent(dev->pdev, PAGE_SIZE, &indma);
-        if (!inbox)
-                return -ENOMEM;
-
-        memset(inbox, 0, PAGE_SIZE);
+        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+        if (IS_ERR(mailbox))
+                return PTR_ERR(mailbox);
+        memset(mailbox->buf, 0, MTHCA_MAILBOX_SIZE);
+        pages = mailbox->buf;
 
         for (mthca_icm_first(icm, &iter);
              !mthca_icm_last(&iter);
@@ -567,19 +603,17 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
                 }
                 for (i = 0; i < mthca_icm_size(&iter) / (1 << lg); ++i, ++nent) {
                         if (virt != -1) {
-                                *((__be64 *) (inbox + nent * 4)) =
-                                        cpu_to_be64(virt);
+                                pages[nent * 2] = cpu_to_be64(virt);
                                 virt += 1 << lg;
                         }
 
-                        *((__be64 *) (inbox + nent * 4 + 2)) =
-                                cpu_to_be64((mthca_icm_addr(&iter) +
-                                             (i << lg)) | (lg - 12));
+                        pages[nent * 2 + 1] = cpu_to_be64((mthca_icm_addr(&iter) +
+                                                           (i << lg)) | (lg - 12));
                         ts += 1 << (lg - 10);
                         ++tc;
 
-                        if (nent == PAGE_SIZE / 16) {
-                                err = mthca_cmd(dev, indma, nent, 0, op,
+                        if (nent == MTHCA_MAILBOX_SIZE / 16) {
+                                err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
                                                 CMD_TIME_CLASS_B, status);
                                 if (err || *status)
                                         goto out;
@@ -589,7 +623,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
         }
 
         if (nent)
-                err = mthca_cmd(dev, indma, nent, 0, op,
+                err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
                                 CMD_TIME_CLASS_B, status);
 
         switch (op) {
@@ -606,7 +640,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
         }
 
 out:
-        pci_free_consistent(dev->pdev, PAGE_SIZE, inbox, indma);
+        mthca_free_mailbox(dev, mailbox);
         return err;
 }
 
@@ -627,8 +661,8 @@ int mthca_RUN_FW(struct mthca_dev *dev, u8 *status)
 
 int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
 {
+        struct mthca_mailbox *mailbox;
         u32 *outbox;
-        dma_addr_t outdma;
         int err = 0;
         u8 lg;
 
@@ -646,12 +680,12 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
 #define QUERY_FW_EQ_ARM_BASE_OFFSET    0x40
 #define QUERY_FW_EQ_SET_CI_BASE_OFFSET 0x48
 
-        outbox = pci_alloc_consistent(dev->pdev, QUERY_FW_OUT_SIZE, &outdma);
-        if (!outbox) {
-                return -ENOMEM;
-        }
+        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+        if (IS_ERR(mailbox))
+                return PTR_ERR(mailbox);
+        outbox = mailbox->buf;
 
-        err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_FW,
+        err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_FW,
                             CMD_TIME_CLASS_A, status);
 
         if (err)
@@ -702,15 +736,15 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
         }
 
 out:
-        pci_free_consistent(dev->pdev, QUERY_FW_OUT_SIZE, outbox, outdma);
+        mthca_free_mailbox(dev, mailbox);
         return err;
 }
 
 int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status)
 {
+        struct mthca_mailbox *mailbox;
         u8 info;
         u32 *outbox;
-        dma_addr_t outdma;
         int err = 0;
 
 #define ENABLE_LAM_OUT_SIZE 0x100
@@ -721,11 +755,12 @@ int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status)
 #define ENABLE_LAM_INFO_HIDDEN_FLAG (1 << 4)
 #define ENABLE_LAM_INFO_ECC_MASK    0x3
 
-        outbox = pci_alloc_consistent(dev->pdev, ENABLE_LAM_OUT_SIZE, &outdma);
-        if (!outbox)
-                return -ENOMEM;
+        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+        if (IS_ERR(mailbox))
+                return PTR_ERR(mailbox);
+        outbox = mailbox->buf;
 
-        err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_ENABLE_LAM,
+        err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_ENABLE_LAM,
                             CMD_TIME_CLASS_C, status);
 
         if (err)
@@ -754,7 +789,7 @@ int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status)
                   (unsigned long long) dev->ddr_end);
 
 out:
-        pci_free_consistent(dev->pdev, ENABLE_LAM_OUT_SIZE, outbox, outdma);
+        mthca_free_mailbox(dev, mailbox);
         return err;
 }
 
@@ -765,9 +800,9 @@ int mthca_DISABLE_LAM(struct mthca_dev *dev, u8 *status)
 
 int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status)
 {
+        struct mthca_mailbox *mailbox;
         u8 info;
         u32 *outbox;
-        dma_addr_t outdma;
         int err = 0;
 
 #define QUERY_DDR_OUT_SIZE 0x100
@@ -778,11 +813,12 @@ int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status)
 #define QUERY_DDR_INFO_HIDDEN_FLAG (1 << 4)
 #define QUERY_DDR_INFO_ECC_MASK    0x3
 
-        outbox = pci_alloc_consistent(dev->pdev, QUERY_DDR_OUT_SIZE, &outdma);
-        if (!outbox)
-                return -ENOMEM;
+        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+        if (IS_ERR(mailbox))
+                return PTR_ERR(mailbox);
+        outbox = mailbox->buf;
 
-        err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_DDR,
+        err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DDR,
                             CMD_TIME_CLASS_A, status);
 
         if (err)
@@ -808,15 +844,15 @@ int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status)
                   (unsigned long long) dev->ddr_end);
 
 out:
-        pci_free_consistent(dev->pdev, QUERY_DDR_OUT_SIZE, outbox, outdma);
+        mthca_free_mailbox(dev, mailbox);
         return err;
 }
 
 int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
                         struct mthca_dev_lim *dev_lim, u8 *status)
 {
+        struct mthca_mailbox *mailbox;
         u32 *outbox;
-        dma_addr_t outdma;
         u8 field;
         u16 size;
         int err;
@@ -881,11 +917,12 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
 #define QUERY_DEV_LIM_LAMR_OFFSET       0x9f
 #define QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET 0xa0
 
-        outbox = pci_alloc_consistent(dev->pdev, QUERY_DEV_LIM_OUT_SIZE, &outdma);
-        if (!outbox)
-                return -ENOMEM;
+        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+        if (IS_ERR(mailbox))
+                return PTR_ERR(mailbox);
+        outbox = mailbox->buf;
 
-        err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_DEV_LIM,
+        err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DEV_LIM,
                             CMD_TIME_CLASS_A, status);
 
         if (err)
@@ -1041,15 +1078,15 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
         }
 
 out:
-        pci_free_consistent(dev->pdev, QUERY_DEV_LIM_OUT_SIZE, outbox, outdma);
+        mthca_free_mailbox(dev, mailbox);
         return err;
 }
 
 int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
                         struct mthca_adapter *adapter, u8 *status)
 {
+        struct mthca_mailbox *mailbox;
         u32 *outbox;
-        dma_addr_t outdma;
         int err;
 
 #define QUERY_ADAPTER_OUT_SIZE 0x100
@@ -1058,23 +1095,24 @@ int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
 #define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08
 #define QUERY_ADAPTER_INTA_PIN_OFFSET    0x10
 
-        outbox = pci_alloc_consistent(dev->pdev, QUERY_ADAPTER_OUT_SIZE, &outdma);
-        if (!outbox)
-                return -ENOMEM;
+        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+        if (IS_ERR(mailbox))
+                return PTR_ERR(mailbox);
+        outbox = mailbox->buf;
 
-        err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_ADAPTER,
+        err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_ADAPTER,
                             CMD_TIME_CLASS_A, status);
 
         if (err)
                 goto out;
 
         MTHCA_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET);
         MTHCA_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET);
         MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET);
         MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
 
 out:
-        pci_free_consistent(dev->pdev, QUERY_DEV_LIM_OUT_SIZE, outbox, outdma);
+        mthca_free_mailbox(dev, mailbox);
         return err;
 }
 
@@ -1082,8 +1120,8 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
                    struct mthca_init_hca_param *param,
                    u8 *status)
 {
+        struct mthca_mailbox *mailbox;
         u32 *inbox;
-        dma_addr_t indma;
         int err;
 
 #define INIT_HCA_IN_SIZE 0x200
@@ -1123,9 +1161,10 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
 #define INIT_HCA_UAR_SCATCH_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x10)
 #define INIT_HCA_UAR_CTX_BASE_OFFSET    (INIT_HCA_UAR_OFFSET + 0x18)
 
-        inbox = pci_alloc_consistent(dev->pdev, INIT_HCA_IN_SIZE, &indma);
-        if (!inbox)
-                return -ENOMEM;
+        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+        if (IS_ERR(mailbox))
+                return PTR_ERR(mailbox);
+        inbox = mailbox->buf;
 
         memset(inbox, 0, INIT_HCA_IN_SIZE);
 
@@ -1188,10 +1227,9 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
                 MTHCA_PUT(inbox, param->uarc_base, INIT_HCA_UAR_CTX_BASE_OFFSET);
         }
 
-        err = mthca_cmd(dev, indma, 0, 0, CMD_INIT_HCA,
-                        HZ, status);
+        err = mthca_cmd(dev, mailbox->dma, 0, 0, CMD_INIT_HCA, HZ, status);
 
-        pci_free_consistent(dev->pdev, INIT_HCA_IN_SIZE, inbox, indma);
+        mthca_free_mailbox(dev, mailbox);
         return err;
 }
 
@@ -1199,8 +1237,8 @@ int mthca_INIT_IB(struct mthca_dev *dev,
                   struct mthca_init_ib_param *param,
                   int port, u8 *status)
 {
+        struct mthca_mailbox *mailbox;
         u32 *inbox;
-        dma_addr_t indma;
         int err;
         u32 flags;
 
@@ -1220,9 +1258,10 @@ int mthca_INIT_IB(struct mthca_dev *dev,
 #define INIT_IB_NODE_GUID_OFFSET 0x18
 #define INIT_IB_SI_GUID_OFFSET   0x20
 
-        inbox = pci_alloc_consistent(dev->pdev, INIT_IB_IN_SIZE, &indma);
-        if (!inbox)
-                return -ENOMEM;
+        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+        if (IS_ERR(mailbox))
+                return PTR_ERR(mailbox);
+        inbox = mailbox->buf;
 
         memset(inbox, 0, INIT_IB_IN_SIZE);
 
@@ -1242,10 +1281,10 @@ int mthca_INIT_IB(struct mthca_dev *dev,
         MTHCA_PUT(inbox, param->node_guid, INIT_IB_NODE_GUID_OFFSET);
         MTHCA_PUT(inbox, param->si_guid,   INIT_IB_SI_GUID_OFFSET);
 
-        err = mthca_cmd(dev, indma, port, 0, CMD_INIT_IB,
+        err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_INIT_IB,
                         CMD_TIME_CLASS_A, status);
 
-        pci_free_consistent(dev->pdev, INIT_HCA_IN_SIZE, inbox, indma);
+        mthca_free_mailbox(dev, mailbox);
         return err;
 }
 
@@ -1262,8 +1301,8 @@ int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status)
 int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
                  int port, u8 *status)
 {
+        struct mthca_mailbox *mailbox;
         u32 *inbox;
-        dma_addr_t indma;
         int err;
         u32 flags = 0;
 
@@ -1274,9 +1313,10 @@ int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
 #define SET_IB_CAP_MASK_OFFSET 0x04
 #define SET_IB_SI_GUID_OFFSET  0x08
 
-        inbox = pci_alloc_consistent(dev->pdev, SET_IB_IN_SIZE, &indma);
-        if (!inbox)
-                return -ENOMEM;
+        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+        if (IS_ERR(mailbox))
+                return PTR_ERR(mailbox);
+        inbox = mailbox->buf;
 
         memset(inbox, 0, SET_IB_IN_SIZE);
 
@@ -1287,10 +1327,10 @@ int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
         MTHCA_PUT(inbox, param->cap_mask, SET_IB_CAP_MASK_OFFSET);
         MTHCA_PUT(inbox, param->si_guid,  SET_IB_SI_GUID_OFFSET);
 
-        err = mthca_cmd(dev, indma, port, 0, CMD_SET_IB,
+        err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_SET_IB,
                         CMD_TIME_CLASS_B, status);
 
-        pci_free_consistent(dev->pdev, INIT_HCA_IN_SIZE, inbox, indma);
+        mthca_free_mailbox(dev, mailbox);
         return err;
 }
 
@@ -1301,20 +1341,22 @@ int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *st
 
 int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status)
 {
+        struct mthca_mailbox *mailbox;
         u64 *inbox;
-        dma_addr_t indma;
         int err;
 
-        inbox = pci_alloc_consistent(dev->pdev, 16, &indma);
-        if (!inbox)
-                return -ENOMEM;
+        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+        if (IS_ERR(mailbox))
+                return PTR_ERR(mailbox);
+        inbox = mailbox->buf;
 
         inbox[0] = cpu_to_be64(virt);
         inbox[1] = cpu_to_be64(dma_addr);
 
-        err = mthca_cmd(dev, indma, 1, 0, CMD_MAP_ICM, CMD_TIME_CLASS_B, status);
+        err = mthca_cmd(dev, mailbox->dma, 1, 0, CMD_MAP_ICM,
+                        CMD_TIME_CLASS_B, status);
 
-        pci_free_consistent(dev->pdev, 16, inbox, indma);
+        mthca_free_mailbox(dev, mailbox);
 
         if (!err)
                 mthca_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
@@ -1359,69 +1401,26 @@ int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
         return 0;
 }
 
-int mthca_SW2HW_MPT(struct mthca_dev *dev, void *mpt_entry,
+int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
                     int mpt_index, u8 *status)
 {
-        dma_addr_t indma;
-        int err;
-
-        indma = pci_map_single(dev->pdev, mpt_entry,
-                               MTHCA_MPT_ENTRY_SIZE,
-                               PCI_DMA_TODEVICE);
-        if (pci_dma_mapping_error(indma))
-                return -ENOMEM;
-
-        err = mthca_cmd(dev, indma, mpt_index, 0, CMD_SW2HW_MPT,
-                        CMD_TIME_CLASS_B, status);
-
-        pci_unmap_single(dev->pdev, indma,
-                         MTHCA_MPT_ENTRY_SIZE, PCI_DMA_TODEVICE);
-        return err;
+        return mthca_cmd(dev, mailbox->dma, mpt_index, 0, CMD_SW2HW_MPT,
+                         CMD_TIME_CLASS_B, status);
 }
 
-int mthca_HW2SW_MPT(struct mthca_dev *dev, void *mpt_entry,
+int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
                     int mpt_index, u8 *status)
 {
-        dma_addr_t outdma = 0;
-        int err;
-
-        if (mpt_entry) {
-                outdma = pci_map_single(dev->pdev, mpt_entry,
-                                        MTHCA_MPT_ENTRY_SIZE,
-                                        PCI_DMA_FROMDEVICE);
-                if (pci_dma_mapping_error(outdma))
-                        return -ENOMEM;
-        }
-
-        err = mthca_cmd_box(dev, 0, outdma, mpt_index, !mpt_entry,
-                            CMD_HW2SW_MPT,
-                            CMD_TIME_CLASS_B, status);
-
-        if (mpt_entry)
-                pci_unmap_single(dev->pdev, outdma,
-                                 MTHCA_MPT_ENTRY_SIZE,
-                                 PCI_DMA_FROMDEVICE);
-        return err;
+        return mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
+                             !mailbox, CMD_HW2SW_MPT,
+                             CMD_TIME_CLASS_B, status);
 }
 
-int mthca_WRITE_MTT(struct mthca_dev *dev, u64 *mtt_entry,
+int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
                     int num_mtt, u8 *status)
 {
-        dma_addr_t indma;
-        int err;
-
-        indma = pci_map_single(dev->pdev, mtt_entry,
-                               (num_mtt + 2) * 8,
-                               PCI_DMA_TODEVICE);
-        if (pci_dma_mapping_error(indma))
-                return -ENOMEM;
-
-        err = mthca_cmd(dev, indma, num_mtt, 0, CMD_WRITE_MTT,
-                        CMD_TIME_CLASS_B, status);
-
-        pci_unmap_single(dev->pdev, indma,
-                         (num_mtt + 2) * 8, PCI_DMA_TODEVICE);
-        return err;
+        return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
+                         CMD_TIME_CLASS_B, status);
 }
 
 int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status)
@@ -1439,92 +1438,38 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
                          0, CMD_MAP_EQ, CMD_TIME_CLASS_B, status);
 }
 
-int mthca_SW2HW_EQ(struct mthca_dev *dev, void *eq_context,
+int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
                    int eq_num, u8 *status)
 {
-        dma_addr_t indma;
-        int err;
-
-        indma = pci_map_single(dev->pdev, eq_context,
-                               MTHCA_EQ_CONTEXT_SIZE,
-                               PCI_DMA_TODEVICE);
-        if (pci_dma_mapping_error(indma))
-                return -ENOMEM;
-
-        err = mthca_cmd(dev, indma, eq_num, 0, CMD_SW2HW_EQ,
-                        CMD_TIME_CLASS_A, status);
-
-        pci_unmap_single(dev->pdev, indma,
-                         MTHCA_EQ_CONTEXT_SIZE, PCI_DMA_TODEVICE);
-        return err;
+        return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
+                         CMD_TIME_CLASS_A, status);
 }
 
-int mthca_HW2SW_EQ(struct mthca_dev *dev, void *eq_context,
+int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
                    int eq_num, u8 *status)
 {
-        dma_addr_t outdma = 0;
-        int err;
-
-        outdma = pci_map_single(dev->pdev, eq_context,
-                                MTHCA_EQ_CONTEXT_SIZE,
-                                PCI_DMA_FROMDEVICE);
-        if (pci_dma_mapping_error(outdma))
-                return -ENOMEM;
-
-        err = mthca_cmd_box(dev, 0, outdma, eq_num, 0,
-                            CMD_HW2SW_EQ,
-                            CMD_TIME_CLASS_A, status);
-
-        pci_unmap_single(dev->pdev, outdma,
-                         MTHCA_EQ_CONTEXT_SIZE,
-                         PCI_DMA_FROMDEVICE);
-        return err;
+        return mthca_cmd_box(dev, 0, mailbox->dma, eq_num, 0,
+                             CMD_HW2SW_EQ,
+                             CMD_TIME_CLASS_A, status);
 }
 
-int mthca_SW2HW_CQ(struct mthca_dev *dev, void *cq_context,
+int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
                    int cq_num, u8 *status)
 {
-        dma_addr_t indma;
-        int err;
-
-        indma = pci_map_single(dev->pdev, cq_context,
-                               MTHCA_CQ_CONTEXT_SIZE,
-                               PCI_DMA_TODEVICE);
-        if (pci_dma_mapping_error(indma))
-                return -ENOMEM;
-
-        err = mthca_cmd(dev, indma, cq_num, 0, CMD_SW2HW_CQ,
+        return mthca_cmd(dev, mailbox->dma, cq_num, 0, CMD_SW2HW_CQ,
                         CMD_TIME_CLASS_A, status);
-
-        pci_unmap_single(dev->pdev, indma,
-                         MTHCA_CQ_CONTEXT_SIZE, PCI_DMA_TODEVICE);
-        return err;
 }
 
-int mthca_HW2SW_CQ(struct mthca_dev *dev, void *cq_context,
+int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
                    int cq_num, u8 *status)
 {
-        dma_addr_t outdma = 0;
-        int err;
-
-        outdma = pci_map_single(dev->pdev, cq_context,
-                                MTHCA_CQ_CONTEXT_SIZE,
-                                PCI_DMA_FROMDEVICE);
-        if (pci_dma_mapping_error(outdma))
-                return -ENOMEM;
-
-        err = mthca_cmd_box(dev, 0, outdma, cq_num, 0,
-                            CMD_HW2SW_CQ,
-                            CMD_TIME_CLASS_A, status);
-
-        pci_unmap_single(dev->pdev, outdma,
-                         MTHCA_CQ_CONTEXT_SIZE,
-                         PCI_DMA_FROMDEVICE);
-        return err;
+        return mthca_cmd_box(dev, 0, mailbox->dma, cq_num, 0,
+                             CMD_HW2SW_CQ,
+                             CMD_TIME_CLASS_A, status);
 }
 
 int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
-                    int is_ee, void *qp_context, u32 optmask,
+                    int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
                     u8 *status)
 {
         static const u16 op[] = {
@@ -1541,36 +1486,34 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
                 [MTHCA_TRANS_ANY2RST] = CMD_ERR2RST_QPEE
         };
         u8 op_mod = 0;
-
-        dma_addr_t indma;
+        int my_mailbox = 0;
         int err;
 
         if (trans < 0 || trans >= ARRAY_SIZE(op))
                 return -EINVAL;
 
         if (trans == MTHCA_TRANS_ANY2RST) {
-                indma = 0;
                 op_mod = 3; /* don't write outbox, any->reset */
 
                 /* For debugging */
-                qp_context = pci_alloc_consistent(dev->pdev, MTHCA_QP_CONTEXT_SIZE,
-                                                  &indma);
-                op_mod = 2; /* write outbox, any->reset */
+                if (!mailbox) {
+                        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+                        if (!IS_ERR(mailbox)) {
+                                my_mailbox = 1;
+                                op_mod = 2; /* write outbox, any->reset */
+                        } else
+                                mailbox = NULL;
+                }
         } else {
-                indma = pci_map_single(dev->pdev, qp_context,
-                                       MTHCA_QP_CONTEXT_SIZE,
-                                       PCI_DMA_TODEVICE);
-                if (pci_dma_mapping_error(indma))
-                        return -ENOMEM;
-
                 if (0) {
                         int i;
                         mthca_dbg(dev, "Dumping QP context:\n");
-                        printk(" opt param mask: %08x\n", be32_to_cpup(qp_context));
+                        printk(" opt param mask: %08x\n", be32_to_cpup(mailbox->buf));
                         for (i = 0; i < 0x100 / 4; ++i) {
                                 if (i % 8 == 0)
                                         printk(" [%02x] ", i * 4);
-                                printk(" %08x", be32_to_cpu(((u32 *) qp_context)[i + 2]));
+                                printk(" %08x",
+                                       be32_to_cpu(((u32 *) mailbox->buf)[i + 2]));
                                 if ((i + 1) % 8 == 0)
                                         printk("\n");
                         }
@@ -1578,55 +1521,39 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
         }
 
         if (trans == MTHCA_TRANS_ANY2RST) {
-                err = mthca_cmd_box(dev, 0, indma, (!!is_ee << 24) | num,
-                                    op_mod, op[trans], CMD_TIME_CLASS_C, status);
+                err = mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
+                                    (!!is_ee << 24) | num, op_mod,
+                                    op[trans], CMD_TIME_CLASS_C, status);
 
-                if (0) {
+                if (0 && mailbox) {
                         int i;
                         mthca_dbg(dev, "Dumping QP context:\n");
-                        printk(" %08x\n", be32_to_cpup(qp_context));
+                        printk(" %08x\n", be32_to_cpup(mailbox->buf));
                         for (i = 0; i < 0x100 / 4; ++i) {
                                 if (i % 8 == 0)
                                         printk("[%02x] ", i * 4);
-                                printk(" %08x", be32_to_cpu(((u32 *) qp_context)[i + 2]));
+                                printk(" %08x",
+                                       be32_to_cpu(((u32 *) mailbox->buf)[i + 2]));
                                 if ((i + 1) % 8 == 0)
                                         printk("\n");
                         }
                 }
 
         } else
-                err = mthca_cmd(dev, indma, (!!is_ee << 24) | num,
+                err = mthca_cmd(dev, mailbox->dma, (!!is_ee << 24) | num,
                                 op_mod, op[trans], CMD_TIME_CLASS_C, status);
 
-        if (trans != MTHCA_TRANS_ANY2RST)
-                pci_unmap_single(dev->pdev, indma,
-                                 MTHCA_QP_CONTEXT_SIZE, PCI_DMA_TODEVICE);
-        else
-                pci_free_consistent(dev->pdev, MTHCA_QP_CONTEXT_SIZE,
-                                    qp_context, indma);
+        if (my_mailbox)
+                mthca_free_mailbox(dev, mailbox);
+
         return err;
 }
 
 int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee,
-                   void *qp_context, u8 *status)
+                   struct mthca_mailbox *mailbox, u8 *status)
 {
-        dma_addr_t outdma = 0;
-        int err;
-
-        outdma = pci_map_single(dev->pdev, qp_context,
-                                MTHCA_QP_CONTEXT_SIZE,
-                                PCI_DMA_FROMDEVICE);
-        if (pci_dma_mapping_error(outdma))
-                return -ENOMEM;
-
-        err = mthca_cmd_box(dev, 0, outdma, (!!is_ee << 24) | num, 0,
-                            CMD_QUERY_QPEE,
-                            CMD_TIME_CLASS_A, status);
-
-        pci_unmap_single(dev->pdev, outdma,
-                         MTHCA_QP_CONTEXT_SIZE,
-                         PCI_DMA_FROMDEVICE);
-        return err;
+        return mthca_cmd_box(dev, 0, mailbox->dma, (!!is_ee << 24) | num, 0,
+                             CMD_QUERY_QPEE, CMD_TIME_CLASS_A, status);
 }
 
 int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
@@ -1656,11 +1583,11 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
 }
 
 int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
-                  int port, struct ib_wc* in_wc, struct ib_grh* in_grh,
+                  int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
                   void *in_mad, void *response_mad, u8 *status)
 {
-        void *box;
-        dma_addr_t dma;
+        struct mthca_mailbox *inmailbox, *outmailbox;
+        void *inbox;
         int err;
         u32 in_modifier = port;
         u8 op_modifier = 0;
@@ -1674,11 +1601,18 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
 #define MAD_IFC_PKEY_OFFSET 0x10e
 #define MAD_IFC_GRH_OFFSET  0x140
 
-        box = pci_alloc_consistent(dev->pdev, MAD_IFC_BOX_SIZE, &dma);
-        if (!box)
-                return -ENOMEM;
+        inmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+        if (IS_ERR(inmailbox))
+                return PTR_ERR(inmailbox);
+        inbox = inmailbox->buf;
+
+        outmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+        if (IS_ERR(outmailbox)) {
+                mthca_free_mailbox(dev, inmailbox);
+                return PTR_ERR(outmailbox);
+        }
 
-        memcpy(box, in_mad, 256);
+        memcpy(inbox, in_mad, 256);
 
         /*
          * Key check traps can't be generated unless we have in_wc to
@@ -1692,97 +1626,65 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
         if (in_wc) {
                 u8 val;
 
-                memset(box + 256, 0, 256);
+                memset(inbox + 256, 0, 256);
 
-                MTHCA_PUT(box, in_wc->qp_num, MAD_IFC_MY_QPN_OFFSET);
-                MTHCA_PUT(box, in_wc->src_qp, MAD_IFC_RQPN_OFFSET);
+                MTHCA_PUT(inbox, in_wc->qp_num, MAD_IFC_MY_QPN_OFFSET);
+                MTHCA_PUT(inbox, in_wc->src_qp, MAD_IFC_RQPN_OFFSET);
 
                 val = in_wc->sl << 4;
-                MTHCA_PUT(box, val, MAD_IFC_SL_OFFSET);
+                MTHCA_PUT(inbox, val, MAD_IFC_SL_OFFSET);
 
                 val = in_wc->dlid_path_bits |
                         (in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
-                MTHCA_PUT(box, val, MAD_IFC_GRH_OFFSET);
+                MTHCA_PUT(inbox, val, MAD_IFC_GRH_OFFSET);
 
-                MTHCA_PUT(box, in_wc->slid, MAD_IFC_RLID_OFFSET);
-                MTHCA_PUT(box, in_wc->pkey_index, MAD_IFC_PKEY_OFFSET);
+                MTHCA_PUT(inbox, in_wc->slid, MAD_IFC_RLID_OFFSET);
+                MTHCA_PUT(inbox, in_wc->pkey_index, MAD_IFC_PKEY_OFFSET);
 
                 if (in_grh)
-                        memcpy((u8 *) box + MAD_IFC_GRH_OFFSET, in_grh, 40);
+                        memcpy(inbox + MAD_IFC_GRH_OFFSET, in_grh, 40);
 
                 op_modifier |= 0x10;
 
                 in_modifier |= in_wc->slid << 16;
         }
 
-        err = mthca_cmd_box(dev, dma, dma + 512, in_modifier, op_modifier,
+        err = mthca_cmd_box(dev, inmailbox->dma, outmailbox->dma,
+                            in_modifier, op_modifier,
                             CMD_MAD_IFC, CMD_TIME_CLASS_C, status);
 
         if (!err && !*status)
-                memcpy(response_mad, box + 512, 256);
+                memcpy(response_mad, outmailbox->buf, 256);
 
-        pci_free_consistent(dev->pdev, MAD_IFC_BOX_SIZE, box, dma);
+        mthca_free_mailbox(dev, inmailbox);
+        mthca_free_mailbox(dev, outmailbox);
         return err;
 }
 
-int mthca_READ_MGM(struct mthca_dev *dev, int index, void *mgm,
-                   u8 *status)
+int mthca_READ_MGM(struct mthca_dev *dev, int index,
+                   struct mthca_mailbox *mailbox, u8 *status)
 {
-        dma_addr_t outdma = 0;
-        int err;
-
-        outdma = pci_map_single(dev->pdev, mgm,
-                                MTHCA_MGM_ENTRY_SIZE,
-                                PCI_DMA_FROMDEVICE);
-        if (pci_dma_mapping_error(outdma))
-                return -ENOMEM;
-
-        err = mthca_cmd_box(dev, 0, outdma, index, 0,
-                            CMD_READ_MGM,
-                            CMD_TIME_CLASS_A, status);
-
-        pci_unmap_single(dev->pdev, outdma,
-                         MTHCA_MGM_ENTRY_SIZE,
-                         PCI_DMA_FROMDEVICE);
-        return err;
+        return mthca_cmd_box(dev, 0, mailbox->dma, index, 0,
+                             CMD_READ_MGM, CMD_TIME_CLASS_A, status);
 }
 
-int mthca_WRITE_MGM(struct mthca_dev *dev, int index, void *mgm,
-                    u8 *status)
+int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
+                    struct mthca_mailbox *mailbox, u8 *status)
 {
-        dma_addr_t indma;
-        int err;
-
-        indma = pci_map_single(dev->pdev, mgm,
-                               MTHCA_MGM_ENTRY_SIZE,
-                               PCI_DMA_TODEVICE);
-        if (pci_dma_mapping_error(indma))
-                return -ENOMEM;
-
-        err = mthca_cmd(dev, indma, index, 0, CMD_WRITE_MGM,
-                        CMD_TIME_CLASS_A, status);
-
-        pci_unmap_single(dev->pdev, indma,
-                         MTHCA_MGM_ENTRY_SIZE, PCI_DMA_TODEVICE);
-        return err;
+        return mthca_cmd(dev, mailbox->dma, index, 0, CMD_WRITE_MGM,
+                         CMD_TIME_CLASS_A, status);
 }
 
-int mthca_MGID_HASH(struct mthca_dev *dev, void *gid, u16 *hash,
-                    u8 *status)
+int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+                    u16 *hash, u8 *status)
 {
-        dma_addr_t indma;
         u64 imm;
         int err;
 
-        indma = pci_map_single(dev->pdev, gid, 16, PCI_DMA_TODEVICE);
-        if (pci_dma_mapping_error(indma))
-                return -ENOMEM;
-
-        err = mthca_cmd_imm(dev, indma, &imm, 0, 0, CMD_MGID_HASH,
+        err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH,
                             CMD_TIME_CLASS_A, status);
-        *hash = imm;
 
-        pci_unmap_single(dev->pdev, indma, 16, PCI_DMA_TODEVICE);
+        *hash = imm;
         return err;
 }
 