Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_av.c        |  6
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c       |  8
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c        | 12
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h       |  5
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c        | 18
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c      |  6
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.c   |  4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mr.c        | 28
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_profile.c   |  8
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c  |  6
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c        | 18
11 files changed, 62 insertions(+), 57 deletions(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index e4ee5d024dfb..085baf393ca4 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -62,7 +62,7 @@ int mthca_create_ah(struct mthca_dev *dev,
 
 	ah->type = MTHCA_AH_PCI_POOL;
 
-	if (dev->hca_type == ARBEL_NATIVE) {
+	if (mthca_is_memfree(dev)) {
 		ah->av = kmalloc(sizeof *ah->av, GFP_ATOMIC);
 		if (!ah->av)
 			return -ENOMEM;
@@ -192,7 +192,7 @@ int __devinit mthca_init_av_table(struct mthca_dev *dev)
 {
 	int err;
 
-	if (dev->hca_type == ARBEL_NATIVE)
+	if (mthca_is_memfree(dev))
 		return 0;
 
 	err = mthca_alloc_init(&dev->av_table.alloc,
@@ -231,7 +231,7 @@ int __devinit mthca_init_av_table(struct mthca_dev *dev)
 
 void __devexit mthca_cleanup_av_table(struct mthca_dev *dev)
 {
-	if (dev->hca_type == ARBEL_NATIVE)
+	if (mthca_is_memfree(dev))
 		return;
 
 	if (dev->av_table.av_map)
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 3c1cfc466522..cd9ed958d92f 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -651,7 +651,7 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
 	mthca_dbg(dev, "FW version %012llx, max commands %d\n",
 		  (unsigned long long) dev->fw_ver, dev->cmd.max_cmds);
 
-	if (dev->hca_type == ARBEL_NATIVE) {
+	if (mthca_is_memfree(dev)) {
 		MTHCA_GET(dev->fw.arbel.fw_pages, outbox, QUERY_FW_SIZE_OFFSET);
 		MTHCA_GET(dev->fw.arbel.clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
 		MTHCA_GET(dev->fw.arbel.eq_arm_base, outbox, QUERY_FW_EQ_ARM_BASE_OFFSET);
@@ -984,7 +984,7 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
 
 	mthca_dbg(dev, "Flags: %08x\n", dev_lim->flags);
 
-	if (dev->hca_type == ARBEL_NATIVE) {
+	if (mthca_is_memfree(dev)) {
 		MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSZ_SRQ_OFFSET);
 		dev_lim->hca.arbel.resize_srq = field & 1;
 		MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_RQ_OFFSET);
@@ -1148,7 +1148,7 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
 	/* TPT attributes */
 
 	MTHCA_PUT(inbox, param->mpt_base, INIT_HCA_MPT_BASE_OFFSET);
-	if (dev->hca_type != ARBEL_NATIVE)
+	if (!mthca_is_memfree(dev))
 		MTHCA_PUT(inbox, param->mtt_seg_sz, INIT_HCA_MTT_SEG_SZ_OFFSET);
 	MTHCA_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
 	MTHCA_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET);
@@ -1161,7 +1161,7 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
 
 	MTHCA_PUT(inbox, param->uar_scratch_base, INIT_HCA_UAR_SCATCH_BASE_OFFSET);
 
-	if (dev->hca_type == ARBEL_NATIVE) {
+	if (mthca_is_memfree(dev)) {
 		MTHCA_PUT(inbox, param->log_uarc_sz, INIT_HCA_UARC_SZ_OFFSET);
 		MTHCA_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);
 		MTHCA_PUT(inbox, param->uarc_base, INIT_HCA_UAR_CTX_BASE_OFFSET);
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 0ad954e18bd7..2bf347b84c31 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -180,7 +180,7 @@ static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
 {
 	u32 doorbell[2];
 
-	if (dev->hca_type == ARBEL_NATIVE) {
+	if (mthca_is_memfree(dev)) {
 		*cq->set_ci_db = cpu_to_be32(cq->cons_index);
 		wmb();
 	} else {
@@ -760,7 +760,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
 	if (cq->cqn == -1)
 		return -ENOMEM;
 
-	if (dev->hca_type == ARBEL_NATIVE) {
+	if (mthca_is_memfree(dev)) {
 		cq->arm_sn = 1;
 
 		err = mthca_table_get(dev, dev->cq_table.table, cq->cqn);
@@ -811,7 +811,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
 	cq_context->lkey = cpu_to_be32(cq->mr.ibmr.lkey);
 	cq_context->cqn = cpu_to_be32(cq->cqn);
 
-	if (dev->hca_type == ARBEL_NATIVE) {
+	if (mthca_is_memfree(dev)) {
 		cq_context->ci_db = cpu_to_be32(cq->set_ci_db_index);
 		cq_context->state_db = cpu_to_be32(cq->arm_db_index);
 	}
@@ -851,11 +851,11 @@ err_out_free_mr:
 err_out_mailbox:
 	kfree(mailbox);
 
-	if (dev->hca_type == ARBEL_NATIVE)
+	if (mthca_is_memfree(dev))
 		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);
 
 err_out_ci:
-	if (dev->hca_type == ARBEL_NATIVE)
+	if (mthca_is_memfree(dev))
 		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
 
 err_out_icm:
@@ -916,7 +916,7 @@ void mthca_free_cq(struct mthca_dev *dev,
 	mthca_free_mr(dev, &cq->mr);
 	mthca_free_cq_buf(dev, cq);
 
-	if (dev->hca_type == ARBEL_NATIVE) {
+	if (mthca_is_memfree(dev)) {
 		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);
 		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
 		mthca_table_put(dev, dev->cq_table.table, cq->cqn);
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index cca3ca7196a3..616a0de54b40 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -470,4 +470,9 @@ static inline struct mthca_dev *to_mdev(struct ib_device *ibdev)
 	return container_of(ibdev, struct mthca_dev, ib_dev);
 }
 
+static inline int mthca_is_memfree(struct mthca_dev *dev)
+{
+	return dev->hca_type == ARBEL_NATIVE;
+}
+
 #endif /* MTHCA_DEV_H */
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index b6f2a46fc84b..f46d615d396f 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -198,7 +198,7 @@ static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u
 
 static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
 {
-	if (dev->hca_type == ARBEL_NATIVE)
+	if (mthca_is_memfree(dev))
 		arbel_set_eq_ci(dev, eq, ci);
 	else
 		tavor_set_eq_ci(dev, eq, ci);
@@ -223,7 +223,7 @@ static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask)
 
 static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)
 {
-	if (dev->hca_type != ARBEL_NATIVE) {
+	if (!mthca_is_memfree(dev)) {
 		u32 doorbell[2];
 
 		doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_DISARM_CQ | eqn);
@@ -535,11 +535,11 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
 					MTHCA_EQ_OWNER_HW |
 					MTHCA_EQ_STATE_ARMED |
 					MTHCA_EQ_FLAG_TR);
-	if (dev->hca_type == ARBEL_NATIVE)
+	if (mthca_is_memfree(dev))
 		eq_context->flags |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL);
 
 	eq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
-	if (dev->hca_type == ARBEL_NATIVE) {
+	if (mthca_is_memfree(dev)) {
 		eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num);
 	} else {
 		eq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
@@ -686,7 +686,7 @@ static int __devinit mthca_map_eq_regs(struct mthca_dev *dev)
 
 	mthca_base = pci_resource_start(dev->pdev, 0);
 
-	if (dev->hca_type == ARBEL_NATIVE) {
+	if (mthca_is_memfree(dev)) {
 		/*
 		 * We assume that the EQ arm and EQ set CI registers
 		 * fall within the first BAR. We can't trust the
@@ -756,7 +756,7 @@ static int __devinit mthca_map_eq_regs(struct mthca_dev *dev)
 
 static void __devexit mthca_unmap_eq_regs(struct mthca_dev *dev)
 {
-	if (dev->hca_type == ARBEL_NATIVE) {
+	if (mthca_is_memfree(dev)) {
 		mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
 				dev->fw.arbel.eq_set_ci_base,
 				MTHCA_EQ_SET_CI_SIZE,
@@ -880,7 +880,7 @@ int __devinit mthca_init_eq_table(struct mthca_dev *dev)
 
 	for (i = 0; i < MTHCA_NUM_EQ; ++i) {
 		err = request_irq(dev->eq_table.eq[i].msi_x_vector,
-				  dev->hca_type == ARBEL_NATIVE ?
+				  mthca_is_memfree(dev) ?
 				  mthca_arbel_msi_x_interrupt :
 				  mthca_tavor_msi_x_interrupt,
 				  0, eq_name[i], dev->eq_table.eq + i);
@@ -890,7 +890,7 @@ int __devinit mthca_init_eq_table(struct mthca_dev *dev)
 		}
 	} else {
 		err = request_irq(dev->pdev->irq,
-				  dev->hca_type == ARBEL_NATIVE ?
+				  mthca_is_memfree(dev) ?
 				  mthca_arbel_interrupt :
 				  mthca_tavor_interrupt,
 				  SA_SHIRQ, DRV_NAME, dev);
@@ -918,7 +918,7 @@ int __devinit mthca_init_eq_table(struct mthca_dev *dev)
 			  dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status);
 
 	for (i = 0; i < MTHCA_EQ_CMD; ++i)
-		if (dev->hca_type == ARBEL_NATIVE)
+		if (mthca_is_memfree(dev))
 			arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask);
 		else
 			tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index fdfc2b788e64..c9c94686e7a9 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -601,7 +601,7 @@ err_disable:
 
 static int __devinit mthca_init_hca(struct mthca_dev *mdev)
 {
-	if (mdev->hca_type == ARBEL_NATIVE)
+	if (mthca_is_memfree(mdev))
 		return mthca_init_arbel(mdev);
 	else
 		return mthca_init_tavor(mdev);
@@ -835,7 +835,7 @@ static void mthca_close_hca(struct mthca_dev *mdev)
 
 	mthca_CLOSE_HCA(mdev, 0, &status);
 
-	if (mdev->hca_type == ARBEL_NATIVE) {
+	if (mthca_is_memfree(mdev)) {
 		mthca_free_icm_table(mdev, mdev->cq_table.table);
 		mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
 		mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
@@ -939,7 +939,7 @@ static int __devinit mthca_init_one(struct pci_dev *pdev,
 	mdev->pdev = pdev;
 	mdev->hca_type = id->driver_data;
 
-	if (mdev->hca_type == ARBEL_NATIVE && !mthca_memfree_warned++)
+	if (mthca_is_memfree(mdev) && !mthca_memfree_warned++)
 		mthca_warn(mdev, "Warning: native MT25208 mode support is incomplete. "
 			   "Your HCA may not work properly.\n");
 
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index ea45de8c5b8e..5824b6d3769f 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -472,7 +472,7 @@ int mthca_init_db_tab(struct mthca_dev *dev)
 {
 	int i;
 
-	if (dev->hca_type != ARBEL_NATIVE)
+	if (!mthca_is_memfree(dev))
 		return 0;
 
 	dev->db_tab = kmalloc(sizeof *dev->db_tab, GFP_KERNEL);
@@ -504,7 +504,7 @@ void mthca_cleanup_db_tab(struct mthca_dev *dev)
 	int i;
 	u8 status;
 
-	if (dev->hca_type != ARBEL_NATIVE)
+	if (!mthca_is_memfree(dev))
 		return;
 
 	/*
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index a85b503b8522..8960fc2306be 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -181,7 +181,7 @@ static u32 mthca_alloc_mtt(struct mthca_dev *dev, int order,
 	if (seg == -1)
 		return -1;
 
-	if (dev->hca_type == ARBEL_NATIVE)
+	if (mthca_is_memfree(dev))
 		if (mthca_table_get_range(dev, dev->mr_table.mtt_table, seg,
 					  seg + (1 << order) - 1)) {
 			mthca_buddy_free(buddy, seg, order);
@@ -196,7 +196,7 @@ static void mthca_free_mtt(struct mthca_dev *dev, u32 seg, int order,
 {
 	mthca_buddy_free(buddy, seg, order);
 
-	if (dev->hca_type == ARBEL_NATIVE)
+	if (mthca_is_memfree(dev))
 		mthca_table_put_range(dev, dev->mr_table.mtt_table, seg,
 				      seg + (1 << order) - 1);
 }
@@ -223,7 +223,7 @@ static inline u32 arbel_key_to_hw_index(u32 key)
 
 static inline u32 hw_index_to_key(struct mthca_dev *dev, u32 ind)
 {
-	if (dev->hca_type == ARBEL_NATIVE)
+	if (mthca_is_memfree(dev))
 		return arbel_hw_index_to_key(ind);
 	else
 		return tavor_hw_index_to_key(ind);
@@ -231,7 +231,7 @@ static inline u32 hw_index_to_key(struct mthca_dev *dev, u32 ind)
 
 static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key)
 {
-	if (dev->hca_type == ARBEL_NATIVE)
+	if (mthca_is_memfree(dev))
 		return arbel_key_to_hw_index(key);
 	else
 		return tavor_key_to_hw_index(key);
@@ -254,7 +254,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
 		return -ENOMEM;
 	mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);
 
-	if (dev->hca_type == ARBEL_NATIVE) {
+	if (mthca_is_memfree(dev)) {
 		err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
 		if (err)
 			goto err_out_mpt_free;
@@ -299,7 +299,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
 	return err;
 
 err_out_table:
-	if (dev->hca_type == ARBEL_NATIVE)
+	if (mthca_is_memfree(dev))
 		mthca_table_put(dev, dev->mr_table.mpt_table, key);
 
 err_out_mpt_free:
@@ -329,7 +329,7 @@ int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
 		return -ENOMEM;
 	mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);
 
-	if (dev->hca_type == ARBEL_NATIVE) {
+	if (mthca_is_memfree(dev)) {
 		err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
 		if (err)
 			goto err_out_mpt_free;
@@ -437,7 +437,7 @@ err_out_free_mtt:
 	mthca_free_mtt(dev, mr->first_seg, mr->order, &dev->mr_table.mtt_buddy);
 
 err_out_table:
-	if (dev->hca_type == ARBEL_NATIVE)
+	if (mthca_is_memfree(dev))
 		mthca_table_put(dev, dev->mr_table.mpt_table, key);
 
 err_out_mpt_free:
@@ -452,7 +452,7 @@ static void mthca_free_region(struct mthca_dev *dev, u32 lkey, int order,
 	if (order >= 0)
 		mthca_free_mtt(dev, first_seg, order, buddy);
 
-	if (dev->hca_type == ARBEL_NATIVE)
+	if (mthca_is_memfree(dev))
 		mthca_table_put(dev, dev->mr_table.mpt_table,
 				arbel_key_to_hw_index(lkey));
 
@@ -498,7 +498,7 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
 		return -EINVAL;
 
 	/* For Arbel, all MTTs must fit in the same page. */
-	if (dev->hca_type == ARBEL_NATIVE &&
+	if (mthca_is_memfree(dev) &&
 	    mr->attr.max_pages * sizeof *mr->mem.arbel.mtts > PAGE_SIZE)
 		return -EINVAL;
 
@@ -511,7 +511,7 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
 	idx = key & (dev->limits.num_mpts - 1);
 	mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);
 
-	if (dev->hca_type == ARBEL_NATIVE) {
+	if (mthca_is_memfree(dev)) {
 		err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
 		if (err)
 			goto err_out_mpt_free;
@@ -534,7 +534,7 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
 
 	mtt_seg = mr->first_seg * MTHCA_MTT_SEG_SIZE;
 
-	if (dev->hca_type == ARBEL_NATIVE) {
+	if (mthca_is_memfree(dev)) {
 		mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
 						      mr->first_seg);
 		BUG_ON(!mr->mem.arbel.mtts);
@@ -596,7 +596,7 @@ err_out_free_mtt:
 		       dev->mr_table.fmr_mtt_buddy);
 
 err_out_table:
-	if (dev->hca_type == ARBEL_NATIVE)
+	if (mthca_is_memfree(dev))
 		mthca_table_put(dev, dev->mr_table.mpt_table, key);
 
 err_out_mpt_free:
@@ -765,7 +765,7 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev)
 	if (err)
 		return err;
 
-	if (dev->hca_type != ARBEL_NATIVE &&
+	if (!mthca_is_memfree(dev) &&
 	    (dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN))
 		dev->limits.fmr_reserved_mtts = 0;
 	else
diff --git a/drivers/infiniband/hw/mthca/mthca_profile.c b/drivers/infiniband/hw/mthca/mthca_profile.c
index fd3f167e6460..4fedc32d5871 100644
--- a/drivers/infiniband/hw/mthca/mthca_profile.c
+++ b/drivers/infiniband/hw/mthca/mthca_profile.c
@@ -116,11 +116,11 @@ u64 mthca_make_profile(struct mthca_dev *dev,
 		profile[i].type = i;
 		profile[i].log_num = max(ffs(profile[i].num) - 1, 0);
 		profile[i].size *= profile[i].num;
-		if (dev->hca_type == ARBEL_NATIVE)
+		if (mthca_is_memfree(dev))
 			profile[i].size = max(profile[i].size, (u64) PAGE_SIZE);
 	}
 
-	if (dev->hca_type == ARBEL_NATIVE) {
+	if (mthca_is_memfree(dev)) {
 		mem_base = 0;
 		mem_avail = dev_lim->hca.arbel.max_icm_sz;
 	} else {
@@ -165,7 +165,7 @@ u64 mthca_make_profile(struct mthca_dev *dev,
 			  (unsigned long long) profile[i].size);
 	}
 
-	if (dev->hca_type == ARBEL_NATIVE)
+	if (mthca_is_memfree(dev))
 		mthca_dbg(dev, "HCA context memory: reserving %d KB\n",
 			  (int) (total_size >> 10));
 	else
@@ -267,7 +267,7 @@ u64 mthca_make_profile(struct mthca_dev *dev,
 	 * out of the MR pool. They don't use additional memory, but
 	 * we assign them as part of the HCA profile anyway.
 	 */
-	if (dev->hca_type == ARBEL_NATIVE)
+	if (mthca_is_memfree(dev))
 		dev->limits.fmr_reserved_mtts = 0;
 	else
 		dev->limits.fmr_reserved_mtts = request->fmr_reserved_mtts;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 28199e42b36f..501c9cc4e1a1 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -625,7 +625,7 @@ static int mthca_unmap_fmr(struct list_head *fmr_list)
 	if (!mdev)
 		return 0;
 
-	if (mdev->hca_type == ARBEL_NATIVE) {
+	if (mthca_is_memfree(mdev)) {
 		list_for_each_entry(fmr, fmr_list, list)
 			mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));
 
@@ -710,7 +710,7 @@ int mthca_register_device(struct mthca_dev *dev)
 	dev->ib_dev.alloc_fmr = mthca_alloc_fmr;
 	dev->ib_dev.unmap_fmr = mthca_unmap_fmr;
 	dev->ib_dev.dealloc_fmr = mthca_dealloc_fmr;
-	if (dev->hca_type == ARBEL_NATIVE)
+	if (mthca_is_memfree(dev))
 		dev->ib_dev.map_phys_fmr = mthca_arbel_map_phys_fmr;
 	else
 		dev->ib_dev.map_phys_fmr = mthca_tavor_map_phys_fmr;
@@ -720,7 +720,7 @@ int mthca_register_device(struct mthca_dev *dev)
 	dev->ib_dev.detach_mcast = mthca_multicast_detach;
 	dev->ib_dev.process_mad = mthca_process_mad;
 
-	if (dev->hca_type == ARBEL_NATIVE) {
+	if (mthca_is_memfree(dev)) {
 		dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq;
 		dev->ib_dev.post_send = mthca_arbel_post_send;
 		dev->ib_dev.post_recv = mthca_arbel_post_receive;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 997a34a2b2be..0db4c9761611 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -639,7 +639,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	else if (attr_mask & IB_QP_PATH_MTU)
 		qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
 
-	if (dev->hca_type == ARBEL_NATIVE) {
+	if (mthca_is_memfree(dev)) {
 		qp_context->rq_size_stride =
 			((ffs(qp->rq.max) - 1) << 3) | (qp->rq.wqe_shift - 4);
 		qp_context->sq_size_stride =
@@ -731,7 +731,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
 	qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);
 
-	if (dev->hca_type == ARBEL_NATIVE) {
+	if (mthca_is_memfree(dev)) {
 		qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
 		qp_context->snd_db_index = cpu_to_be32(qp->sq.db_index);
 	}
@@ -822,7 +822,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 
 	qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);
 
-	if (dev->hca_type == ARBEL_NATIVE)
+	if (mthca_is_memfree(dev))
 		qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index);
 
 	if (attr_mask & IB_QP_QKEY) {
@@ -897,7 +897,7 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
 		size += 2 * sizeof (struct mthca_data_seg);
 		break;
 	case UD:
-		if (dev->hca_type == ARBEL_NATIVE)
+		if (mthca_is_memfree(dev))
 			size += sizeof (struct mthca_arbel_ud_seg);
 		else
 			size += sizeof (struct mthca_tavor_ud_seg);
@@ -1016,7 +1016,7 @@ static int mthca_alloc_memfree(struct mthca_dev *dev,
 {
 	int ret = 0;
 
-	if (dev->hca_type == ARBEL_NATIVE) {
+	if (mthca_is_memfree(dev)) {
 		ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
 		if (ret)
 			return ret;
@@ -1057,7 +1057,7 @@ err_qpc:
 static void mthca_free_memfree(struct mthca_dev *dev,
 			       struct mthca_qp *qp)
 {
-	if (dev->hca_type == ARBEL_NATIVE) {
+	if (mthca_is_memfree(dev)) {
 		mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
 		mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
 		mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
@@ -1104,7 +1104,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 		return ret;
 	}
 
-	if (dev->hca_type == ARBEL_NATIVE) {
+	if (mthca_is_memfree(dev)) {
 		for (i = 0; i < qp->rq.max; ++i) {
 			wqe = get_recv_wqe(qp, i);
 			wqe->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
@@ -1127,7 +1127,7 @@ static void mthca_align_qp_size(struct mthca_dev *dev, struct mthca_qp *qp)
 {
 	int i;
 
-	if (dev->hca_type != ARBEL_NATIVE)
+	if (!mthca_is_memfree(dev))
 		return;
 
 	for (i = 0; 1 << i < qp->rq.max; ++i)
@@ -2011,7 +2011,7 @@ int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
 	else
 		next = get_recv_wqe(qp, index);
 
-	if (dev->hca_type == ARBEL_NATIVE)
+	if (mthca_is_memfree(dev))
 		*dbd = 1;
 	else
 		*dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));