about summary refs log tree commit diff stats
path: root/drivers/net/mlx4/main.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/mlx4/main.c')
-rw-r--r--  drivers/net/mlx4/main.c | 37
1 files changed, 15 insertions, 22 deletions
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index dac621b1e9fc..3dd481e77f92 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -525,7 +525,10 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
525 goto err_unmap_aux; 525 goto err_unmap_aux;
526 } 526 }
527 527
528 err = mlx4_map_eq_icm(dev, init_hca->eqc_base); 528 err = mlx4_init_icm_table(dev, &priv->eq_table.table,
529 init_hca->eqc_base, dev_cap->eqc_entry_sz,
530 dev->caps.num_eqs, dev->caps.num_eqs,
531 0, 0);
529 if (err) { 532 if (err) {
530 mlx4_err(dev, "Failed to map EQ context memory, aborting.\n"); 533 mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
531 goto err_unmap_cmpt; 534 goto err_unmap_cmpt;
@@ -668,7 +671,7 @@ err_unmap_mtt:
668 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 671 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
669 672
670err_unmap_eq: 673err_unmap_eq:
671 mlx4_unmap_eq_icm(dev); 674 mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
672 675
673err_unmap_cmpt: 676err_unmap_cmpt:
674 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 677 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
@@ -698,11 +701,11 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
698 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 701 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
699 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 702 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
700 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 703 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
704 mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
701 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 705 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
702 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 706 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
703 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 707 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
704 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 708 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
705 mlx4_unmap_eq_icm(dev);
706 709
707 mlx4_UNMAP_ICM_AUX(dev); 710 mlx4_UNMAP_ICM_AUX(dev);
708 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 711 mlx4_free_icm(dev, priv->fw.aux_icm, 0);
@@ -786,7 +789,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
786 return 0; 789 return 0;
787 790
788err_close: 791err_close:
789 mlx4_close_hca(dev); 792 mlx4_CLOSE_HCA(dev, 0);
790 793
791err_free_icm: 794err_free_icm:
792 mlx4_free_icms(dev); 795 mlx4_free_icms(dev);
@@ -1070,18 +1073,12 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1070 goto err_disable_pdev; 1073 goto err_disable_pdev;
1071 } 1074 }
1072 1075
1073 err = pci_request_region(pdev, 0, DRV_NAME); 1076 err = pci_request_regions(pdev, DRV_NAME);
1074 if (err) { 1077 if (err) {
1075 dev_err(&pdev->dev, "Cannot request control region, aborting.\n"); 1078 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
1076 goto err_disable_pdev; 1079 goto err_disable_pdev;
1077 } 1080 }
1078 1081
1079 err = pci_request_region(pdev, 2, DRV_NAME);
1080 if (err) {
1081 dev_err(&pdev->dev, "Cannot request UAR region, aborting.\n");
1082 goto err_release_bar0;
1083 }
1084
1085 pci_set_master(pdev); 1082 pci_set_master(pdev);
1086 1083
1087 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 1084 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -1090,7 +1087,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1090 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1087 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1091 if (err) { 1088 if (err) {
1092 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); 1089 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
1093 goto err_release_bar2; 1090 goto err_release_regions;
1094 } 1091 }
1095 } 1092 }
1096 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 1093 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -1101,7 +1098,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1101 if (err) { 1098 if (err) {
1102 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, " 1099 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
1103 "aborting.\n"); 1100 "aborting.\n");
1104 goto err_release_bar2; 1101 goto err_release_regions;
1105 } 1102 }
1106 } 1103 }
1107 1104
@@ -1110,7 +1107,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1110 dev_err(&pdev->dev, "Device struct alloc failed, " 1107 dev_err(&pdev->dev, "Device struct alloc failed, "
1111 "aborting.\n"); 1108 "aborting.\n");
1112 err = -ENOMEM; 1109 err = -ENOMEM;
1113 goto err_release_bar2; 1110 goto err_release_regions;
1114 } 1111 }
1115 1112
1116 dev = &priv->dev; 1113 dev = &priv->dev;
@@ -1205,11 +1202,8 @@ err_cmd:
1205err_free_dev: 1202err_free_dev:
1206 kfree(priv); 1203 kfree(priv);
1207 1204
1208err_release_bar2: 1205err_release_regions:
1209 pci_release_region(pdev, 2); 1206 pci_release_regions(pdev);
1210
1211err_release_bar0:
1212 pci_release_region(pdev, 0);
1213 1207
1214err_disable_pdev: 1208err_disable_pdev:
1215 pci_disable_device(pdev); 1209 pci_disable_device(pdev);
@@ -1265,8 +1259,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
1265 pci_disable_msix(pdev); 1259 pci_disable_msix(pdev);
1266 1260
1267 kfree(priv); 1261 kfree(priv);
1268 pci_release_region(pdev, 2); 1262 pci_release_regions(pdev);
1269 pci_release_region(pdev, 0);
1270 pci_disable_device(pdev); 1263 pci_disable_device(pdev);
1271 pci_set_drvdata(pdev, NULL); 1264 pci_set_drvdata(pdev, NULL);
1272 } 1265 }