Diffstat (limited to 'drivers/net/mlx4/main.c')
 drivers/net/mlx4/main.c | 127 +++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 123 insertions(+), 4 deletions(-)
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 2765a3ce9c24..3814fc9b1145 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -39,6 +39,7 @@
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
+#include <linux/io-mapping.h>
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/doorbell.h>
@@ -227,6 +228,9 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
         dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
         dev->caps.udp_rss            = dev_cap->udp_rss;
         dev->caps.loopback_support   = dev_cap->loopback_support;
+        dev->caps.vep_uc_steering    = dev_cap->vep_uc_steering;
+        dev->caps.vep_mc_steering    = dev_cap->vep_mc_steering;
+        dev->caps.wol                = dev_cap->wol;
         dev->caps.max_gso_sz         = dev_cap->max_gso_sz;
 
         dev->caps.log_num_macs  = log_num_mac;
@@ -718,8 +722,31 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
         mlx4_free_icm(dev, priv->fw.aux_icm, 0);
 }
 
+static int map_bf_area(struct mlx4_dev *dev)
+{
+        struct mlx4_priv *priv = mlx4_priv(dev);
+        resource_size_t bf_start;
+        resource_size_t bf_len;
+        int err = 0;
+
+        bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT);
+        bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT);
+        priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
+        if (!priv->bf_mapping)
+                err = -ENOMEM;
+
+        return err;
+}
+
+static void unmap_bf_area(struct mlx4_dev *dev)
+{
+        if (mlx4_priv(dev)->bf_mapping)
+                io_mapping_free(mlx4_priv(dev)->bf_mapping);
+}
+
 static void mlx4_close_hca(struct mlx4_dev *dev)
 {
+        unmap_bf_area(dev);
         mlx4_CLOSE_HCA(dev, 0);
         mlx4_free_icms(dev);
         mlx4_UNMAP_FA(dev);
@@ -772,6 +799,9 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
                 goto err_stop_fw;
         }
 
+        if (map_bf_area(dev))
+                mlx4_dbg(dev, "Failed to map blue flame area\n");
+
         init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
 
         err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
@@ -802,6 +832,7 @@ err_free_icm:
         mlx4_free_icms(dev);
 
 err_stop_fw:
+        unmap_bf_area(dev);
         mlx4_UNMAP_FA(dev);
         mlx4_free_icm(dev, priv->fw.fw_icm, 0);
 
@@ -913,6 +944,10 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
         }
 
         for (port = 1; port <= dev->caps.num_ports; port++) {
+                enum mlx4_port_type port_type = 0;
+                mlx4_SENSE_PORT(dev, port, &port_type);
+                if (port_type)
+                        dev->caps.port_type[port] = port_type;
                 ib_port_default_caps = 0;
                 err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
                 if (err)
@@ -927,6 +962,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
                         goto err_mcg_table_free;
                 }
         }
+        mlx4_set_port_mask(dev);
 
         return 0;
 
@@ -969,13 +1005,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
 {
         struct mlx4_priv *priv = mlx4_priv(dev);
         struct msix_entry *entries;
-        int nreq;
+        int nreq = min_t(int, dev->caps.num_ports *
+                         min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT)
+                                + MSIX_LEGACY_SZ, MAX_MSIX);
         int err;
         int i;
 
         if (msi_x) {
                 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
-                             num_possible_cpus() + 1);
+                             nreq);
                 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
                 if (!entries)
                         goto no_msi;
@@ -998,7 +1036,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
                         goto no_msi;
                 }
 
-                dev->caps.num_comp_vectors = nreq - 1;
+                if (nreq <
+                    MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
+                        /*Working in legacy mode , all EQ's shared*/
+                        dev->caps.comp_pool        = 0;
+                        dev->caps.num_comp_vectors = nreq - 1;
+                } else {
+                        dev->caps.comp_pool        = nreq - MSIX_LEGACY_SZ;
+                        dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
+                }
                 for (i = 0; i < nreq; ++i)
                         priv->eq_table.eq[i].irq = entries[i].vector;
 
@@ -1010,6 +1056,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
 
 no_msi:
         dev->caps.num_comp_vectors = 1;
+        dev->caps.comp_pool        = 0;
 
         for (i = 0; i < 2; ++i)
                 priv->eq_table.eq[i].irq = dev->pdev->irq;
@@ -1049,6 +1096,59 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
         device_remove_file(&info->dev->pdev->dev, &info->port_attr);
 }
 
+static int mlx4_init_steering(struct mlx4_dev *dev)
+{
+        struct mlx4_priv *priv = mlx4_priv(dev);
+        int num_entries = dev->caps.num_ports;
+        int i, j;
+
+        priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
+        if (!priv->steer)
+                return -ENOMEM;
+
+        for (i = 0; i < num_entries; i++) {
+                for (j = 0; j < MLX4_NUM_STEERS; j++) {
+                        INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
+                        INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
+                }
+                INIT_LIST_HEAD(&priv->steer[i].high_prios);
+        }
+        return 0;
+}
+
+static void mlx4_clear_steering(struct mlx4_dev *dev)
+{
+        struct mlx4_priv *priv = mlx4_priv(dev);
+        struct mlx4_steer_index *entry, *tmp_entry;
+        struct mlx4_promisc_qp *pqp, *tmp_pqp;
+        int num_entries = dev->caps.num_ports;
+        int i, j;
+
+        for (i = 0; i < num_entries; i++) {
+                for (j = 0; j < MLX4_NUM_STEERS; j++) {
+                        list_for_each_entry_safe(pqp, tmp_pqp,
+                                                 &priv->steer[i].promisc_qps[j],
+                                                 list) {
+                                list_del(&pqp->list);
+                                kfree(pqp);
+                        }
+                        list_for_each_entry_safe(entry, tmp_entry,
+                                                 &priv->steer[i].steer_entries[j],
+                                                 list) {
+                                list_del(&entry->list);
+                                list_for_each_entry_safe(pqp, tmp_pqp,
+                                                         &entry->duplicates,
+                                                         list) {
+                                        list_del(&pqp->list);
+                                        kfree(pqp);
+                                }
+                                kfree(entry);
+                        }
+                }
+        }
+        kfree(priv->steer);
+}
+
 static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
         struct mlx4_priv *priv;
@@ -1109,6 +1209,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
                 }
         }
 
+        /* Allow large DMA segments, up to the firmware limit of 1 GB */
+        dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
+
         priv = kzalloc(sizeof *priv, GFP_KERNEL);
         if (!priv) {
                 dev_err(&pdev->dev, "Device struct alloc failed, "
@@ -1127,6 +1230,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
         INIT_LIST_HEAD(&priv->pgdir_list);
         mutex_init(&priv->pgdir_mutex);
 
+        pci_read_config_byte(pdev, PCI_REVISION_ID, &dev->rev_id);
+
+        INIT_LIST_HEAD(&priv->bf_list);
+        mutex_init(&priv->bf_mutex);
+
         /*
          * Now reset the HCA before we touch the PCI capabilities or
          * attempt a firmware command, since a boot ROM may have left
@@ -1151,8 +1259,15 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
         if (err)
                 goto err_close;
 
+        priv->msix_ctl.pool_bm = 0;
+        spin_lock_init(&priv->msix_ctl.pool_lock);
+
         mlx4_enable_msi_x(dev);
 
+        err = mlx4_init_steering(dev);
+        if (err)
+                goto err_free_eq;
+
         err = mlx4_setup_hca(dev);
         if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
                 dev->flags &= ~MLX4_FLAG_MSI_X;
@@ -1161,7 +1276,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
         }
 
         if (err)
-                goto err_free_eq;
+                goto err_steer;
 
         for (port = 1; port <= dev->caps.num_ports; port++) {
                 err = mlx4_init_port_info(dev, port);
@@ -1194,6 +1309,9 @@ err_port:
         mlx4_cleanup_pd_table(dev);
         mlx4_cleanup_uar_table(dev);
 
+err_steer:
+        mlx4_clear_steering(dev);
+
 err_free_eq:
         mlx4_free_eq_table(dev);
 
@@ -1253,6 +1371,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
         iounmap(priv->kar);
         mlx4_uar_free(dev, &priv->driver_uar);
         mlx4_cleanup_uar_table(dev);
+        mlx4_clear_steering(dev);
         mlx4_free_eq_table(dev);
         mlx4_close_hca(dev);
         mlx4_cmd_cleanup(dev);