Diffstat (limited to 'drivers/net/mlx4/main.c')
-rw-r--r--  drivers/net/mlx4/main.c  287
1 file changed, 277 insertions(+), 10 deletions(-)
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 1252a919de2e..468921b8f4b6 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -85,6 +85,57 @@ static struct mlx4_profile default_profile = {
         .num_mtt        = 1 << 20,
 };
 
+static int log_num_mac = 2;
+module_param_named(log_num_mac, log_num_mac, int, 0444);
+MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
+
+static int log_num_vlan;
+module_param_named(log_num_vlan, log_num_vlan, int, 0444);
+MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
+
+static int use_prio;
+module_param_named(use_prio, use_prio, bool, 0444);
+MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
+                 "(0/1, default 0)");
+
+static int mlx4_check_port_params(struct mlx4_dev *dev,
+                                  enum mlx4_port_type *port_type)
+{
+        int i;
+
+        for (i = 0; i < dev->caps.num_ports - 1; i++) {
+                if (port_type[i] != port_type[i+1] &&
+                    !(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
+                        mlx4_err(dev, "Only same port types supported "
+                                 "on this HCA, aborting.\n");
+                        return -EINVAL;
+                }
+        }
+        if ((port_type[0] == MLX4_PORT_TYPE_ETH) &&
+            (port_type[1] == MLX4_PORT_TYPE_IB)) {
+                mlx4_err(dev, "eth-ib configuration is not supported.\n");
+                return -EINVAL;
+        }
+
+        for (i = 0; i < dev->caps.num_ports; i++) {
+                if (!(port_type[i] & dev->caps.supported_type[i+1])) {
+                        mlx4_err(dev, "Requested port type for port %d is not "
+                                 "supported on this HCA\n", i + 1);
+                        return -EINVAL;
+                }
+        }
+        return 0;
+}
+
+static void mlx4_set_port_mask(struct mlx4_dev *dev)
+{
+        int i;
+
+        dev->caps.port_mask = 0;
+        for (i = 1; i <= dev->caps.num_ports; ++i)
+                if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
+                        dev->caps.port_mask |= 1 << (i - 1);
+}
+
 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 {
         int err;
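
The mlx4_set_port_mask() helper added above encodes the per-port protocol choice as a bitmask: port i (ports are numbered from 1) lands at bit i - 1, set when the port runs IB. A minimal standalone model of that encoding, in plain C outside the driver (the two-port array below is illustrative only):

    #include <stdio.h>

    enum port_type { PORT_TYPE_IB = 1, PORT_TYPE_ETH = 2 };

    /* Mirror of the driver's rule: bit (i - 1) is set iff port i is IB. */
    static unsigned int make_port_mask(const enum port_type *type, int num_ports)
    {
            unsigned int mask = 0;
            int i;

            for (i = 1; i <= num_ports; ++i)
                    if (type[i] == PORT_TYPE_IB)
                            mask |= 1u << (i - 1);
            return mask;
    }

    int main(void)
    {
            /* Index 0 unused, matching the driver's 1-based port arrays. */
            enum port_type types[3] = { 0, PORT_TYPE_IB, PORT_TYPE_ETH };

            printf("port_mask = 0x%x\n", make_port_mask(types, 2)); /* 0x1 */
            return 0;
    }
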
@@ -120,10 +171,13 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
         dev->caps.num_ports          = dev_cap->num_ports;
         for (i = 1; i <= dev->caps.num_ports; ++i) {
                 dev->caps.vl_cap[i]         = dev_cap->max_vl[i];
-                dev->caps.mtu_cap[i]        = dev_cap->max_mtu[i];
+                dev->caps.ib_mtu_cap[i]     = dev_cap->ib_mtu[i];
                 dev->caps.gid_table_len[i]  = dev_cap->max_gids[i];
                 dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
                 dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
+                dev->caps.eth_mtu_cap[i]    = dev_cap->eth_mtu[i];
+                dev->caps.def_mac[i]        = dev_cap->def_mac[i];
+                dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
         }
 
         dev->caps.num_uars           = dev_cap->uar_size / PAGE_SIZE;
@@ -134,7 +188,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
         dev->caps.max_rq_sg          = dev_cap->max_rq_sg;
         dev->caps.max_wqes           = dev_cap->max_qp_sz;
         dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
-        dev->caps.reserved_qps       = dev_cap->reserved_qps;
         dev->caps.max_srq_wqes       = dev_cap->max_srq_sz;
         dev->caps.max_srq_sge        = dev_cap->max_rq_sg - 1;
         dev->caps.reserved_srqs      = dev_cap->reserved_srqs;
@@ -163,9 +216,138 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
         dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
         dev->caps.max_gso_sz         = dev_cap->max_gso_sz;
 
+        dev->caps.log_num_macs  = log_num_mac;
+        dev->caps.log_num_vlans = log_num_vlan;
+        dev->caps.log_num_prios = use_prio ? 3 : 0;
+
+        for (i = 1; i <= dev->caps.num_ports; ++i) {
+                if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH)
+                        dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
+                else
+                        dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+
+                if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
+                        dev->caps.log_num_macs = dev_cap->log_max_macs[i];
+                        mlx4_warn(dev, "Requested number of MACs is too much "
+                                  "for port %d, reducing to %d.\n",
+                                  i, 1 << dev->caps.log_num_macs);
+                }
+                if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
+                        dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
+                        mlx4_warn(dev, "Requested number of VLANs is too much "
+                                  "for port %d, reducing to %d.\n",
+                                  i, 1 << dev->caps.log_num_vlans);
+                }
+        }
+
+        mlx4_set_port_mask(dev);
+
+        dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
+        dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
+                dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
+                (1 << dev->caps.log_num_macs) *
+                (1 << dev->caps.log_num_vlans) *
+                (1 << dev->caps.log_num_prios) *
+                dev->caps.num_ports;
+        dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
+
+        dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
+                dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
+                dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
+                dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
+
         return 0;
 }
 
+/*
+ * Change the port configuration of the device.
+ * Every user of this function must hold the port mutex.
+ */
+static int mlx4_change_port_types(struct mlx4_dev *dev,
+                                  enum mlx4_port_type *port_types)
+{
+        int err = 0;
+        int change = 0;
+        int port;
+
+        for (port = 0; port < dev->caps.num_ports; port++) {
+                if (port_types[port] != dev->caps.port_type[port + 1]) {
+                        change = 1;
+                        dev->caps.port_type[port + 1] = port_types[port];
+                }
+        }
+        if (change) {
+                mlx4_unregister_device(dev);
+                for (port = 1; port <= dev->caps.num_ports; port++) {
+                        mlx4_CLOSE_PORT(dev, port);
+                        err = mlx4_SET_PORT(dev, port);
+                        if (err) {
+                                mlx4_err(dev, "Failed to set port %d, "
+                                         "aborting\n", port);
+                                goto out;
+                        }
+                }
+                mlx4_set_port_mask(dev);
+                err = mlx4_register_device(dev);
+        }
+
+out:
+        return err;
+}
+
+static ssize_t show_port_type(struct device *dev,
+                              struct device_attribute *attr,
+                              char *buf)
+{
+        struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
+                                                   port_attr);
+        struct mlx4_dev *mdev = info->dev;
+
+        return sprintf(buf, "%s\n",
+                       mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB ?
+                       "ib" : "eth");
+}
+
+static ssize_t set_port_type(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t count)
+{
+        struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
+                                                   port_attr);
+        struct mlx4_dev *mdev = info->dev;
+        struct mlx4_priv *priv = mlx4_priv(mdev);
+        enum mlx4_port_type types[MLX4_MAX_PORTS];
+        int i;
+        int err = 0;
+
+        if (!strcmp(buf, "ib\n"))
+                info->tmp_type = MLX4_PORT_TYPE_IB;
+        else if (!strcmp(buf, "eth\n"))
+                info->tmp_type = MLX4_PORT_TYPE_ETH;
+        else {
+                mlx4_err(mdev, "%s is not supported port type\n", buf);
+                return -EINVAL;
+        }
+
+        mutex_lock(&priv->port_mutex);
+        for (i = 0; i < mdev->caps.num_ports; i++)
+                types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
+                                        mdev->caps.port_type[i+1];
+
+        err = mlx4_check_port_params(mdev, types);
+        if (err)
+                goto out;
+
+        for (i = 1; i <= mdev->caps.num_ports; i++)
+                priv->port[i].tmp_type = 0;
+
+        err = mlx4_change_port_types(mdev, types);
+
+out:
+        mutex_unlock(&priv->port_mutex);
+        return err ? err : count;
+}
+
 static int mlx4_load_fw(struct mlx4_dev *dev)
 {
         struct mlx4_priv *priv = mlx4_priv(dev);
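
The reserved-QP arithmetic in the mlx4_dev_cap() hunk above is easy to sanity-check by hand. A worked example under the module defaults (log_num_mac = 2, log_num_vlan = 0, use_prio = 0) on a hypothetical two-port HCA:

    #include <stdio.h>

    int main(void)
    {
            /* Module defaults: log_num_mac = 2, log_num_vlan = 0, use_prio = 0. */
            int log_num_macs = 2, log_num_vlans = 0, log_num_prios = 0;
            int num_ports = 2;  /* illustrative two-port HCA */

            int per_region = (1 << log_num_macs) * (1 << log_num_vlans) *
                             (1 << log_num_prios) * num_ports;

            /* ETH_ADDR and FC_ADDR each reserve this many QPs; the driver's
             * reserved_qps total adds the firmware region and MLX4_NUM_FEXCH
             * on top. */
            printf("ETH_ADDR/FC_ADDR region size: %d QPs\n", per_region); /* 8 */
            return 0;
    }
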
@@ -211,7 +393,8 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
                           ((u64) (MLX4_CMPT_TYPE_QP *
                                   cmpt_entry_sz) << MLX4_CMPT_SHIFT),
                           cmpt_entry_sz, dev->caps.num_qps,
-                          dev->caps.reserved_qps, 0, 0);
+                          dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+                          0, 0);
         if (err)
                 goto err;
 
@@ -336,7 +519,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                              init_hca->qpc_base,
                              dev_cap->qpc_entry_sz,
                              dev->caps.num_qps,
-                             dev->caps.reserved_qps, 0, 0);
+                             dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+                             0, 0);
         if (err) {
                 mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
                 goto err_unmap_dmpt;
@@ -346,7 +530,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                              init_hca->auxc_base,
                              dev_cap->aux_entry_sz,
                              dev->caps.num_qps,
-                             dev->caps.reserved_qps, 0, 0);
+                             dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+                             0, 0);
         if (err) {
                 mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
                 goto err_unmap_qp;
@@ -356,7 +541,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                              init_hca->altc_base,
                              dev_cap->altc_entry_sz,
                              dev->caps.num_qps,
-                             dev->caps.reserved_qps, 0, 0);
+                             dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+                             0, 0);
         if (err) {
                 mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
                 goto err_unmap_auxc;
@@ -366,7 +552,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                              init_hca->rdmarc_base,
                              dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
                              dev->caps.num_qps,
-                             dev->caps.reserved_qps, 0, 0);
+                             dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+                             0, 0);
         if (err) {
                 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
                 goto err_unmap_altc;
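
All five mlx4_init_icm_table() call sites above (cMPT, QPC, AUXC, ALTC, RDMARC) now size their reserved range from the firmware region alone; the old aggregate reserved_qps survives only as the sum computed in mlx4_dev_cap(). A hypothetical standalone sketch of that bookkeeping (names shortened from the driver's MLX4_QP_REGION_* enum; the counts in main() are illustrative, not device values):

    #include <stdio.h>

    enum qp_region {
            QP_REGION_FW,
            QP_REGION_ETH_ADDR,
            QP_REGION_FC_ADDR,
            QP_REGION_FC_EXCH,
            QP_REGION_COUNT
    };

    struct caps {
            int reserved_qps_cnt[QP_REGION_COUNT];
            int reserved_qps;               /* kept equal to the region sum */
    };

    static int total_reserved(const struct caps *c)
    {
            int r, sum = 0;

            for (r = 0; r < QP_REGION_COUNT; r++)
                    sum += c->reserved_qps_cnt[r];
            return sum;
    }

    int main(void)
    {
            /* Illustrative counts only; FW and FC_EXCH come from the device. */
            struct caps c = { .reserved_qps_cnt = { 64, 8, 8, 1024 } };

            c.reserved_qps = total_reserved(&c);
            printf("total reserved QPs: %d\n", c.reserved_qps); /* 1104 */
            return 0;
    }
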
@@ -565,6 +752,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
 {
         struct mlx4_priv *priv = mlx4_priv(dev);
         int err;
+        int port;
 
         err = mlx4_init_uar_table(dev);
         if (err) {
@@ -663,8 +851,20 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
                 goto err_qp_table_free;
         }
 
+        for (port = 1; port <= dev->caps.num_ports; port++) {
+                err = mlx4_SET_PORT(dev, port);
+                if (err) {
+                        mlx4_err(dev, "Failed to set port %d, aborting\n",
+                                 port);
+                        goto err_mcg_table_free;
+                }
+        }
+
         return 0;
 
+err_mcg_table_free:
+        mlx4_cleanup_mcg_table(dev);
+
 err_qp_table_free:
         mlx4_cleanup_qp_table(dev);
 
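
The new err_mcg_table_free label extends mlx4_setup_hca()'s existing unwind ladder: every step that can fail jumps to the label that tears down exactly what was already initialized, so cleanup runs in reverse order of setup. The idiom in miniature, with hypothetical init_*/cleanup_* stand-ins for the driver's table setup calls (declarations only; this is a shape sketch, not a complete program):

    int init_a(void);
    int init_b(void);
    int init_c(void);
    void cleanup_a(void);
    void cleanup_b(void);

    static int setup(void)
    {
            int err;

            err = init_a();
            if (err)
                    return err;

            err = init_b();
            if (err)
                    goto err_a;     /* undo only what succeeded so far */

            err = init_c();
            if (err)
                    goto err_b;

            return 0;

    err_b:
            cleanup_b();
    err_a:
            cleanup_a();
            return err;
    }
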
@@ -728,11 +928,45 @@ no_msi:
                 priv->eq_table.eq[i].irq = dev->pdev->irq;
 }
 
+static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
+{
+        struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+        int err = 0;
+
+        info->dev = dev;
+        info->port = port;
+        mlx4_init_mac_table(dev, &info->mac_table);
+        mlx4_init_vlan_table(dev, &info->vlan_table);
+
+        sprintf(info->dev_name, "mlx4_port%d", port);
+        info->port_attr.attr.name = info->dev_name;
+        info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
+        info->port_attr.show      = show_port_type;
+        info->port_attr.store     = set_port_type;
+
+        err = device_create_file(&dev->pdev->dev, &info->port_attr);
+        if (err) {
+                mlx4_err(dev, "Failed to create file for port %d\n", port);
+                info->port = -1;
+        }
+
+        return err;
+}
+
+static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
+{
+        if (info->port < 0)
+                return;
+
+        device_remove_file(&info->dev->pdev->dev, &info->port_attr);
+}
+
 static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
         struct mlx4_priv *priv;
         struct mlx4_dev *dev;
         int err;
+        int port;
 
         printk(KERN_INFO PFX "Initializing %s\n",
                pci_name(pdev));
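
show_port_type() and set_port_type() recover their per-port context from the attribute pointer with container_of(), the usual sysfs idiom: the device_attribute is embedded in struct mlx4_port_info, so no lookup table is needed. A self-contained userspace demo of the pattern (simplified structs; the real kernel macro also type-checks the member):

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified form of the kernel's container_of(): step back from a
     * pointer to an embedded member to the structure containing it. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct attr { const char *name; };

    struct port_info {
            int         port;
            struct attr port_attr;   /* embedded, as in mlx4_port_info */
    };

    int main(void)
    {
            struct port_info info = { .port = 1, .port_attr = { "mlx4_port1" } };
            struct attr *a = &info.port_attr;

            /* Recover the enclosing port_info from the attribute pointer. */
            struct port_info *p = container_of(a, struct port_info, port_attr);
            printf("port %d via %s\n", p->port, a->name);
            return 0;
    }
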
@@ -807,6 +1041,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
         INIT_LIST_HEAD(&priv->ctx_list);
         spin_lock_init(&priv->ctx_lock);
 
+        mutex_init(&priv->port_mutex);
+
         INIT_LIST_HEAD(&priv->pgdir_list);
         mutex_init(&priv->pgdir_mutex);
 
@@ -842,15 +1078,24 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
         if (err)
                 goto err_close;
 
+        for (port = 1; port <= dev->caps.num_ports; port++) {
+                err = mlx4_init_port_info(dev, port);
+                if (err)
+                        goto err_port;
+        }
+
         err = mlx4_register_device(dev);
         if (err)
-                goto err_cleanup;
+                goto err_port;
 
         pci_set_drvdata(pdev, dev);
 
         return 0;
 
-err_cleanup:
+err_port:
+        for (port = 1; port <= dev->caps.num_ports; port++)
+                mlx4_cleanup_port_info(&priv->port[port]);
+
         mlx4_cleanup_mcg_table(dev);
         mlx4_cleanup_qp_table(dev);
         mlx4_cleanup_srq_table(dev);
@@ -907,8 +1152,10 @@ static void mlx4_remove_one(struct pci_dev *pdev)
         if (dev) {
                 mlx4_unregister_device(dev);
 
-                for (p = 1; p <= dev->caps.num_ports; ++p)
+                for (p = 1; p <= dev->caps.num_ports; p++) {
+                        mlx4_cleanup_port_info(&priv->port[p]);
                         mlx4_CLOSE_PORT(dev, p);
+                }
 
                 mlx4_cleanup_mcg_table(dev);
                 mlx4_cleanup_qp_table(dev);
@@ -948,6 +1195,8 @@ static struct pci_device_id mlx4_pci_table[] = {
         { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
         { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
         { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
+        { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
+        { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
         { 0, }
 };
 
@@ -960,10 +1209,28 @@ static struct pci_driver mlx4_driver = {
         .remove         = __devexit_p(mlx4_remove_one)
 };
 
+static int __init mlx4_verify_params(void)
+{
+        if ((log_num_mac < 0) || (log_num_mac > 7)) {
+                printk(KERN_WARNING "mlx4_core: bad num_mac: %d\n", log_num_mac);
+                return -1;
+        }
+
+        if ((log_num_vlan < 0) || (log_num_vlan > 7)) {
+                printk(KERN_WARNING "mlx4_core: bad num_vlan: %d\n", log_num_vlan);
+                return -1;
+        }
+
+        return 0;
+}
+
 static int __init mlx4_init(void)
 {
         int ret;
 
+        if (mlx4_verify_params())
+                return -EINVAL;
+
         ret = mlx4_catas_init();
         if (ret)
                 return ret;
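
With mlx4_verify_params() wired into mlx4_init(), out-of-range parameters now fail the module load with -EINVAL instead of being applied blindly. As a usage example, `modprobe mlx4_core log_num_mac=9` should be rejected with "mlx4_core: bad num_mac: 9", while `modprobe mlx4_core log_num_mac=7 log_num_vlan=7` loads with the maximum table sizes (the per-port clamping in mlx4_dev_cap() may still reduce them against device limits).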