Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c                 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.h                  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c              115
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c                 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h                6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c   28
6 files changed, 128 insertions, 44 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 9a9de51ecc91..8b3d0512a46b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -1338,6 +1338,7 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
 {
         struct mlx4_cmd_mailbox *mailbox;
         __be32 *outbox;
+        u32 dword_field;
         int err;
         u8 byte_field;
 
@@ -1372,10 +1373,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
         MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
         MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
 
+        MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
+        if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
+                param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
+        } else {
+                MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET);
+                if (byte_field & 0x8)
+                        param->steering_mode = MLX4_STEERING_MODE_B0;
+                else
+                        param->steering_mode = MLX4_STEERING_MODE_A0;
+        }
         /* steering attributes */
-        if (dev->caps.steering_mode ==
-            MLX4_STEERING_MODE_DEVICE_MANAGED) {
-
+        if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
                 MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
                 MLX4_GET(param->log_mc_entry_sz, outbox,
                          INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 2c2e7ade2a34..dbf2f69cc59f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -172,6 +172,7 @@ struct mlx4_init_hca_param {
         u8 log_uar_sz;
         u8 uar_page_sz; /* log pg sz in 4k chunks */
         u8 fs_hash_enable_bits;
+        u8 steering_mode; /* for QUERY_HCA */
         u64 dev_cap_enabled;
 };
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index b2acbe7706a3..e1bafffbc3b1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -85,15 +85,15 @@ static int probe_vf;
 module_param(probe_vf, int, 0644);
 MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)");
 
-int mlx4_log_num_mgm_entry_size = 10;
+int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
 module_param_named(log_num_mgm_entry_size,
                    mlx4_log_num_mgm_entry_size, int, 0444);
 MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
                  " of qp per mcg, for example:"
-                 " 10 gives 248.range: 9<="
+                 " 10 gives 248.range: 7 <="
                  " log_num_mgm_entry_size <= 12."
-                 " Not in use with device managed"
-                 " flow steering");
+                 " To activate device managed"
+                 " flow steering when available, set to -1");
 
 static bool enable_64b_cqe_eqe;
 module_param(enable_64b_cqe_eqe, bool, 0444);
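
For context (not part of the patch): the sizing example in the parameter description is consistent with the expression this patch uses elsewhere (choose_log_fs_mgm_entry_size() and slave_adjust_steering_mode() below). An MGM entry of 2^n bytes holds 4 * ((1 << n) / 16 - 2) QPs, equivalently (2^n - 32) / 4. A minimal standalone sketch with a hypothetical helper name:

        /* Hypothetical helper mirroring the sizing expression used in this patch. */
        static int qp_per_mgm_entry(int log_entry_size)
        {
                return 4 * ((1 << log_entry_size) / 16 - 2);
        }

With the new bounds this gives 24 QPs per group for the minimum log size 7, 248 for the default 10 (the "10 gives 248" example above), and 1016 for the maximum 12 (MLX4_MAX_QP_PER_MGM in mlx4.h further down).
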
@@ -281,28 +281,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
         dev->caps.max_gso_sz = dev_cap->max_gso_sz;
         dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
 
-        if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
-                dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
-                dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
-                dev->caps.fs_log_max_ucast_qp_range_size =
-                        dev_cap->fs_log_max_ucast_qp_range_size;
-        } else {
-                if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
-                    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) {
-                        dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
-                } else {
-                        dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
-
-                        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
-                            dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
-                                mlx4_warn(dev, "Must have UC_STEER and MC_STEER flags "
-                                          "set to use B0 steering. Falling back to A0 steering mode.\n");
-                }
-                dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
-        }
-        mlx4_dbg(dev, "Steering mode is: %s\n",
-                 mlx4_steering_mode_str(dev->caps.steering_mode));
-
         /* Sense port always allowed on supported devices for ConnectX-1 and -2 */
         if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
                 dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
@@ -493,6 +471,23 @@ int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
 }
 EXPORT_SYMBOL(mlx4_is_slave_active);
 
+static void slave_adjust_steering_mode(struct mlx4_dev *dev,
+                                       struct mlx4_dev_cap *dev_cap,
+                                       struct mlx4_init_hca_param *hca_param)
+{
+        dev->caps.steering_mode = hca_param->steering_mode;
+        if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+                dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
+                dev->caps.fs_log_max_ucast_qp_range_size =
+                        dev_cap->fs_log_max_ucast_qp_range_size;
+        } else
+                dev->caps.num_qp_per_mgm =
+                        4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);
+
+        mlx4_dbg(dev, "Steering mode is: %s\n",
+                 mlx4_steering_mode_str(dev->caps.steering_mode));
+}
+
 static int mlx4_slave_cap(struct mlx4_dev *dev)
 {
         int err;
@@ -635,6 +630,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
                 dev->caps.cqe_size = 32;
         }
 
+        slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
+
         return 0;
 
 err_mem:
@@ -1321,6 +1318,59 @@ static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
         }
 }
 
+static int choose_log_fs_mgm_entry_size(int qp_per_entry)
+{
+        int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE;
+
+        for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
+              i++) {
+                if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
+                        break;
+        }
+
+        return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
+}
+
+static void choose_steering_mode(struct mlx4_dev *dev,
+                                 struct mlx4_dev_cap *dev_cap)
+{
+        if (mlx4_log_num_mgm_entry_size == -1 &&
+            dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
+            (!mlx4_is_mfunc(dev) ||
+             (dev_cap->fs_max_num_qp_per_entry >= (num_vfs + 1))) &&
+            choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
+                MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
+                dev->oper_log_mgm_entry_size =
+                        choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
+                dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
+                dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
+                dev->caps.fs_log_max_ucast_qp_range_size =
+                        dev_cap->fs_log_max_ucast_qp_range_size;
+        } else {
+                if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
+                    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
+                        dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
+                else {
+                        dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
+
+                        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
+                            dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
+                                mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags "
+                                          "set to use B0 steering. Falling back to A0 steering mode.\n");
+                }
+                dev->oper_log_mgm_entry_size =
+                        mlx4_log_num_mgm_entry_size > 0 ?
+                        mlx4_log_num_mgm_entry_size :
+                        MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
+                dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
+        }
+        mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, "
+                 "modparam log_num_mgm_entry_size = %d\n",
+                 mlx4_steering_mode_str(dev->caps.steering_mode),
+                 dev->oper_log_mgm_entry_size,
+                 mlx4_log_num_mgm_entry_size);
+}
+
 static int mlx4_init_hca(struct mlx4_dev *dev)
 {
         struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1360,6 +1410,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
                 goto err_stop_fw;
         }
 
+        choose_steering_mode(dev, &dev_cap);
+
         if (mlx4_is_master(dev))
                 mlx4_parav_master_pf_caps(dev);
 
@@ -2452,6 +2504,17 @@ static int __init mlx4_verify_params(void)
                 port_type_array[0] = true;
         }
 
+        if (mlx4_log_num_mgm_entry_size != -1 &&
+            (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
+             mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) {
+                pr_warning("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not "
+                           "in legal range (-1 or %d..%d)\n",
+                           mlx4_log_num_mgm_entry_size,
+                           MLX4_MIN_MGM_LOG_ENTRY_SIZE,
+                           MLX4_MAX_MGM_LOG_ENTRY_SIZE);
+                return -1;
+        }
+
         return 0;
 }
 
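
Taken together, the main.c changes make device-managed flow steering (DMFS) opt-in: it is selected only when log_num_mgm_entry_size is -1, the firmware advertises the FS capability, and, roughly, each steering entry can hold a QP for the PF plus every VF in multi-function mode. A condensed sketch of that decision, using a hypothetical helper name (the constants are the ones added in mlx4.h below):

        /* Sketch only: true when choose_steering_mode() above would pick DMFS. */
        static bool dmfs_would_be_selected(int modparam, bool fs_cap, bool mfunc,
                                           int num_vfs, int fs_max_qp_per_entry)
        {
                int log_sz;

                if (modparam != -1 || !fs_cap)
                        return false;
                if (mfunc && fs_max_qp_per_entry < num_vfs + 1)
                        return false;
                /* smallest entry size in [MIN, MAX] that fits the advertised QP count */
                for (log_sz = MLX4_MIN_MGM_LOG_ENTRY_SIZE;
                     log_sz <= MLX4_MAX_MGM_LOG_ENTRY_SIZE; log_sz++)
                        if (fs_max_qp_per_entry <= 4 * ((1 << log_sz) / 16 - 2))
                                return true;
                return false;
        }

When DMFS is not selected, oper_log_mgm_entry_size falls back to the module parameter (or to MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE when the parameter is -1 but DMFS is unavailable).
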
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index e151c21baf2b..1ee4db3c6400 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -54,12 +54,7 @@ struct mlx4_mgm {
 
 int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
 {
-        if (dev->caps.steering_mode ==
-            MLX4_STEERING_MODE_DEVICE_MANAGED)
-                return 1 << MLX4_FS_MGM_LOG_ENTRY_SIZE;
-        else
-                return min((1 << mlx4_log_num_mgm_entry_size),
-                           MLX4_MAX_MGM_ENTRY_SIZE);
+        return 1 << dev->oper_log_mgm_entry_size;
 }
 
 int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
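
mlx4_get_qp_per_mgm() in the context line above is not touched by this diff; presumably it derives its result from mlx4_get_mgm_entry_size(), so after this change both helpers follow the new dev->oper_log_mgm_entry_size field rather than the module parameter directly. A sketch of the assumed (unchanged) relationship:

        /* Assumed body of the unchanged helper -- not part of this diff. */
        int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
        {
                return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
        }
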
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 1cf42036d7bb..116c5c29d2d1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -94,8 +94,10 @@ enum {
 };
 
 enum {
-        MLX4_MAX_MGM_ENTRY_SIZE = 0x1000,
-        MLX4_MAX_QP_PER_MGM = 4 * (MLX4_MAX_MGM_ENTRY_SIZE / 16 - 2),
+        MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE = 10,
+        MLX4_MIN_MGM_LOG_ENTRY_SIZE = 7,
+        MLX4_MAX_MGM_LOG_ENTRY_SIZE = 12,
+        MLX4_MAX_QP_PER_MGM = 4 * ((1 << MLX4_MAX_MGM_LOG_ENTRY_SIZE) / 16 - 2),
         MLX4_MTT_ENTRY_PER_SEG = 8,
 };
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index b05705f50f0f..561ed2a22a17 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3071,6 +3071,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
         struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
         int err;
+        int qpn;
         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
         struct _rule_hw *rule_header;
         int header_id;
@@ -3080,13 +3081,21 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                 return -EOPNOTSUPP;
 
         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
+        qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
+        err = get_res(dev, slave, qpn, RES_QP, NULL);
+        if (err) {
+                pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
+                return err;
+        }
         rule_header = (struct _rule_hw *)(ctrl + 1);
         header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
 
         switch (header_id) {
         case MLX4_NET_TRANS_RULE_ID_ETH:
-                if (validate_eth_header_mac(slave, rule_header, rlist))
-                        return -EINVAL;
+                if (validate_eth_header_mac(slave, rule_header, rlist)) {
+                        err = -EINVAL;
+                        goto err_put;
+                }
                 break;
         case MLX4_NET_TRANS_RULE_ID_IB:
                 break;
@@ -3094,14 +3103,17 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
         case MLX4_NET_TRANS_RULE_ID_TCP:
         case MLX4_NET_TRANS_RULE_ID_UDP:
                 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
-                if (add_eth_header(dev, slave, inbox, rlist, header_id))
-                        return -EINVAL;
+                if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
+                        err = -EINVAL;
+                        goto err_put;
+                }
                 vhcr->in_modifier +=
                         sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
                 break;
         default:
                 pr_err("Corrupted mailbox.\n");
-                return -EINVAL;
+                err = -EINVAL;
+                goto err_put;
         }
 
         err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
@@ -3109,16 +3121,18 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                            MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
                            MLX4_CMD_NATIVE);
         if (err)
-                return err;
+                goto err_put;
 
         err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
         if (err) {
                 mlx4_err(dev, "Fail to add flow steering resources.\n ");
                 /* detach rule*/
                 mlx4_cmd(dev, vhcr->out_param, 0, 0,
-                         MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
+                         MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
                          MLX4_CMD_NATIVE);
         }
+err_put:
+        put_res(dev, slave, qpn, RES_QP);
         return err;
 }
 