Diffstat:
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c                      |  62
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h                   |   1
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c                        |   1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c         |  21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c                |  91
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.h                |   3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c              |  56
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c               | 365
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h              |  13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h           |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c              |  98
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/profile.c           |  12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c  |   6
-rw-r--r--  include/linux/mlx4/device.h                            | 108
14 files changed, 758 insertions, 81 deletions
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 3530c41fcd1f..8a3a2037b005 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -718,26 +718,53 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
 	return ret;
 }
 
+struct mlx4_ib_steering {
+	struct list_head list;
+	u64 reg_id;
+	union ib_gid gid;
+};
+
 static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
 	int err;
 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
+	u64 reg_id;
+	struct mlx4_ib_steering *ib_steering = NULL;
+
+	if (mdev->dev->caps.steering_mode ==
+	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
+		ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
+		if (!ib_steering)
+			return -ENOMEM;
+	}
 
-	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
-				    !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
-				    MLX4_PROT_IB_IPV6);
+	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
+				    !!(mqp->flags &
+				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
+				    MLX4_PROT_IB_IPV6, &reg_id);
 	if (err)
-		return err;
+		goto err_malloc;
 
 	err = add_gid_entry(ibqp, gid);
 	if (err)
 		goto err_add;
 
+	if (ib_steering) {
+		memcpy(ib_steering->gid.raw, gid->raw, 16);
+		ib_steering->reg_id = reg_id;
+		mutex_lock(&mqp->mutex);
+		list_add(&ib_steering->list, &mqp->steering_rules);
+		mutex_unlock(&mqp->mutex);
+	}
 	return 0;
 
 err_add:
-	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6);
+	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+			      MLX4_PROT_IB_IPV6, reg_id);
+err_malloc:
+	kfree(ib_steering);
+
 	return err;
 }
 
@@ -765,9 +792,30 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	u8 mac[6];
 	struct net_device *ndev;
 	struct mlx4_ib_gid_entry *ge;
+	u64 reg_id = 0;
+
+	if (mdev->dev->caps.steering_mode ==
+	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
+		struct mlx4_ib_steering *ib_steering;
+
+		mutex_lock(&mqp->mutex);
+		list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
+			if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
+				list_del(&ib_steering->list);
+				break;
+			}
+		}
+		mutex_unlock(&mqp->mutex);
+		if (&ib_steering->list == &mqp->steering_rules) {
+			pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
+			return -EINVAL;
+		}
+		reg_id = ib_steering->reg_id;
+		kfree(ib_steering);
+	}
 
-	err = mlx4_multicast_detach(mdev->dev,
-				    &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6);
+	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+				    MLX4_PROT_IB_IPV6, reg_id);
 	if (err)
 		return err;
 
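(Example, not part of the patch: with the reworked signatures above, the reg_id returned by mlx4_multicast_attach() is the only handle a caller has for undoing a device managed rule, which is why the IB driver keeps per-QP (gid, reg_id) pairs. A minimal sketch of the contract, assuming dev, qp, gid and port are already set up:)

	u64 reg_id;
	int err;

	err = mlx4_multicast_attach(dev, qp, gid, port,
				    0 /* don't block loopback */,
				    MLX4_PROT_IB_IPV6, &reg_id);
	if (err)
		return err;

	/* ... later: the stored reg_id is required to undo the attach */
	err = mlx4_multicast_detach(dev, qp, gid, MLX4_PROT_IB_IPV6, reg_id);
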
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index ff36655d23d3..42df4f7a6a5b 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -163,6 +163,7 @@ struct mlx4_ib_qp {
 	u8			state;
 	int			mlx_type;
 	struct list_head	gid_list;
+	struct list_head	steering_rules;
 };
 
 struct mlx4_ib_srq {
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 8d4ed24aef93..6af19f6c2b11 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -495,6 +495,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	spin_lock_init(&qp->sq.lock);
 	spin_lock_init(&qp->rq.lock);
 	INIT_LIST_HEAD(&qp->gid_list);
+	INIT_LIST_HEAD(&qp->steering_rules);
 
 	qp->state	 = IB_QPS_RESET;
 	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 44ff7cdb15e5..eb5ed8e39873 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -463,7 +463,8 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 			err = mlx4_multicast_detach(mdev->dev,
 						    &priv->rss_map.indir_qp,
 						    mc_list,
-						    MLX4_PROT_ETH);
+						    MLX4_PROT_ETH,
+						    mclist->reg_id);
 			if (err)
 				en_err(priv, "Fail to detach multicast address\n");
 
@@ -475,11 +476,14 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 		if (mclist->action == MCLIST_ADD) {
 			/* attach the address */
 			memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
+			/* needed for B0 steering support */
 			mc_list[5] = priv->port;
 			err = mlx4_multicast_attach(mdev->dev,
 						    &priv->rss_map.indir_qp,
-						    mc_list, 0,
-						    MLX4_PROT_ETH);
+						    mc_list,
+						    priv->port, 0,
+						    MLX4_PROT_ETH,
+						    &mclist->reg_id);
 			if (err)
 				en_err(priv, "Fail to attach multicast address\n");
 
@@ -827,9 +831,10 @@ int mlx4_en_start_port(struct net_device *dev)
 
 	/* Attach rx QP to bradcast address */
 	memset(&mc_list[10], 0xff, ETH_ALEN);
-	mc_list[5] = priv->port;
+	mc_list[5] = priv->port; /* needed for B0 steering support */
 	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
-				  0, MLX4_PROT_ETH))
+				  priv->port, 0, MLX4_PROT_ETH,
+				  &priv->broadcast_id))
 		mlx4_warn(mdev, "Failed Attaching Broadcast\n");
 
 	/* Must redo promiscuous mode setup. */
@@ -886,14 +891,14 @@ void mlx4_en_stop_port(struct net_device *dev)
 
 	/* Detach All multicasts */
 	memset(&mc_list[10], 0xff, ETH_ALEN);
-	mc_list[5] = priv->port;
+	mc_list[5] = priv->port; /* needed for B0 steering support */
 	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
-			      MLX4_PROT_ETH);
+			      MLX4_PROT_ETH, priv->broadcast_id);
 	list_for_each_entry(mclist, &priv->curr_list, list) {
 		memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
 		mc_list[5] = priv->port;
 		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
-				      mc_list, MLX4_PROT_ETH);
+				      mc_list, MLX4_PROT_ETH, mclist->reg_id);
 	}
 	mlx4_en_clear_list(dev);
 	list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 40e048bac024..1d70657058a5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -123,7 +123,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
 	static const char * const fname[] = {
 		[0] = "RSS support",
 		[1] = "RSS Toeplitz Hash Function support",
-		[2] = "RSS XOR Hash Function support"
+		[2] = "RSS XOR Hash Function support",
+		[3] = "Device manage flow steering support"
 	};
 	int i;
 
129 130
@@ -391,6 +392,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_RSVD_XRC_OFFSET		0x66
 #define QUERY_DEV_CAP_MAX_XRC_OFFSET		0x67
 #define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET	0x68
+#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET	0x76
+#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET	0x77
 #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET	0x80
 #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET	0x82
 #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET	0x84
@@ -474,6 +477,12 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev_cap->num_ports = field & 0xf;
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
 	dev_cap->max_msg_sz = 1 << (field & 0x1f);
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
+	if (field & 0x80)
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
+	dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
+	dev_cap->fs_max_num_qp_per_entry = field;
 	MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
 	dev_cap->stat_rate_support = stat_rate;
 	MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
@@ -1061,6 +1070,15 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 #define	 INIT_HCA_LOG_MC_HASH_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x16)
 #define  INIT_HCA_UC_STEERING_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x18)
 #define	 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
+#define  INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN	0x6
+#define  INIT_HCA_FS_PARAM_OFFSET		0x1d0
+#define  INIT_HCA_FS_BASE_OFFSET		(INIT_HCA_FS_PARAM_OFFSET + 0x00)
+#define  INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET	(INIT_HCA_FS_PARAM_OFFSET + 0x12)
+#define  INIT_HCA_FS_LOG_TABLE_SZ_OFFSET	(INIT_HCA_FS_PARAM_OFFSET + 0x1b)
+#define  INIT_HCA_FS_ETH_BITS_OFFSET		(INIT_HCA_FS_PARAM_OFFSET + 0x21)
+#define  INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET	(INIT_HCA_FS_PARAM_OFFSET + 0x22)
+#define  INIT_HCA_FS_IB_BITS_OFFSET		(INIT_HCA_FS_PARAM_OFFSET + 0x25)
+#define  INIT_HCA_FS_IB_NUM_ADDRS_OFFSET	(INIT_HCA_FS_PARAM_OFFSET + 0x26)
 #define INIT_HCA_TPT_OFFSET		 0x0f0
 #define	 INIT_HCA_DMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x00)
 #define	 INIT_HCA_LOG_MPT_SZ_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x0b)
@@ -1119,14 +1137,44 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 	MLX4_PUT(inbox, param->rdmarc_base,	 INIT_HCA_RDMARC_BASE_OFFSET);
 	MLX4_PUT(inbox, param->log_rd_per_qp,	 INIT_HCA_LOG_RD_OFFSET);
 
-	/* multicast attributes */
-
-	MLX4_PUT(inbox, param->mc_base,		INIT_HCA_MC_BASE_OFFSET);
-	MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
-	MLX4_PUT(inbox, param->log_mc_hash_sz,  INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
-	if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
-		MLX4_PUT(inbox, (u8) (1 << 3),	INIT_HCA_UC_STEERING_OFFSET);
-	MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+	/* steering attributes */
+	if (dev->caps.steering_mode ==
+	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
+		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
+			cpu_to_be32(1 <<
+				    INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);
+
+		MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
+		MLX4_PUT(inbox, param->log_mc_entry_sz,
+			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
+		MLX4_PUT(inbox, param->log_mc_table_sz,
+			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
+		/* Enable Ethernet flow steering
+		 * with udp unicast and tcp unicast
+		 */
+		MLX4_PUT(inbox, param->fs_hash_enable_bits,
+			 INIT_HCA_FS_ETH_BITS_OFFSET);
+		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
+			 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
+		/* Enable IPoIB flow steering
+		 * with udp unicast and tcp unicast
+		 */
+		MLX4_PUT(inbox, param->fs_hash_enable_bits,
+			 INIT_HCA_FS_IB_BITS_OFFSET);
+		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
+			 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
+	} else {
+		MLX4_PUT(inbox, param->mc_base,	INIT_HCA_MC_BASE_OFFSET);
+		MLX4_PUT(inbox, param->log_mc_entry_sz,
+			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+		MLX4_PUT(inbox, param->log_mc_hash_sz,
+			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+		MLX4_PUT(inbox, param->log_mc_table_sz,
+			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+		if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
+			MLX4_PUT(inbox, (u8) (1 << 3),
+				 INIT_HCA_UC_STEERING_OFFSET);
+	}
 
 	/* TPT attributes */
 
@@ -1188,15 +1236,24 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
 	MLX4_GET(param->rdmarc_base,   outbox, INIT_HCA_RDMARC_BASE_OFFSET);
 	MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
 
-	/* multicast attributes */
+	/* steering attributes */
+	if (dev->caps.steering_mode ==
+	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
 
-	MLX4_GET(param->mc_base,	 outbox, INIT_HCA_MC_BASE_OFFSET);
-	MLX4_GET(param->log_mc_entry_sz, outbox,
-		 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
-	MLX4_GET(param->log_mc_hash_sz,  outbox,
-		 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
-	MLX4_GET(param->log_mc_table_sz, outbox,
-		 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+		MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
+		MLX4_GET(param->log_mc_entry_sz, outbox,
+			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
+		MLX4_GET(param->log_mc_table_sz, outbox,
+			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
+	} else {
+		MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
+		MLX4_GET(param->log_mc_entry_sz, outbox,
+			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+		MLX4_GET(param->log_mc_hash_sz,  outbox,
+			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+		MLX4_GET(param->log_mc_table_sz, outbox,
+			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+	}
 
 	/* TPT attributes */
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 64c0399e4b78..83fcbbf1b169 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -78,6 +78,8 @@ struct mlx4_dev_cap {
 	u16 wavelength[MLX4_MAX_PORTS + 1];
 	u64 trans_code[MLX4_MAX_PORTS + 1];
 	u16 stat_rate_support;
+	int fs_log_max_ucast_qp_range_size;
+	int fs_max_num_qp_per_entry;
 	u64 flags;
 	u64 flags2;
 	int reserved_uars;
@@ -165,6 +167,7 @@ struct mlx4_init_hca_param {
 	u8  log_mpt_sz;
 	u8  log_uar_sz;
 	u8  uar_page_sz; /* log pg sz in 4k chunks */
+	u8  fs_hash_enable_bits;
 };
 
 struct mlx4_init_ib_param {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index f8125a82c0cb..42645166bae2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -91,7 +91,9 @@ module_param_named(log_num_mgm_entry_size,
 MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
 					 " of qp per mcg, for example:"
 					 " 10 gives 248.range: 9<="
-					 " log_num_mgm_entry_size <= 12");
+					 " log_num_mgm_entry_size <= 12."
+					 " Not in use with device managed"
+					 " flow steering");
 
 #define MLX4_VF					(1 << 0)
 
@@ -274,20 +276,27 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
 	dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;
 
-	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
-	    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) {
-		dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
+	if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
+		dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
+		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
+		dev->caps.fs_log_max_ucast_qp_range_size =
+			dev_cap->fs_log_max_ucast_qp_range_size;
 	} else {
-		dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
+		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
+		    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) {
+			dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
+		} else {
+			dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
 
-		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
-		    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
-			mlx4_warn(dev, "Must have UC_STEER and MC_STEER flags "
-				  "set to use B0 steering. Falling back to A0 steering mode.\n");
+			if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
+			    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
+				mlx4_warn(dev, "Must have UC_STEER and MC_STEER flags "
+					  "set to use B0 steering. Falling back to A0 steering mode.\n");
+		}
+		dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
 	}
 	mlx4_dbg(dev, "Steering mode is: %s\n",
 		 mlx4_steering_mode_str(dev->caps.steering_mode));
-	dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
 
 	/* Sense port always allowed on supported devices for ConnectX1 and 2 */
 	if (dev->pdev->device != 0x1003)
@@ -982,9 +991,11 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 	}
 
 	/*
-	 * It's not strictly required, but for simplicity just map the
-	 * whole multicast group table now.  The table isn't very big
-	 * and it's a lot easier than trying to track ref counts.
+	 * For flow steering device managed mode it is required to use
+	 * mlx4_init_icm_table. For B0 steering mode it's not strictly
+	 * required, but for simplicity just map the whole multicast
+	 * group table now.  The table isn't very big and it's a lot
+	 * easier than trying to track ref counts.
 	 */
 	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
 				  init_hca->mc_base,
@@ -1220,7 +1231,26 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 		goto err_stop_fw;
 	}
 
+	priv->fs_hash_mode = MLX4_FS_L2_HASH;
+
+	switch (priv->fs_hash_mode) {
+	case MLX4_FS_L2_HASH:
+		init_hca.fs_hash_enable_bits = 0;
+		break;
+
+	case MLX4_FS_L2_L3_L4_HASH:
+		/* Enable flow steering with
+		 * udp unicast and tcp unicast
+		 */
+		init_hca.fs_hash_enable_bits =
+			MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN;
+		break;
+	}
+
 	profile = default_profile;
+	if (dev->caps.steering_mode ==
+	    MLX4_STEERING_MODE_DEVICE_MANAGED)
+		profile.num_mcg = MLX4_FS_NUM_MCG;
 
 	icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
 				     &init_hca);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 3c59a33a98a5..768a2a4530e8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -41,6 +41,7 @@
 
 #define MGM_QPN_MASK       0x00FFFFFF
 #define MGM_BLCK_LB_BIT    30
+#define MLX4_MAC_MASK      0xffffffffffffULL
 
 static const u8 zero_gid[16];	/* automatically initialized to 0 */
 
@@ -54,7 +55,12 @@ struct mlx4_mgm {
 
 int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
 {
-	return min((1 << mlx4_log_num_mgm_entry_size), MLX4_MAX_MGM_ENTRY_SIZE);
+	if (dev->caps.steering_mode ==
+	    MLX4_STEERING_MODE_DEVICE_MANAGED)
+		return 1 << MLX4_FS_MGM_LOG_ENTRY_SIZE;
+	else
+		return min((1 << mlx4_log_num_mgm_entry_size),
+			   MLX4_MAX_MGM_ENTRY_SIZE);
 }
 
 int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
60int mlx4_get_qp_per_mgm(struct mlx4_dev *dev) 66int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
@@ -643,6 +649,311 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
 	return err;
 }
 
+struct mlx4_net_trans_rule_hw_ctrl {
+	__be32 ctrl;
+	__be32 vf_vep_port;
+	__be32 qpn;
+	__be32 reserved;
+};
+
+static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
+				  struct mlx4_net_trans_rule_hw_ctrl *hw)
+{
+	static const u8 __promisc_mode[] = {
+		[MLX4_FS_PROMISC_NONE]          = 0x0,
+		[MLX4_FS_PROMISC_UPLINK]        = 0x1,
+		[MLX4_FS_PROMISC_FUNCTION_PORT] = 0x2,
+		[MLX4_FS_PROMISC_ALL_MULTI]     = 0x3,
+	};
+
+	u32 dw = 0;
+
+	dw = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
+	dw |= ctrl->exclusive ? (1 << 2) : 0;
+	dw |= ctrl->allow_loopback ? (1 << 3) : 0;
+	dw |= __promisc_mode[ctrl->promisc_mode] << 8;
+	dw |= ctrl->priority << 16;
+
+	hw->ctrl = cpu_to_be32(dw);
+	hw->vf_vep_port = cpu_to_be32(ctrl->port);
+	hw->qpn = cpu_to_be32(ctrl->qpn);
+}
+
+struct mlx4_net_trans_rule_hw_ib {
+	u8	size;
+	u8	rsvd1;
+	__be16	id;
+	u32	rsvd2;
+	__be32	qpn;
+	__be32	qpn_mask;
+	u8	dst_gid[16];
+	u8	dst_gid_msk[16];
+} __packed;
+
+struct mlx4_net_trans_rule_hw_eth {
+	u8	size;
+	u8	rsvd;
+	__be16	id;
+	u8	rsvd1[6];
+	u8	dst_mac[6];
+	u16	rsvd2;
+	u8	dst_mac_msk[6];
+	u16	rsvd3;
+	u8	src_mac[6];
+	u16	rsvd4;
+	u8	src_mac_msk[6];
+	u8	rsvd5;
+	u8	ether_type_enable;
+	__be16	ether_type;
+	__be16	vlan_id_msk;
+	__be16	vlan_id;
+} __packed;
+
+struct mlx4_net_trans_rule_hw_tcp_udp {
+	u8	size;
+	u8	rsvd;
+	__be16	id;
+	__be16	rsvd1[3];
+	__be16	dst_port;
+	__be16	rsvd2;
+	__be16	dst_port_msk;
+	__be16	rsvd3;
+	__be16	src_port;
+	__be16	rsvd4;
+	__be16	src_port_msk;
+} __packed;
+
+struct mlx4_net_trans_rule_hw_ipv4 {
+	u8	size;
+	u8	rsvd;
+	__be16	id;
+	__be32	rsvd1;
+	__be32	dst_ip;
+	__be32	dst_ip_msk;
+	__be32	src_ip;
+	__be32	src_ip_msk;
+} __packed;
+
+struct _rule_hw {
+	union {
+		struct {
+			u8 size;
+			u8 rsvd;
+			__be16 id;
+		};
+		struct mlx4_net_trans_rule_hw_eth eth;
+		struct mlx4_net_trans_rule_hw_ib ib;
+		struct mlx4_net_trans_rule_hw_ipv4 ipv4;
+		struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
+	};
+};
+
+static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
+			    struct _rule_hw *rule_hw)
+{
+	static const u16 __sw_id_hw[] = {
+		[MLX4_NET_TRANS_RULE_ID_ETH]     = 0xE001,
+		[MLX4_NET_TRANS_RULE_ID_IB]      = 0xE005,
+		[MLX4_NET_TRANS_RULE_ID_IPV6]    = 0xE003,
+		[MLX4_NET_TRANS_RULE_ID_IPV4]    = 0xE002,
+		[MLX4_NET_TRANS_RULE_ID_TCP]     = 0xE004,
+		[MLX4_NET_TRANS_RULE_ID_UDP]     = 0xE006
+	};
+
+	static const size_t __rule_hw_sz[] = {
+		[MLX4_NET_TRANS_RULE_ID_ETH] =
+			sizeof(struct mlx4_net_trans_rule_hw_eth),
+		[MLX4_NET_TRANS_RULE_ID_IB] =
+			sizeof(struct mlx4_net_trans_rule_hw_ib),
+		[MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
+		[MLX4_NET_TRANS_RULE_ID_IPV4] =
+			sizeof(struct mlx4_net_trans_rule_hw_ipv4),
+		[MLX4_NET_TRANS_RULE_ID_TCP] =
+			sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
+		[MLX4_NET_TRANS_RULE_ID_UDP] =
+			sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
+	};
+	if (spec->id > MLX4_NET_TRANS_RULE_NUM) {
+		mlx4_err(dev, "Invalid network rule id. id = %d\n", spec->id);
+		return -EINVAL;
+	}
+	memset(rule_hw, 0, __rule_hw_sz[spec->id]);
+	rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
+	rule_hw->size = __rule_hw_sz[spec->id] >> 2;
+
+	switch (spec->id) {
+	case MLX4_NET_TRANS_RULE_ID_ETH:
+		memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN);
+		memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk,
+		       ETH_ALEN);
+		memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN);
+		memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk,
+		       ETH_ALEN);
+		if (spec->eth.ether_type_enable) {
+			rule_hw->eth.ether_type_enable = 1;
+			rule_hw->eth.ether_type = spec->eth.ether_type;
+		}
+		rule_hw->eth.vlan_id = spec->eth.vlan_id;
+		rule_hw->eth.vlan_id_msk = spec->eth.vlan_id_msk;
+		break;
+
+	case MLX4_NET_TRANS_RULE_ID_IB:
+		rule_hw->ib.qpn = spec->ib.r_qpn;
+		rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
+		memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
+		memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
+		break;
+
+	case MLX4_NET_TRANS_RULE_ID_IPV6:
+		return -EOPNOTSUPP;
+
+	case MLX4_NET_TRANS_RULE_ID_IPV4:
+		rule_hw->ipv4.src_ip = spec->ipv4.src_ip;
+		rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk;
+		rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip;
+		rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk;
+		break;
+
+	case MLX4_NET_TRANS_RULE_ID_TCP:
+	case MLX4_NET_TRANS_RULE_ID_UDP:
+		rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port;
+		rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk;
+		rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port;
+		rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return __rule_hw_sz[spec->id];
+}
+
+static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
+			  struct mlx4_net_trans_rule *rule)
+{
+#define BUF_SIZE 256
+	struct mlx4_spec_list *cur;
+	char buf[BUF_SIZE];
+	int len = 0;
+
+	mlx4_err(dev, "%s", str);
+	len += snprintf(buf + len, BUF_SIZE - len,
+			"port = %d prio = 0x%x qp = 0x%x ",
+			rule->port, rule->priority, rule->qpn);
+
+	list_for_each_entry(cur, &rule->list, list) {
+		switch (cur->id) {
+		case MLX4_NET_TRANS_RULE_ID_ETH:
+			len += snprintf(buf + len, BUF_SIZE - len,
+					"dmac = %pM ", &cur->eth.dst_mac);
+			if (cur->eth.ether_type)
+				len += snprintf(buf + len, BUF_SIZE - len,
+						"ethertype = 0x%x ",
+						be16_to_cpu(cur->eth.ether_type));
+			if (cur->eth.vlan_id)
+				len += snprintf(buf + len, BUF_SIZE - len,
+						"vlan-id = %d ",
+						be16_to_cpu(cur->eth.vlan_id));
+			break;
+
+		case MLX4_NET_TRANS_RULE_ID_IPV4:
+			if (cur->ipv4.src_ip)
+				len += snprintf(buf + len, BUF_SIZE - len,
+						"src-ip = %pI4 ",
+						&cur->ipv4.src_ip);
+			if (cur->ipv4.dst_ip)
+				len += snprintf(buf + len, BUF_SIZE - len,
+						"dst-ip = %pI4 ",
+						&cur->ipv4.dst_ip);
+			break;
+
+		case MLX4_NET_TRANS_RULE_ID_TCP:
+		case MLX4_NET_TRANS_RULE_ID_UDP:
+			if (cur->tcp_udp.src_port)
+				len += snprintf(buf + len, BUF_SIZE - len,
+						"src-port = %d ",
+						be16_to_cpu(cur->tcp_udp.src_port));
+			if (cur->tcp_udp.dst_port)
+				len += snprintf(buf + len, BUF_SIZE - len,
+						"dst-port = %d ",
+						be16_to_cpu(cur->tcp_udp.dst_port));
+			break;
+
+		case MLX4_NET_TRANS_RULE_ID_IB:
+			len += snprintf(buf + len, BUF_SIZE - len,
+					"dst-gid = %pI6\n", cur->ib.dst_gid);
+			len += snprintf(buf + len, BUF_SIZE - len,
+					"dst-gid-mask = %pI6\n",
+					cur->ib.dst_gid_msk);
+			break;
+
+		case MLX4_NET_TRANS_RULE_ID_IPV6:
+			break;
+
+		default:
+			break;
+		}
+	}
+	len += snprintf(buf + len, BUF_SIZE - len, "\n");
+	mlx4_err(dev, "%s", buf);
+
+	if (len >= BUF_SIZE)
+		mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n");
+}
+
+int mlx4_flow_attach(struct mlx4_dev *dev,
+		     struct mlx4_net_trans_rule *rule, u64 *reg_id)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_spec_list *cur;
+	u32 size = 0;
+	int ret;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl));
+	trans_rule_ctrl_to_hw(rule, mailbox->buf);
+
+	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
+
+	list_for_each_entry(cur, &rule->list, list) {
+		ret = parse_trans_rule(dev, cur, mailbox->buf + size);
+		if (ret < 0) {
+			mlx4_free_cmd_mailbox(dev, mailbox);
+			return -EINVAL;
+		}
+		size += ret;
+	}
+
+	ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
+	if (ret == -ENOMEM)
+		mlx4_err_rule(dev,
+			      "mcg table is full. Fail to register network rule.\n",
+			      rule);
+	else if (ret)
+		mlx4_err_rule(dev, "Fail to register network rule.\n", rule);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mlx4_flow_attach);
+
+int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
+{
+	int err;
+
+	err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id);
+	if (err)
+		mlx4_err(dev, "Fail to detach network rule. registration id = 0x%llx\n",
+			 reg_id);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_flow_detach);
+
 int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 			  int block_mcast_loopback, enum mlx4_protocol prot,
 			  enum mlx4_steer_type steer)
@@ -895,7 +1206,8 @@ static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
 }
 
 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
-			  int block_mcast_loopback, enum mlx4_protocol prot)
+			  u8 port, int block_mcast_loopback,
+			  enum mlx4_protocol prot, u64 *reg_id)
 {
 
 	switch (dev->caps.steering_mode) {
@@ -914,6 +1226,42 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 					     block_mcast_loopback, prot,
 					     MLX4_MC_STEER);
 
+	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+		struct mlx4_spec_list spec = { {NULL} };
+		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+		struct mlx4_net_trans_rule rule = {
+			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
+			.exclusive = 0,
+			.promisc_mode = MLX4_FS_PROMISC_NONE,
+			.priority = MLX4_DOMAIN_NIC,
+		};
+
+		rule.allow_loopback = ~block_mcast_loopback;
+		rule.port = port;
+		rule.qpn = qp->qpn;
+		INIT_LIST_HEAD(&rule.list);
+
+		switch (prot) {
+		case MLX4_PROT_ETH:
+			spec.id = MLX4_NET_TRANS_RULE_ID_ETH;
+			memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN);
+			memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+			break;
+
+		case MLX4_PROT_IB_IPV6:
+			spec.id = MLX4_NET_TRANS_RULE_ID_IB;
+			memcpy(spec.ib.dst_gid, gid, 16);
+			memset(&spec.ib.dst_gid_msk, 0xff, 16);
+			break;
+		default:
+			return -EINVAL;
+		}
+		list_add_tail(&spec.list, &rule.list);
+
+		return mlx4_flow_attach(dev, &rule, reg_id);
+	}
+
 	default:
 		return -EINVAL;
 	}
@@ -921,7 +1269,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
 
 int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
-			  enum mlx4_protocol prot)
+			  enum mlx4_protocol prot, u64 reg_id)
 {
 	switch (dev->caps.steering_mode) {
 	case MLX4_STEERING_MODE_A0:
@@ -938,6 +1286,9 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 		return mlx4_qp_detach_common(dev, qp, gid, prot,
 					     MLX4_MC_STEER);
 
+	case MLX4_STEERING_MODE_DEVICE_MANAGED:
+		return mlx4_flow_detach(dev, reg_id);
+
 	default:
 		return -EINVAL;
 	}
@@ -1042,6 +1393,10 @@ int mlx4_init_mcg_table(struct mlx4_dev *dev)
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int err;
 
+	/* No need for mcg_table when fw managed the mcg table*/
+	if (dev->caps.steering_mode ==
+	    MLX4_STEERING_MODE_DEVICE_MANAGED)
+		return 0;
 	err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
 			       dev->caps.num_amgms - 1, 0, 0);
 	if (err)
@@ -1054,5 +1409,7 @@ int mlx4_init_mcg_table(struct mlx4_dev *dev)
 
 void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
 {
-	mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
+	if (dev->caps.steering_mode !=
+	    MLX4_STEERING_MODE_DEVICE_MANAGED)
+		mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
 }
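(Example, not part of the patch: mlx4_flow_attach()/mlx4_flow_detach() added above are the generic entry points for device managed steering. The caller chains mlx4_spec_list entries onto a mlx4_net_trans_rule; the driver serializes a control segment plus one hardware segment per spec into a mailbox and the firmware returns a 64-bit registration id. A hedged sketch registering an L2 unicast rule, assuming dev, port, qpn and mac come from the caller:)

	struct mlx4_spec_list spec = { {NULL} };
	struct mlx4_net_trans_rule rule = {
		.queue_mode	= MLX4_NET_TRANS_Q_FIFO,
		.exclusive	= 0,
		.allow_loopback	= 1,
		.promisc_mode	= MLX4_FS_PROMISC_NONE,
		.priority	= MLX4_DOMAIN_NIC,
		.port		= port,
		.qpn		= qpn,
	};
	u64 reg_id;
	int err;

	INIT_LIST_HEAD(&rule.list);
	spec.id = MLX4_NET_TRANS_RULE_ID_ETH;
	memcpy(spec.eth.dst_mac, mac, ETH_ALEN);
	memset(spec.eth.dst_mac_msk, 0xff, ETH_ALEN);	/* exact MAC match */
	list_add_tail(&spec.list, &rule.list);

	err = mlx4_flow_attach(dev, &rule, &reg_id);	/* ctrl + one ETH segment */
	if (!err)
		mlx4_flow_detach(dev, reg_id);		/* remove by registration id */
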
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index c07e882e8369..0084967be19e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -54,6 +54,17 @@
 #define DRV_VERSION	"1.1"
 #define DRV_RELDATE	"Dec, 2011"
 
+#define MLX4_FS_UDP_UC_EN		(1 << 1)
+#define MLX4_FS_TCP_UC_EN		(1 << 2)
+#define MLX4_FS_NUM_OF_L2_ADDR		8
+#define MLX4_FS_MGM_LOG_ENTRY_SIZE	7
+#define MLX4_FS_NUM_MCG			(1 << 17)
+
+enum {
+	MLX4_FS_L2_HASH	= 0,
+	MLX4_FS_L2_L3_L4_HASH,
+};
+
 #define MLX4_NUM_UP		8
 #define MLX4_NUM_TC		8
 #define MLX4_RATELIMIT_UNITS	3 /* 100 Mbps */
@@ -704,6 +715,7 @@ struct mlx4_set_port_rqp_calc_context {
 
 struct mlx4_mac_entry {
 	u64 mac;
+	u64 reg_id;
 };
 
 struct mlx4_port_info {
@@ -777,6 +789,7 @@ struct mlx4_priv {
 	struct mutex		bf_mutex;
 	struct io_mapping	*bf_mapping;
 	int			reserved_mtts;
+	int			fs_hash_mode;
 };
 
 static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 1bb00cd22d42..2d6dabe7f55d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -414,6 +414,7 @@ struct mlx4_en_mc_list {
 	struct list_head	list;
 	enum mlx4_en_mclist_act	action;
 	u8			addr[ETH_ALEN];
+	u64			reg_id;
 };
 
 struct mlx4_en_frag_info {
@@ -503,6 +504,7 @@ struct mlx4_en_priv {
 	u64 stats_bitmap;
 	struct list_head mc_list;
 	struct list_head curr_list;
+	u64 broadcast_id;
 	struct mlx4_en_stat_out_mbox hw_stats;
 	int vids[128];
 	bool wol;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 58de7237f57a..a51d1b9bf1d1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -75,21 +75,54 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
 	table->total = 0;
 }
 
-static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
+static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
+			     u64 mac, int *qpn, u64 *reg_id)
 {
-	struct mlx4_qp qp;
-	u8 gid[16] = {0};
 	__be64 be_mac;
 	int err;
 
-	qp.qpn = *qpn;
-
-	mac &= 0xffffffffffffULL;
+	mac &= MLX4_MAC_MASK;
 	be_mac = cpu_to_be64(mac << 16);
-	memcpy(&gid[10], &be_mac, ETH_ALEN);
-	gid[5] = port;
 
-	err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
+	switch (dev->caps.steering_mode) {
+	case MLX4_STEERING_MODE_B0: {
+		struct mlx4_qp qp;
+		u8 gid[16] = {0};
+
+		qp.qpn = *qpn;
+		memcpy(&gid[10], &be_mac, ETH_ALEN);
+		gid[5] = port;
+
+		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
+		break;
+	}
+	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+		struct mlx4_spec_list spec_eth = { {NULL} };
+		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+		struct mlx4_net_trans_rule rule = {
+			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
+			.exclusive = 0,
+			.allow_loopback = 1,
+			.promisc_mode = MLX4_FS_PROMISC_NONE,
+			.priority = MLX4_DOMAIN_NIC,
+		};
+
+		rule.port = port;
+		rule.qpn = *qpn;
+		INIT_LIST_HEAD(&rule.list);
+
+		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
+		memcpy(spec_eth.eth.dst_mac, &be_mac, ETH_ALEN);
+		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+		list_add_tail(&spec_eth.list, &rule.list);
+
+		err = mlx4_flow_attach(dev, &rule, reg_id);
+		break;
+	}
+	default:
+		return -EINVAL;
+	}
 	if (err)
 		mlx4_warn(dev, "Failed Attaching Unicast\n");
 
@@ -97,19 +130,30 @@ static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
 }
 
 static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
-				  u64 mac, int qpn)
+				  u64 mac, int qpn, u64 reg_id)
 {
-	struct mlx4_qp qp;
-	u8 gid[16] = {0};
-	__be64 be_mac;
+	switch (dev->caps.steering_mode) {
+	case MLX4_STEERING_MODE_B0: {
+		struct mlx4_qp qp;
+		u8 gid[16] = {0};
+		__be64 be_mac;
 
-	qp.qpn = qpn;
-	mac &= 0xffffffffffffULL;
-	be_mac = cpu_to_be64(mac << 16);
-	memcpy(&gid[10], &be_mac, ETH_ALEN);
-	gid[5] = port;
+		qp.qpn = qpn;
+		mac &= MLX4_MAC_MASK;
+		be_mac = cpu_to_be64(mac << 16);
+		memcpy(&gid[10], &be_mac, ETH_ALEN);
+		gid[5] = port;
 
-	mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
+		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
+		break;
+	}
+	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+		mlx4_flow_detach(dev, reg_id);
+		break;
+	}
+	default:
+		mlx4_err(dev, "Invalid steering mode.\n");
+	}
 }
 
 static int validate_index(struct mlx4_dev *dev,
114 158
115static int validate_index(struct mlx4_dev *dev, 159static int validate_index(struct mlx4_dev *dev,
@@ -144,6 +188,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
 	struct mlx4_mac_entry *entry;
 	int index = 0;
 	int err = 0;
+	u64 reg_id;
 
 	mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
 		 (unsigned long long) mac);
@@ -167,7 +212,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
 			goto qp_err;
 		}
 
-		err = mlx4_uc_steer_add(dev, port, mac, qpn);
+		err = mlx4_uc_steer_add(dev, port, mac, qpn, &reg_id);
 		if (err)
 			goto steer_err;
 
@@ -177,6 +222,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
 			goto alloc_err;
 		}
 		entry->mac = mac;
+		entry->reg_id = reg_id;
 		err = radix_tree_insert(&info->mac_tree, *qpn, entry);
 		if (err)
 			goto insert_err;
@@ -186,7 +232,7 @@ insert_err:
 	kfree(entry);
 
 alloc_err:
-	mlx4_uc_steer_release(dev, port, mac, *qpn);
+	mlx4_uc_steer_release(dev, port, mac, *qpn, reg_id);
 
 steer_err:
 	mlx4_qp_release_range(dev, *qpn, 1);
@@ -212,7 +258,8 @@ void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
 		mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx,"
 			 " qpn %d\n", port,
 			 (unsigned long long) mac, qpn);
-		mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+		mlx4_uc_steer_release(dev, port, entry->mac,
+				      qpn, entry->reg_id);
 		mlx4_qp_release_range(dev, qpn, 1);
 		radix_tree_delete(&info->mac_tree, qpn);
 		kfree(entry);
@@ -363,11 +410,14 @@ int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
 		entry = radix_tree_lookup(&info->mac_tree, qpn);
 		if (!entry)
 			return -EINVAL;
-		mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+		mlx4_uc_steer_release(dev, port, entry->mac,
+				      qpn, entry->reg_id);
 		mlx4_unregister_mac(dev, port, entry->mac);
 		entry->mac = new_mac;
+		entry->reg_id = 0;
 		mlx4_register_mac(dev, port, new_mac);
-		err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn);
+		err = mlx4_uc_steer_add(dev, port, entry->mac,
+					&qpn, &entry->reg_id);
 		return err;
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index b83bc928d52a..9ee4725363d5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -237,13 +237,19 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
 			init_hca->mtt_base	 = profile[i].start;
 			break;
 		case MLX4_RES_MCG:
-			dev->caps.num_mgms	  = profile[i].num >> 1;
-			dev->caps.num_amgms	  = profile[i].num >> 1;
 			init_hca->mc_base	  = profile[i].start;
 			init_hca->log_mc_entry_sz =
 					ilog2(mlx4_get_mgm_entry_size(dev));
 			init_hca->log_mc_table_sz = profile[i].log_num;
-			init_hca->log_mc_hash_sz  = profile[i].log_num - 1;
+			if (dev->caps.steering_mode ==
+			    MLX4_STEERING_MODE_DEVICE_MANAGED) {
+				dev->caps.num_mgms = profile[i].num;
+			} else {
+				init_hca->log_mc_hash_sz =
+						profile[i].log_num - 1;
+				dev->caps.num_mgms = profile[i].num >> 1;
+				dev->caps.num_amgms = profile[i].num >> 1;
+			}
 			break;
 		default:
 			break;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index a8ca960f4620..5a6f3555d806 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2744,6 +2744,9 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 					     struct mlx4_cmd_mailbox *outbox,
 					     struct mlx4_cmd_info *cmd)
 {
+	if (dev->caps.steering_mode !=
+	    MLX4_STEERING_MODE_DEVICE_MANAGED)
+		return -EOPNOTSUPP;
 	return mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
 			    vhcr->in_modifier, 0,
 			    MLX4_QP_FLOW_STEERING_ATTACH,
@@ -2757,6 +2760,9 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
 					     struct mlx4_cmd_mailbox *outbox,
 					     struct mlx4_cmd_info *cmd)
 {
+	if (dev->caps.steering_mode !=
+	    MLX4_STEERING_MODE_DEVICE_MANAGED)
+		return -EOPNOTSUPP;
 	return mlx4_cmd(dev, vhcr->in_param, 0, 0,
 			MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
 			MLX4_CMD_NATIVE);
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 7f5c9ee42f96..e45fc20bd01f 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -70,14 +70,17 @@ enum {
 	MLX4_MFUNC_EQE_MASK     = (MLX4_MFUNC_MAX_EQES - 1)
 };
 
-/* Driver supports 2 diffrent device methods to manage traffic steering:
+/* Driver supports 3 diffrent device methods to manage traffic steering:
+ *	-device managed - High level API for ib and eth flow steering. FW is
+ *			  managing flow steering tables.
  *	- B0 steering mode - Common low level API for ib and (if supported) eth.
  *	- A0 steering mode - Limited low level API for eth. In case of IB,
  *			     B0 mode is in use.
  */
 enum {
 	MLX4_STEERING_MODE_A0,
-	MLX4_STEERING_MODE_B0
+	MLX4_STEERING_MODE_B0,
+	MLX4_STEERING_MODE_DEVICE_MANAGED
 };
 
 static inline const char *mlx4_steering_mode_str(int steering_mode)
82 85
83static inline const char *mlx4_steering_mode_str(int steering_mode) 86static inline const char *mlx4_steering_mode_str(int steering_mode)
@@ -88,6 +91,10 @@ static inline const char *mlx4_steering_mode_str(int steering_mode)
 
 	case MLX4_STEERING_MODE_B0:
 		return "B0 steering";
+
+	case MLX4_STEERING_MODE_DEVICE_MANAGED:
+		return "Device managed flow steering";
+
 	default:
 		return "Unrecognize steering mode";
 	}
@@ -125,7 +132,8 @@ enum {
 enum {
 	MLX4_DEV_CAP_FLAG2_RSS			= 1LL <<  0,
 	MLX4_DEV_CAP_FLAG2_RSS_TOP		= 1LL <<  1,
-	MLX4_DEV_CAP_FLAG2_RSS_XOR		= 1LL <<  2
+	MLX4_DEV_CAP_FLAG2_RSS_XOR		= 1LL <<  2,
+	MLX4_DEV_CAP_FLAG2_FS_EN		= 1LL <<  3
 };
 
 #define MLX4_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)
@@ -319,6 +327,7 @@ struct mlx4_caps {
 	int			reserved_mcgs;
 	int			num_qp_per_mgm;
 	int			steering_mode;
+	int			fs_log_max_ucast_qp_range_size;
 	int			num_pds;
 	int			reserved_pds;
 	int			max_xrcds;
@@ -647,9 +656,94 @@ int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 			enum mlx4_protocol prot);
 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
-			  int block_mcast_loopback, enum mlx4_protocol protocol);
+			  u8 port, int block_mcast_loopback,
+			  enum mlx4_protocol protocol, u64 *reg_id);
 int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
-			  enum mlx4_protocol protocol);
+			  enum mlx4_protocol protocol, u64 reg_id);
+
+enum {
+	MLX4_DOMAIN_UVERBS	= 0x1000,
+	MLX4_DOMAIN_ETHTOOL	= 0x2000,
+	MLX4_DOMAIN_RFS		= 0x3000,
+	MLX4_DOMAIN_NIC		= 0x5000,
+};
+
+enum mlx4_net_trans_rule_id {
+	MLX4_NET_TRANS_RULE_ID_ETH = 0,
+	MLX4_NET_TRANS_RULE_ID_IB,
+	MLX4_NET_TRANS_RULE_ID_IPV6,
+	MLX4_NET_TRANS_RULE_ID_IPV4,
+	MLX4_NET_TRANS_RULE_ID_TCP,
+	MLX4_NET_TRANS_RULE_ID_UDP,
+	MLX4_NET_TRANS_RULE_NUM, /* should be last */
+};
+
+enum mlx4_net_trans_promisc_mode {
+	MLX4_FS_PROMISC_NONE = 0,
+	MLX4_FS_PROMISC_UPLINK,
+	MLX4_FS_PROMISC_FUNCTION_PORT,
+	MLX4_FS_PROMISC_ALL_MULTI,
+};
+
+struct mlx4_spec_eth {
+	u8	dst_mac[6];
+	u8	dst_mac_msk[6];
+	u8	src_mac[6];
+	u8	src_mac_msk[6];
+	u8	ether_type_enable;
+	__be16	ether_type;
+	__be16	vlan_id_msk;
+	__be16	vlan_id;
+};
+
+struct mlx4_spec_tcp_udp {
+	__be16 dst_port;
+	__be16 dst_port_msk;
+	__be16 src_port;
+	__be16 src_port_msk;
+};
+
+struct mlx4_spec_ipv4 {
+	__be32 dst_ip;
+	__be32 dst_ip_msk;
+	__be32 src_ip;
+	__be32 src_ip_msk;
+};
+
+struct mlx4_spec_ib {
+	__be32	r_qpn;
+	__be32	qpn_msk;
+	u8	dst_gid[16];
+	u8	dst_gid_msk[16];
+};
+
+struct mlx4_spec_list {
+	struct	list_head list;
+	enum	mlx4_net_trans_rule_id id;
+	union {
+		struct mlx4_spec_eth eth;
+		struct mlx4_spec_ib ib;
+		struct mlx4_spec_ipv4 ipv4;
+		struct mlx4_spec_tcp_udp tcp_udp;
+	};
+};
+
+enum mlx4_net_trans_hw_rule_queue {
+	MLX4_NET_TRANS_Q_FIFO,
+	MLX4_NET_TRANS_Q_LIFO,
+};
+
+struct mlx4_net_trans_rule {
+	struct	list_head list;
+	enum	mlx4_net_trans_hw_rule_queue queue_mode;
+	bool	exclusive;
+	bool	allow_loopback;
+	enum	mlx4_net_trans_promisc_mode promisc_mode;
+	u8	port;
+	u16	priority;
+	u32	qpn;
+};
+
 int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
 int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
 int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
653int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port); 747int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
654int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port); 748int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
655int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port); 749int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
@@ -692,4 +786,8 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
 
+int mlx4_flow_attach(struct mlx4_dev *dev,
+		     struct mlx4_net_trans_rule *rule, u64 *reg_id);
+int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id);
+
 #endif /* MLX4_DEVICE_H */
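(Example, not part of the patch: since mlx4_flow_attach() serializes every mlx4_spec_list entry in order after the control segment, a multi-layer match is built by chaining specs onto one rule. A hedged sketch of a hypothetical helper steering a TCP flow by destination IP and port to a QP; the function name and full masks are illustrative, not from this commit:)

static int steer_tcp_flow_to_qp(struct mlx4_dev *dev, u8 port, u32 qpn,
				__be32 dst_ip, __be16 dst_port, u64 *reg_id)
{
	struct mlx4_spec_list spec_ipv4 = { .id = MLX4_NET_TRANS_RULE_ID_IPV4 };
	struct mlx4_spec_list spec_tcp  = { .id = MLX4_NET_TRANS_RULE_ID_TCP };
	struct mlx4_net_trans_rule rule = {
		.queue_mode	= MLX4_NET_TRANS_Q_FIFO,
		.exclusive	= 0,
		.allow_loopback	= 1,
		.promisc_mode	= MLX4_FS_PROMISC_NONE,
		.priority	= MLX4_DOMAIN_NIC,
		.port		= port,
		.qpn		= qpn,
	};

	INIT_LIST_HEAD(&rule.list);

	/* L3 spec: match the destination IPv4 address exactly */
	spec_ipv4.ipv4.dst_ip	  = dst_ip;
	spec_ipv4.ipv4.dst_ip_msk = cpu_to_be32(0xffffffff);
	list_add_tail(&spec_ipv4.list, &rule.list);

	/* L4 spec: match the destination TCP port exactly */
	spec_tcp.tcp_udp.dst_port     = dst_port;
	spec_tcp.tcp_udp.dst_port_msk = cpu_to_be16(0xffff);
	list_add_tail(&spec_tcp.list, &rule.list);

	/* firmware hands back a registration id; keep it for detach */
	return mlx4_flow_attach(dev, &rule, reg_id);
}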