author    Matan Barak <matanb@mellanox.com>    2013-11-07 08:25:16 -0500
committer Roland Dreier <roland@purestorage.com>    2014-01-14 17:06:50 -0500
commit    a37a1a428431d3e7e9f53530b5c56ff7867bd487
tree      1c50f676da6b7dcf021b64135f37d6fc150ad30b /drivers/infiniband/hw/mlx4
parent    0a9b7d59d5a8e2b97406a29a8a807bbc5ce7092e
IB/mlx4: Add mechanism to support flow steering over IB links
The mlx4 device requires adding the IB flow spec to rules that apply over the InfiniBand link layer. This patch adds a mechanism to add such a rule.

If higher-level flow specs (e.g. IP/UDP/TCP) are provided, the device requires us to add an empty wild-carded IB rule, and it further requires the QPN to be put in that rule.

Add specific parsing support here for empty IB rules, plus the ability to self-generate missing specs based on the existing ones.

Signed-off-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
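For context, here is a minimal, hypothetical consumer-side sketch (not part of this patch) of the behaviour the commit message describes: a rule carrying only a higher-level spec (IPv4 here) is attached to a QP on an InfiniBand port, and the driver now self-generates the wild-carded IB spec carrying the QPN. It uses the ib_verbs flow-steering API of this kernel generation; the function name, port number, and addresses are illustrative assumptions.

#include <rdma/ib_verbs.h>

/* Hypothetical caller: attach an IPv4-only steering rule to a QP whose
 * port runs over the InfiniBand link layer.  No IB_FLOW_SPEC_IB is
 * supplied; with this patch, mlx4 appends it internally.
 */
static int attach_ipv4_rule(struct ib_qp *qp)
{
	struct {
		struct ib_flow_attr      attr;
		struct ib_flow_spec_ipv4 ipv4;	/* specs follow the attr */
	} rule = {
		.attr = {
			.type         = IB_FLOW_ATTR_NORMAL,
			.size         = sizeof(rule),	/* attr + specs */
			.num_of_specs = 1,
			.port         = 1,		/* assumed IB port */
		},
		.ipv4 = {
			.type = IB_FLOW_SPEC_IPV4,
			.size = sizeof(struct ib_flow_spec_ipv4),
			.val  = { .dst_ip = cpu_to_be32(0x0a000001) }, /* 10.0.0.1 */
			.mask = { .dst_ip = cpu_to_be32(0xffffffff) },
		},
	};
	struct ib_flow *flow;

	flow = ib_create_flow(qp, &rule.attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	/* ... later: ib_destroy_flow(flow); */
	return 0;
}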
Diffstat (limited to 'drivers/infiniband/hw/mlx4')
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 135
1 file changed, 134 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index e8d0c5592fd3..6b7f227ca9e4 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -55,6 +55,7 @@
 #define DRV_RELDATE "April 4, 2008"
 
 #define MLX4_IB_FLOW_MAX_PRIO 0xFFF
+#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
 
 MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
@@ -825,6 +826,7 @@ struct mlx4_ib_steering {
 };
 
 static int parse_flow_attr(struct mlx4_dev *dev,
+                           u32 qp_num,
                            union ib_flow_spec *ib_spec,
                            struct _rule_hw *mlx4_spec)
 {
@@ -840,6 +842,14 @@ static int parse_flow_attr(struct mlx4_dev *dev,
                 mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
                 mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
                 break;
+        case IB_FLOW_SPEC_IB:
+                type = MLX4_NET_TRANS_RULE_ID_IB;
+                mlx4_spec->ib.l3_qpn =
+                        cpu_to_be32(qp_num);
+                mlx4_spec->ib.qpn_mask =
+                        cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
+                break;
+
 
         case IB_FLOW_SPEC_IPV4:
                 type = MLX4_NET_TRANS_RULE_ID_IPV4;
@@ -871,6 +881,115 @@ static int parse_flow_attr(struct mlx4_dev *dev,
         return mlx4_hw_rule_sz(dev, type);
 }
 
+struct default_rules {
+        __u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
+        __u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
+        __u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
+        __u8  link_layer;
+};
+static const struct default_rules default_table[] = {
+        {
+                .mandatory_fields = {IB_FLOW_SPEC_IPV4},
+                .mandatory_not_fields = {IB_FLOW_SPEC_ETH},
+                .rules_create_list = {IB_FLOW_SPEC_IB},
+                .link_layer = IB_LINK_LAYER_INFINIBAND
+        }
+};
+
+static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
+                                         struct ib_flow_attr *flow_attr)
+{
+        int i, j, k;
+        void *ib_flow;
+        const struct default_rules *pdefault_rules = default_table;
+        u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
+
+        for (i = 0; i < sizeof(default_table)/sizeof(default_table[0]); i++,
+             pdefault_rules++) {
+                __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
+                memset(&field_types, 0, sizeof(field_types));
+
+                if (link_layer != pdefault_rules->link_layer)
+                        continue;
+
+                ib_flow = flow_attr + 1;
+                /* we assume the specs are sorted */
+                for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
+                     j < flow_attr->num_of_specs; k++) {
+                        union ib_flow_spec *current_flow =
+                                (union ib_flow_spec *)ib_flow;
+
+                        /* same layer but different type */
+                        if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
+                             (pdefault_rules->mandatory_fields[k] &
+                              IB_FLOW_SPEC_LAYER_MASK)) &&
+                            (current_flow->type !=
+                             pdefault_rules->mandatory_fields[k]))
+                                goto out;
+
+                        /* same layer, try match next one */
+                        if (current_flow->type ==
+                            pdefault_rules->mandatory_fields[k]) {
+                                j++;
+                                ib_flow +=
+                                        ((union ib_flow_spec *)ib_flow)->size;
+                        }
+                }
+
+                ib_flow = flow_attr + 1;
+                for (j = 0; j < flow_attr->num_of_specs;
+                     j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
+                        for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
+                                /* same layer and same type */
+                                if (((union ib_flow_spec *)ib_flow)->type ==
+                                    pdefault_rules->mandatory_not_fields[k])
+                                        goto out;
+
+                return i;
+        }
+out:
+        return -1;
+}
+
+static int __mlx4_ib_create_default_rules(
+                struct mlx4_ib_dev *mdev,
+                struct ib_qp *qp,
+                const struct default_rules *pdefault_rules,
+                struct _rule_hw *mlx4_spec) {
+        int size = 0;
+        int i;
+
+        for (i = 0; i < sizeof(pdefault_rules->rules_create_list)/
+                        sizeof(pdefault_rules->rules_create_list[0]); i++) {
+                int ret;
+                union ib_flow_spec ib_spec;
+                switch (pdefault_rules->rules_create_list[i]) {
+                case 0:
+                        /* no rule */
+                        continue;
+                case IB_FLOW_SPEC_IB:
+                        ib_spec.type = IB_FLOW_SPEC_IB;
+                        ib_spec.size = sizeof(struct ib_flow_spec_ib);
+
+                        break;
+                default:
+                        /* invalid rule */
+                        return -EINVAL;
+                }
+                /* We must put empty rule, qpn is being ignored */
+                ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
+                                      mlx4_spec);
+                if (ret < 0) {
+                        pr_info("invalid parsing\n");
+                        return -EINVAL;
+                }
+
+                mlx4_spec = (void *)mlx4_spec + ret;
+                size += ret;
+        }
+        return size;
+}
+
 static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
                                  int domain,
                                  enum mlx4_net_trans_promisc_mode flow_type,
@@ -882,6 +1001,7 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
         struct mlx4_ib_dev *mdev = to_mdev(qp->device);
         struct mlx4_cmd_mailbox *mailbox;
         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
+        int default_flow;
 
         static const u16 __mlx4_domain[] = {
                 [IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
@@ -916,8 +1036,21 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
 
         ib_flow = flow_attr + 1;
         size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
+        /* Add default flows */
+        default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
+        if (default_flow >= 0) {
+                ret = __mlx4_ib_create_default_rules(
+                                mdev, qp, default_table + default_flow,
+                                mailbox->buf + size);
+                if (ret < 0) {
+                        mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+                        return -EINVAL;
+                }
+                size += ret;
+        }
         for (i = 0; i < flow_attr->num_of_specs; i++) {
-                ret = parse_flow_attr(mdev->dev, ib_flow, mailbox->buf + size);
+                ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
+                                      mailbox->buf + size);
                 if (ret < 0) {
                         mlx4_free_cmd_mailbox(mdev->dev, mailbox);
                         return -EINVAL;