diff options
author | Hadar Hen Zion <hadarh@mellanox.com> | 2013-08-07 07:01:59 -0400 |
---|---|---|
committer | Roland Dreier <roland@purestorage.com> | 2013-08-28 12:51:52 -0400 |
commit | 319a441d1361ea703b091caf92418f8121eadfc5 (patch) | |
tree | 869e882aff330a559820d6a217c1cefaba37bc6e /include/rdma | |
parent | c095ba7224d8edc71dcef0d655911399a8bd4a3f (diff) |
IB/core: Add receive flow steering support
The RDMA stack allows for applications to create IB_QPT_RAW_PACKET
QPs, which receive plain Ethernet packets, specifically packets that
don't carry any QPN to be matched by the receiving side. Applications
using these QPs must be provided with a method to program some
steering rule with the HW so packets arriving at the local port can be
routed to them.
This patch adds ib_create_flow(), which allows providing a flow
specification for a QP. When there's a match between the
specification and a received packet, the packet is forwarded to that
QP, in the same way one uses ib_attach_multicast() for IB UD
multicast handling.
Flow specifications are provided as instances of struct ib_flow_spec_yyy,
which describe L2, L3 and L4 headers. Currently specs for Ethernet, IPv4,
TCP and UDP are defined. Flow specs are made of values and masks.
The input to ib_create_flow() is a struct ib_flow_attr, which contains
a few mandatory control elements and optional flow specs.
struct ib_flow_attr {
enum ib_flow_attr_type type;
u16 size;
u16 priority;
u32 flags;
u8 num_of_specs;
u8 port;
/* Following are the optional layers according to user request
* struct ib_flow_spec_yyy
* struct ib_flow_spec_zzz
*/
};
As these specs are eventually coming from user space, they are defined and
used in a way which allows adding new spec types without kernel/user ABI
change, just with a little API enhancement which defines the newly added spec.
The flow spec structures are defined with TLV (Type-Length-Value)
entries, which allows calling ib_create_flow() with a list of variable
length of optional specs.
For the actual processing of ib_flow_attr the driver uses the number
of specs and the size mandatory fields along with the TLV nature of
the specs.
Steering rules processing order is according to the domain over which
the rule is set and the rule priority. All rules set by user space
applications fall into the IB_FLOW_DOMAIN_USER domain, other domains
could be used by future IPoIB RFS and Ethtool flow-steering interface
implementation. Lower numerical value for the priority field means
higher priority.
The returned value from ib_create_flow() is a struct ib_flow, which
contains a database pointer (handle) provided by the HW driver to be
used when calling ib_destroy_flow().
Applications that offload TCP/IP traffic can also be written over IB
UD QPs. The ib_create_flow() / ib_destroy_flow() API is designed to
support UD QPs too. A HW driver can set IB_DEVICE_MANAGED_FLOW_STEERING
to denote support for flow steering.
The ib_flow_attr enum type supports usage of flow steering for promiscuous
and sniffer purposes:
IB_FLOW_ATTR_NORMAL - "regular" rule, steering according to rule specification
IB_FLOW_ATTR_ALL_DEFAULT - default unicast and multicast rule, receive
all Ethernet traffic which isn't steered to any QP
IB_FLOW_ATTR_MC_DEFAULT - same as IB_FLOW_ATTR_ALL_DEFAULT but only for multicast
IB_FLOW_ATTR_SNIFFER - sniffer rule, receive all port traffic
ALL_DEFAULT and MC_DEFAULT rules options are valid only for Ethernet link type.
Signed-off-by: Hadar Hen Zion <hadarh@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
Diffstat (limited to 'include/rdma')
-rw-r--r-- | include/rdma/ib_verbs.h | 119 |
1 files changed, 117 insertions, 2 deletions
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 645c3cedce9c..6f874b00491a 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
@@ -116,7 +116,8 @@ enum ib_device_cap_flags { | |||
116 | IB_DEVICE_MEM_MGT_EXTENSIONS = (1<<21), | 116 | IB_DEVICE_MEM_MGT_EXTENSIONS = (1<<21), |
117 | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22), | 117 | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22), |
118 | IB_DEVICE_MEM_WINDOW_TYPE_2A = (1<<23), | 118 | IB_DEVICE_MEM_WINDOW_TYPE_2A = (1<<23), |
119 | IB_DEVICE_MEM_WINDOW_TYPE_2B = (1<<24) | 119 | IB_DEVICE_MEM_WINDOW_TYPE_2B = (1<<24), |
120 | IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29) | ||
120 | }; | 121 | }; |
121 | 122 | ||
122 | enum ib_atomic_cap { | 123 | enum ib_atomic_cap { |
@@ -1033,7 +1034,8 @@ struct ib_qp { | |||
1033 | struct ib_srq *srq; | 1034 | struct ib_srq *srq; |
1034 | struct ib_xrcd *xrcd; /* XRC TGT QPs only */ | 1035 | struct ib_xrcd *xrcd; /* XRC TGT QPs only */ |
1035 | struct list_head xrcd_list; | 1036 | struct list_head xrcd_list; |
1036 | atomic_t usecnt; /* count times opened, mcast attaches */ | 1037 | /* count times opened, mcast attaches, flow attaches */ |
1038 | atomic_t usecnt; | ||
1037 | struct list_head open_list; | 1039 | struct list_head open_list; |
1038 | struct ib_qp *real_qp; | 1040 | struct ib_qp *real_qp; |
1039 | struct ib_uobject *uobject; | 1041 | struct ib_uobject *uobject; |
@@ -1068,6 +1070,110 @@ struct ib_fmr { | |||
1068 | u32 rkey; | 1070 | u32 rkey; |
1069 | }; | 1071 | }; |
1070 | 1072 | ||
1073 | /* Supported steering options */ | ||
1074 | enum ib_flow_attr_type { | ||
1075 | /* steering according to rule specifications */ | ||
1076 | IB_FLOW_ATTR_NORMAL = 0x0, | ||
1077 | /* default unicast and multicast rule - | ||
1078 | * receive all Eth traffic which isn't steered to any QP | ||
1079 | */ | ||
1080 | IB_FLOW_ATTR_ALL_DEFAULT = 0x1, | ||
1081 | /* default multicast rule - | ||
1082 | * receive all Eth multicast traffic which isn't steered to any QP | ||
1083 | */ | ||
1084 | IB_FLOW_ATTR_MC_DEFAULT = 0x2, | ||
1085 | /* sniffer rule - receive all port traffic */ | ||
1086 | IB_FLOW_ATTR_SNIFFER = 0x3 | ||
1087 | }; | ||
1088 | |||
1089 | /* Supported steering header types */ | ||
1090 | enum ib_flow_spec_type { | ||
1091 | /* L2 headers*/ | ||
1092 | IB_FLOW_SPEC_ETH = 0x20, | ||
1093 | /* L3 header*/ | ||
1094 | IB_FLOW_SPEC_IPV4 = 0x30, | ||
1095 | /* L4 headers*/ | ||
1096 | IB_FLOW_SPEC_TCP = 0x40, | ||
1097 | IB_FLOW_SPEC_UDP = 0x41 | ||
1098 | }; | ||
1099 | |||
1100 | /* Flow steering rule priority is set according to it's domain. | ||
1101 | * Lower domain value means higher priority. | ||
1102 | */ | ||
1103 | enum ib_flow_domain { | ||
1104 | IB_FLOW_DOMAIN_USER, | ||
1105 | IB_FLOW_DOMAIN_ETHTOOL, | ||
1106 | IB_FLOW_DOMAIN_RFS, | ||
1107 | IB_FLOW_DOMAIN_NIC, | ||
1108 | IB_FLOW_DOMAIN_NUM /* Must be last */ | ||
1109 | }; | ||
1110 | |||
1111 | struct ib_flow_eth_filter { | ||
1112 | u8 dst_mac[6]; | ||
1113 | u8 src_mac[6]; | ||
1114 | __be16 ether_type; | ||
1115 | __be16 vlan_tag; | ||
1116 | }; | ||
1117 | |||
1118 | struct ib_flow_spec_eth { | ||
1119 | enum ib_flow_spec_type type; | ||
1120 | u16 size; | ||
1121 | struct ib_flow_eth_filter val; | ||
1122 | struct ib_flow_eth_filter mask; | ||
1123 | }; | ||
1124 | |||
1125 | struct ib_flow_ipv4_filter { | ||
1126 | __be32 src_ip; | ||
1127 | __be32 dst_ip; | ||
1128 | }; | ||
1129 | |||
1130 | struct ib_flow_spec_ipv4 { | ||
1131 | enum ib_flow_spec_type type; | ||
1132 | u16 size; | ||
1133 | struct ib_flow_ipv4_filter val; | ||
1134 | struct ib_flow_ipv4_filter mask; | ||
1135 | }; | ||
1136 | |||
1137 | struct ib_flow_tcp_udp_filter { | ||
1138 | __be16 dst_port; | ||
1139 | __be16 src_port; | ||
1140 | }; | ||
1141 | |||
1142 | struct ib_flow_spec_tcp_udp { | ||
1143 | enum ib_flow_spec_type type; | ||
1144 | u16 size; | ||
1145 | struct ib_flow_tcp_udp_filter val; | ||
1146 | struct ib_flow_tcp_udp_filter mask; | ||
1147 | }; | ||
1148 | |||
1149 | union ib_flow_spec { | ||
1150 | struct { | ||
1151 | enum ib_flow_spec_type type; | ||
1152 | u16 size; | ||
1153 | }; | ||
1154 | struct ib_flow_spec_eth eth; | ||
1155 | struct ib_flow_spec_ipv4 ipv4; | ||
1156 | struct ib_flow_spec_tcp_udp tcp_udp; | ||
1157 | }; | ||
1158 | |||
1159 | struct ib_flow_attr { | ||
1160 | enum ib_flow_attr_type type; | ||
1161 | u16 size; | ||
1162 | u16 priority; | ||
1163 | u32 flags; | ||
1164 | u8 num_of_specs; | ||
1165 | u8 port; | ||
1166 | /* Following are the optional layers according to user request | ||
1167 | * struct ib_flow_spec_xxx | ||
1168 | * struct ib_flow_spec_yyy | ||
1169 | */ | ||
1170 | }; | ||
1171 | |||
1172 | struct ib_flow { | ||
1173 | struct ib_qp *qp; | ||
1174 | struct ib_uobject *uobject; | ||
1175 | }; | ||
1176 | |||
1071 | struct ib_mad; | 1177 | struct ib_mad; |
1072 | struct ib_grh; | 1178 | struct ib_grh; |
1073 | 1179 | ||
@@ -1300,6 +1406,11 @@ struct ib_device { | |||
1300 | struct ib_ucontext *ucontext, | 1406 | struct ib_ucontext *ucontext, |
1301 | struct ib_udata *udata); | 1407 | struct ib_udata *udata); |
1302 | int (*dealloc_xrcd)(struct ib_xrcd *xrcd); | 1408 | int (*dealloc_xrcd)(struct ib_xrcd *xrcd); |
1409 | struct ib_flow * (*create_flow)(struct ib_qp *qp, | ||
1410 | struct ib_flow_attr | ||
1411 | *flow_attr, | ||
1412 | int domain); | ||
1413 | int (*destroy_flow)(struct ib_flow *flow_id); | ||
1303 | 1414 | ||
1304 | struct ib_dma_mapping_ops *dma_ops; | 1415 | struct ib_dma_mapping_ops *dma_ops; |
1305 | 1416 | ||
@@ -2260,4 +2371,8 @@ struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device); | |||
2260 | */ | 2371 | */ |
2261 | int ib_dealloc_xrcd(struct ib_xrcd *xrcd); | 2372 | int ib_dealloc_xrcd(struct ib_xrcd *xrcd); |
2262 | 2373 | ||
2374 | struct ib_flow *ib_create_flow(struct ib_qp *qp, | ||
2375 | struct ib_flow_attr *flow_attr, int domain); | ||
2376 | int ib_destroy_flow(struct ib_flow *flow_id); | ||
2377 | |||
2263 | #endif /* IB_VERBS_H */ | 2378 | #endif /* IB_VERBS_H */ |