author    Moni Shoua <monis@mellanox.com>       2015-02-03 09:48:38 -0500
committer David S. Miller <davem@davemloft.net> 2015-02-04 19:14:25 -0500
commit    146d6e19832a72136089afca51e5229d1fd72dcd (patch)
tree      a3ee40646897b81237e413efd97c58bcba8222d3 /drivers/infiniband/hw
parent    a575009030931cd8a35c88ec81eb26b9e9f73539 (diff)
IB/mlx4: Create mirror flows in port aggregation mode
In port aggregation mode, flows for port #1 (the only port) should be mirrored on port #2, because packets can arrive on either of the physical ports.

Signed-off-by: Moni Shoua <monis@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
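The change, condensed: every steering rule registered on the bond (logical port 1) is replayed on physical port 2, and the primary and mirror registration IDs are stored together so teardown can unregister both. A minimal sketch of that pattern, distilled from the create-flow hunk below (error unwinding elided; the names are the ones the patch introduces):

	/* Register the rule on the logical port, then mirror it. */
	err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
				    &mflow->reg_id[i].id);
	if (is_bonded) {
		/* Replay the identical rule on the second physical port. */
		flow_attr->port = 2;
		err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[j],
					    &mflow->reg_id[j].mirror);
		flow_attr->port = 1;	/* restore for subsequent rules */
	}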
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c     84
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h   9
2 files changed, 80 insertions(+), 13 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index ed21ae68a977..ca522382dedc 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -851,7 +851,7 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
 
 struct mlx4_ib_steering {
 	struct list_head list;
-	u64 reg_id;
+	struct mlx4_flow_reg_id reg_id;
 	union ib_gid gid;
 };
 
@@ -1142,9 +1142,11 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 					   struct ib_flow_attr *flow_attr,
 					   int domain)
 {
-	int err = 0, i = 0;
+	int err = 0, i = 0, j = 0;
 	struct mlx4_ib_flow *mflow;
 	enum mlx4_net_trans_promisc_mode type[2];
+	struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
+	int is_bonded = mlx4_is_bonded(dev);
 
 	memset(type, 0, sizeof(type));
 
@@ -1179,26 +1181,55 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 
 	while (i < ARRAY_SIZE(type) && type[i]) {
 		err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
-					    &mflow->reg_id[i]);
+					    &mflow->reg_id[i].id);
 		if (err)
 			goto err_create_flow;
 		i++;
+		if (is_bonded) {
+			flow_attr->port = 2;
+			err = __mlx4_ib_create_flow(qp, flow_attr,
+						    domain, type[j],
+						    &mflow->reg_id[j].mirror);
+			flow_attr->port = 1;
+			if (err)
+				goto err_create_flow;
+			j++;
+		}
+
 	}
 
 	if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
-		err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
+		err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
+					       &mflow->reg_id[i].id);
 		if (err)
 			goto err_create_flow;
 		i++;
+		if (is_bonded) {
+			flow_attr->port = 2;
+			err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
+						       &mflow->reg_id[j].mirror);
+			flow_attr->port = 1;
+			if (err)
+				goto err_create_flow;
+			j++;
+		}
+		/* function to create mirror rule */
 	}
 
 	return &mflow->ibflow;
 
 err_create_flow:
 	while (i) {
-		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, mflow->reg_id[i]);
+		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
+					     mflow->reg_id[i].id);
 		i--;
 	}
+
+	while (j) {
+		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
+					     mflow->reg_id[j].mirror);
+		j--;
+	}
 err_free:
 	kfree(mflow);
 	return ERR_PTR(err);
@@ -1211,10 +1242,16 @@ static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
 	struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
 	struct mlx4_ib_flow *mflow = to_mflow(flow_id);
 
-	while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i]) {
-		err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i]);
+	while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
+		err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
 		if (err)
 			ret = err;
+		if (mflow->reg_id[i].mirror) {
+			err = __mlx4_ib_destroy_flow(mdev->dev,
+						     mflow->reg_id[i].mirror);
+			if (err)
+				ret = err;
+		}
 		i++;
 	}
 
@@ -1226,11 +1263,12 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
 	int err;
 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+	struct mlx4_dev *dev = mdev->dev;
 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
-	u64 reg_id;
 	struct mlx4_ib_steering *ib_steering = NULL;
 	enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
 		MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
+	struct mlx4_flow_reg_id reg_id;
 
 	if (mdev->dev->caps.steering_mode ==
 	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
@@ -1242,10 +1280,20 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
 				    !!(mqp->flags &
 				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
-				    prot, &reg_id);
+				    prot, &reg_id.id);
 	if (err)
 		goto err_malloc;
 
+	reg_id.mirror = 0;
+	if (mlx4_is_bonded(dev)) {
+		err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, 2,
+					    !!(mqp->flags &
+					    MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
+					    prot, &reg_id.mirror);
+		if (err)
+			goto err_add;
+	}
+
 	err = add_gid_entry(ibqp, gid);
 	if (err)
 		goto err_add;
@@ -1261,7 +1309,10 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 
 err_add:
 	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
-			      prot, reg_id);
+			      prot, reg_id.id);
+	if (reg_id.mirror)
+		mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+				      prot, reg_id.mirror);
 err_malloc:
 	kfree(ib_steering);
 
@@ -1288,10 +1339,12 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
 	int err;
 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+	struct mlx4_dev *dev = mdev->dev;
 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
 	struct net_device *ndev;
 	struct mlx4_ib_gid_entry *ge;
-	u64 reg_id = 0;
+	struct mlx4_flow_reg_id reg_id = {0, 0};
+
 	enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
 		MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
 
@@ -1316,10 +1369,17 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	}
 
 	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
-				    prot, reg_id);
+				    prot, reg_id.id);
 	if (err)
 		return err;
 
+	if (mlx4_is_bonded(dev)) {
+		err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+					    prot, reg_id.mirror);
+		if (err)
+			return err;
+	}
+
 	mutex_lock(&mqp->mutex);
 	ge = find_gid_entry(mqp, gid->raw);
 	if (ge) {
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 6eb743f65f6f..2b49f9de2556 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -134,10 +134,17 @@ struct mlx4_ib_fmr {
 	struct mlx4_fmr mfmr;
 };
 
+#define MAX_REGS_PER_FLOW 2
+
+struct mlx4_flow_reg_id {
+	u64 id;
+	u64 mirror;
+};
+
 struct mlx4_ib_flow {
 	struct ib_flow ibflow;
 	/* translating DMFS verbs sniffer rule to FW API requires two reg IDs */
-	u64 reg_id[2];
+	struct mlx4_flow_reg_id reg_id[MAX_REGS_PER_FLOW];
 };
 
 struct mlx4_ib_wq {