aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
authorRam Amrani <Ram.Amrani@caviumnetworks.com>2016-10-01 14:59:59 -0400
committerDavid S. Miller <davem@davemloft.net>2016-10-03 23:22:47 -0400
commitf109394033521862f2558df93d9afc4dfa829c6a (patch)
tree4c98136fdf25c4beb46face0f703fcfd18c88911 /drivers/net
parentc295f86e60f5ba67f0f4bba2bb2c22b3cbf01ec1 (diff)
qed: Add support for QP verbs
Add support for the slowpath configurations of Queue Pair verbs, which add, delete, modify and query Queue Pairs.

Signed-off-by: Ram Amrani <Ram.Amrani@caviumnetworks.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@caviumnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c1197
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.h71
3 files changed, 1269 insertions, 0 deletions
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index d00ad055802b..2b8bdaa77800 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -176,6 +176,7 @@ u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
176 enum protocol_type type); 176 enum protocol_type type);
177u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn, 177u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
178 enum protocol_type type); 178 enum protocol_type type);
179int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);
179 180
180#define QED_CTX_WORKING_MEM 0 181#define QED_CTX_WORKING_MEM 0
181#define QED_CTX_FL_MEM 1 182#define QED_CTX_FL_MEM 1
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index f9551643428f..438a0badc4e1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -1107,6 +1107,1199 @@ err: dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1107 return rc; 1107 return rc;
1108} 1108}
1109 1109
1110static void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
1111{
1112 p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
1113 p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
1114 p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
1115}
1116
1117static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
1118 __le32 *dst_gid)
1119{
1120 u32 i;
1121
1122 if (qp->roce_mode == ROCE_V2_IPV4) {
1123 /* The IPv4 addresses shall be aligned to the highest word.
1124 * The lower words must be zero.
1125 */
1126 memset(src_gid, 0, sizeof(union qed_gid));
1127 memset(dst_gid, 0, sizeof(union qed_gid));
1128 src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
1129 dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
1130 } else {
1131 /* GIDs and IPv6 addresses coincide in location and size */
1132 for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
1133 src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
1134 dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
1135 }
1136 }
1137}
1138
1139static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
1140{
1141 enum roce_flavor flavor;
1142
1143 switch (roce_mode) {
1144 case ROCE_V1:
1145 flavor = PLAIN_ROCE;
1146 break;
1147 case ROCE_V2_IPV4:
1148 flavor = RROCE_IPV4;
1149 break;
1150 case ROCE_V2_IPV6:
1151 flavor = ROCE_V2_IPV6;
1152 break;
1153 default:
1154 flavor = MAX_ROCE_MODE;
1155 break;
1156 }
1157 return flavor;
1158}
1159
1160int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
1161{
1162 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
1163 u32 responder_icid;
1164 u32 requester_icid;
1165 int rc;
1166
1167 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1168 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
1169 &responder_icid);
1170 if (rc) {
1171 spin_unlock_bh(&p_rdma_info->lock);
1172 return rc;
1173 }
1174
1175 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
1176 &requester_icid);
1177
1178 spin_unlock_bh(&p_rdma_info->lock);
1179 if (rc)
1180 goto err;
1181
1182 /* the two icid's should be adjacent */
1183 if ((requester_icid - responder_icid) != 1) {
1184 DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's'\n");
1185 rc = -EINVAL;
1186 goto err;
1187 }
1188
1189 responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
1190 p_rdma_info->proto);
1191 requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
1192 p_rdma_info->proto);
1193
1194 /* If these icids require a new ILT line allocate DMA-able context for
1195 * an ILT page
1196 */
1197 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
1198 if (rc)
1199 goto err;
1200
1201 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
1202 if (rc)
1203 goto err;
1204
1205 *cid = (u16)responder_icid;
1206 return rc;
1207
1208err:
1209 spin_lock_bh(&p_rdma_info->lock);
1210 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
1211 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);
1212
1213 spin_unlock_bh(&p_rdma_info->lock);
1214 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1215 "Allocate CID - failed, rc = %d\n", rc);
1216 return rc;
1217}
1218
/* Offload the responder (RQ) side of @qp to FW via a CREATE_QP ramrod.
 * Allocates one page for the IRQ ring, fills the ramrod from the qp
 * attributes, and posts it in EBLOCK mode (blocks until FW completes).
 * On success sets qp->resp_offloaded; on failure frees the IRQ memory
 * and returns a negative errno.
 */
static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp)
{
	struct roce_create_qp_resp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	union qed_qm_pq_params qm_params;
	enum roce_flavor roce_flavor;
	struct qed_spq_entry *p_ent;
	u16 physical_queue0 = 0;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* Allocate DMA-able memory for IRQ (in-flight read queue) */
	qp->irq_num_pages = 1;
	qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     RDMA_RING_PAGE_SIZE,
				     &qp->irq_phys_addr, GFP_KERNEL);
	if (!qp->irq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed create responder failed: cannot allocate memory (irq). rc = %d\n",
			  rc);
		return rc;
	}

	/* Get SPQ entry; the responder uses the first icid of the pair
	 * (the requester side uses qp->icid + 1).
	 */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_create_qp_resp;

	p_ramrod->flags = 0;

	roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
		  qp->e2e_flow_control_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
		  qp->min_rnr_nak_timer);

	p_ramrod->max_ird = qp->max_rd_atomic_resp;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->irq_num_pages = qp->irq_num_pages;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
	p_ramrod->pd = cpu_to_le16(qp->pd);
	p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
	DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
	DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
	p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
	/* NOTE(review): dead store — stats_counter_id is overwritten below
	 * with the RESC_START()-based id; presumably leftover, confirm
	 * before removing.
	 */
	p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
	p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
				       qp->rq_cq_id);

	/* Resolve the physical queue for this QP pair (icid >> 1 pairs the
	 * responder/requester icids into one qpid).
	 */
	memset(&qm_params, 0, sizeof(qm_params));
	qm_params.roce.qpid = qp->icid >> 1;
	physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);

	p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
	p_ramrod->dpi = cpu_to_le16(qp->dpi);

	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
	qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

	p_ramrod->udp_src_port = qp->udp_src_port;
	p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
	p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
	p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);

	p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
				     qp->stats_queue;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d physical_queue0 = 0x%x\n",
		   rc, physical_queue0);

	if (rc)
		goto err;

	qp->resp_offloaded = true;

	return rc;

err:
	DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->irq, qp->irq_phys_addr);

	return rc;
}
1351
/* Offload the requester (SQ) side of @qp to FW via a CREATE_QP ramrod.
 * Allocates one page for the ORQ ring, fills the ramrod from the qp
 * attributes, and posts it in EBLOCK mode. Uses cid = qp->icid + 1 (the
 * requester half of the adjacent icid pair). On success sets
 * qp->req_offloaded; on failure frees the ORQ memory.
 */
static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp)
{
	struct roce_create_qp_req_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	union qed_qm_pq_params qm_params;
	enum roce_flavor roce_flavor;
	struct qed_spq_entry *p_ent;
	u16 physical_queue0 = 0;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* Allocate DMA-able memory for ORQ (outstanding read queue) */
	qp->orq_num_pages = 1;
	qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     RDMA_RING_PAGE_SIZE,
				     &qp->orq_phys_addr, GFP_KERNEL);
	if (!qp->orq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed create requester failed: cannot allocate memory (orq). rc = %d\n",
			  rc);
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_RAMROD_CREATE_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_create_qp_req;

	p_ramrod->flags = 0;

	roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
		  qp->rnr_retry_cnt);

	p_ramrod->max_ord = qp->max_rd_atomic_req;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->orq_num_pages = qp->orq_num_pages;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
	p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
	p_ramrod->pd = cpu_to_le16(qp->pd);
	p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
	DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
	DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
	p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
	/* NOTE(review): dead store — stats_counter_id is overwritten below
	 * with the RESC_START()-based id (same pattern as the responder).
	 */
	p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
	p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
				       qp->sq_cq_id);

	/* Resolve the physical queue for this QP pair */
	memset(&qm_params, 0, sizeof(qm_params));
	qm_params.roce.qpid = qp->icid >> 1;
	physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);

	p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
	p_ramrod->dpi = cpu_to_le16(qp->dpi);

	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
	qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

	p_ramrod->udp_src_port = qp->udp_src_port;
	p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
	p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
				     qp->stats_queue;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);

	if (rc)
		goto err;

	qp->req_offloaded = true;

	return rc;

err:
	DP_NOTICE(p_hwfn, "Create requested - failed, rc = %d\n", rc);
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->orq, qp->orq_phys_addr);
	return rc;
}
1468
/* Post a MODIFY_QP ramrod for the responder side of @qp. @move_to_err
 * requests a transition to the error state; @modify_flags
 * (QED_*_MODIFY_QP_VALID_* bits) selects which attributes FW should
 * actually apply. A move-to-error on a never-offloaded responder is a
 * no-op returning 0.
 */
static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp,
					bool move_to_err, u32 modify_flags)
{
	struct roce_modify_qp_resp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (move_to_err && !qp->resp_offloaded)
		return 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_EVENT_MODIFY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc) {
		DP_NOTICE(p_hwfn, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;

	p_ramrod->flags = 0;

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	/* NOTE(review): this uses the CREATE_QP bit definition inside a
	 * MODIFY_QP ramrod — looks like a copy-paste; confirm against the
	 * HSI header whether a ROCE_MODIFY_QP_RESP_RAMROD_DATA_* equivalent
	 * exists and whether the bit offsets coincide.
	 */
	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
		  qp->e2e_flow_control_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));

	p_ramrod->fields = 0;
	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
		  qp->min_rnr_nak_timer);

	p_ramrod->max_ird = qp->max_rd_atomic_resp;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);
	return rc;
}
1561
/* Post a MODIFY_QP ramrod for the requester side of @qp (cid = icid + 1).
 * @move_to_sqd / @move_to_err request state transitions; @modify_flags
 * selects which attributes FW should apply. A move-to-error on a
 * never-offloaded requester is a no-op returning 0.
 */
static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp,
					bool move_to_sqd,
					bool move_to_err, u32 modify_flags)
{
	struct roce_modify_qp_req_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (move_to_err && !(qp->req_offloaded))
		return 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_EVENT_MODIFY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc) {
		DP_NOTICE(p_hwfn, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.roce_modify_qp_req;

	p_ramrod->flags = 0;

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
		  qp->sqd_async);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));

	p_ramrod->fields = 0;
	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);

	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
		  qp->rnr_retry_cnt);

	p_ramrod->max_ord = qp->max_rd_atomic_req;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);
	return rc;
}
1654
1655static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
1656 struct qed_rdma_qp *qp,
1657 u32 *num_invalidated_mw)
1658{
1659 struct roce_destroy_qp_resp_output_params *p_ramrod_res;
1660 struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
1661 struct qed_sp_init_data init_data;
1662 struct qed_spq_entry *p_ent;
1663 dma_addr_t ramrod_res_phys;
1664 int rc;
1665
1666 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1667
1668 if (!qp->resp_offloaded)
1669 return 0;
1670
1671 /* Get SPQ entry */
1672 memset(&init_data, 0, sizeof(init_data));
1673 init_data.cid = qp->icid;
1674 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1675 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1676
1677 rc = qed_sp_init_request(p_hwfn, &p_ent,
1678 ROCE_RAMROD_DESTROY_QP,
1679 PROTOCOLID_ROCE, &init_data);
1680 if (rc)
1681 return rc;
1682
1683 p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;
1684
1685 p_ramrod_res = (struct roce_destroy_qp_resp_output_params *)
1686 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
1687 &ramrod_res_phys, GFP_KERNEL);
1688
1689 if (!p_ramrod_res) {
1690 rc = -ENOMEM;
1691 DP_NOTICE(p_hwfn,
1692 "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
1693 rc);
1694 return rc;
1695 }
1696
1697 DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
1698
1699 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1700 if (rc)
1701 goto err;
1702
1703 *num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
1704
1705 /* Free IRQ - only if ramrod succeeded, in case FW is still using it */
1706 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1707 qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
1708 qp->irq, qp->irq_phys_addr);
1709
1710 qp->resp_offloaded = false;
1711
1712 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);
1713
1714err:
1715 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1716 sizeof(struct roce_destroy_qp_resp_output_params),
1717 p_ramrod_res, ramrod_res_phys);
1718
1719 return rc;
1720}
1721
/* Tear down the requester side of @qp (cid = icid + 1) via a DESTROY_QP
 * ramrod and report the number of bound memory windows through
 * @num_bound_mw. The ORQ ring is freed only after the ramrod succeeds,
 * since FW may still be using it. A never-offloaded requester is a no-op
 * returning 0. Note the output buffer is allocated before the SPQ entry
 * is requested, so an allocation failure cannot leak the entry.
 */
static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
					    struct qed_rdma_qp *qp,
					    u32 *num_bound_mw)
{
	struct roce_destroy_qp_req_output_params *p_ramrod_res;
	struct roce_destroy_qp_req_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	int rc = -ENOMEM;	/* returned as-is if the allocation fails */

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (!qp->req_offloaded)
		return 0;

	p_ramrod_res = (struct roce_destroy_qp_req_output_params *)
		       dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					  sizeof(*p_ramrod_res),
					  &ramrod_res_phys, GFP_KERNEL);
	if (!p_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed destroy requester failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	*num_bound_mw = le32_to_cpu(p_ramrod_res->num_bound_mw);

	/* Free ORQ - only if ramrod succeeded, in case FW is still using it */
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->orq, qp->orq_phys_addr);

	qp->req_offloaded = false;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);

	/* Success path falls through: the output buffer is always freed */
err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}
1783
1784int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
1785 struct qed_rdma_qp *qp,
1786 struct qed_rdma_query_qp_out_params *out_params)
1787{
1788 struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
1789 struct roce_query_qp_req_output_params *p_req_ramrod_res;
1790 struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
1791 struct roce_query_qp_req_ramrod_data *p_req_ramrod;
1792 struct qed_sp_init_data init_data;
1793 dma_addr_t resp_ramrod_res_phys;
1794 dma_addr_t req_ramrod_res_phys;
1795 struct qed_spq_entry *p_ent;
1796 bool rq_err_state;
1797 bool sq_err_state;
1798 bool sq_draining;
1799 int rc = -ENOMEM;
1800
1801 if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) {
1802 /* We can't send ramrod to the fw since this qp wasn't offloaded
1803 * to the fw yet
1804 */
1805 out_params->draining = false;
1806 out_params->rq_psn = qp->rq_psn;
1807 out_params->sq_psn = qp->sq_psn;
1808 out_params->state = qp->cur_state;
1809
1810 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No QPs as no offload\n");
1811 return 0;
1812 }
1813
1814 if (!(qp->resp_offloaded)) {
1815 DP_NOTICE(p_hwfn,
1816 "The responder's qp should be offloded before requester's\n");
1817 return -EINVAL;
1818 }
1819
1820 /* Send a query responder ramrod to FW to get RQ-PSN and state */
1821 p_resp_ramrod_res = (struct roce_query_qp_resp_output_params *)
1822 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1823 sizeof(*p_resp_ramrod_res),
1824 &resp_ramrod_res_phys, GFP_KERNEL);
1825 if (!p_resp_ramrod_res) {
1826 DP_NOTICE(p_hwfn,
1827 "qed query qp failed: cannot allocate memory (ramrod)\n");
1828 return rc;
1829 }
1830
1831 /* Get SPQ entry */
1832 memset(&init_data, 0, sizeof(init_data));
1833 init_data.cid = qp->icid;
1834 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1835 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1836 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
1837 PROTOCOLID_ROCE, &init_data);
1838 if (rc)
1839 goto err_resp;
1840
1841 p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
1842 DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);
1843
1844 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1845 if (rc)
1846 goto err_resp;
1847
1848 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
1849 p_resp_ramrod_res, resp_ramrod_res_phys);
1850
1851 out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
1852 rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
1853 ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
1854
1855 if (!(qp->req_offloaded)) {
1856 /* Don't send query qp for the requester */
1857 out_params->sq_psn = qp->sq_psn;
1858 out_params->draining = false;
1859
1860 if (rq_err_state)
1861 qp->cur_state = QED_ROCE_QP_STATE_ERR;
1862
1863 out_params->state = qp->cur_state;
1864
1865 return 0;
1866 }
1867
1868 /* Send a query requester ramrod to FW to get SQ-PSN and state */
1869 p_req_ramrod_res = (struct roce_query_qp_req_output_params *)
1870 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1871 sizeof(*p_req_ramrod_res),
1872 &req_ramrod_res_phys,
1873 GFP_KERNEL);
1874 if (!p_req_ramrod_res) {
1875 rc = -ENOMEM;
1876 DP_NOTICE(p_hwfn,
1877 "qed query qp failed: cannot allocate memory (ramrod)\n");
1878 return rc;
1879 }
1880
1881 /* Get SPQ entry */
1882 init_data.cid = qp->icid + 1;
1883 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
1884 PROTOCOLID_ROCE, &init_data);
1885 if (rc)
1886 goto err_req;
1887
1888 p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
1889 DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);
1890
1891 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1892 if (rc)
1893 goto err_req;
1894
1895 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
1896 p_req_ramrod_res, req_ramrod_res_phys);
1897
1898 out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
1899 sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
1900 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
1901 sq_draining =
1902 GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
1903 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);
1904
1905 out_params->draining = false;
1906
1907 if (rq_err_state)
1908 qp->cur_state = QED_ROCE_QP_STATE_ERR;
1909 else if (sq_err_state)
1910 qp->cur_state = QED_ROCE_QP_STATE_SQE;
1911 else if (sq_draining)
1912 out_params->draining = true;
1913 out_params->state = qp->cur_state;
1914
1915 return 0;
1916
1917err_req:
1918 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
1919 p_req_ramrod_res, req_ramrod_res_phys);
1920 return rc;
1921err_resp:
1922 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
1923 p_resp_ramrod_res, resp_ramrod_res_phys);
1924 return rc;
1925}
1926
/* Destroy @qp in FW and release its icid pair. The QP must be in RESET,
 * ERR or INIT state. Destroys the responder first, then the requester,
 * and cross-checks that the number of invalidated memory windows matches
 * the number that were bound. The bitmap ids are start-relative, hence
 * the subtraction of the protocol's first cid before release.
 */
int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	u32 num_invalidated_mw = 0;
	u32 num_bound_mw = 0;
	u32 start_cid;
	int rc;

	/* Destroys the specified QP */
	if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
	    (qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
	    (qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
		DP_NOTICE(p_hwfn,
			  "QP must be in error, reset or init state before destroying it\n");
		return -EINVAL;
	}

	rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, &num_invalidated_mw);
	if (rc)
		return rc;

	/* Send destroy requester ramrod */
	rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, &num_bound_mw);
	if (rc)
		return rc;

	/* NOTE(review): on this mismatch the icids are never released —
	 * intentional? confirm whether leaking them is the desired policy.
	 */
	if (num_invalidated_mw != num_bound_mw) {
		DP_NOTICE(p_hwfn,
			  "number of invalidate memory windows is different from bounded ones\n");
		return -EINVAL;
	}

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);

	start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
						p_hwfn->p_rdma_info->proto);

	/* Release responder's icid */
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map,
			    qp->icid - start_cid);

	/* Release requester's icid */
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map,
			    qp->icid + 1 - start_cid);

	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	return 0;
}
1975
1976int qed_rdma_query_qp(void *rdma_cxt,
1977 struct qed_rdma_qp *qp,
1978 struct qed_rdma_query_qp_out_params *out_params)
1979{
1980 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1981 int rc;
1982
1983 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1984
1985 /* The following fields are filled in from qp and not FW as they can't
1986 * be modified by FW
1987 */
1988 out_params->mtu = qp->mtu;
1989 out_params->dest_qp = qp->dest_qp;
1990 out_params->incoming_atomic_en = qp->incoming_atomic_en;
1991 out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
1992 out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
1993 out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
1994 out_params->dgid = qp->dgid;
1995 out_params->flow_label = qp->flow_label;
1996 out_params->hop_limit_ttl = qp->hop_limit_ttl;
1997 out_params->traffic_class_tos = qp->traffic_class_tos;
1998 out_params->timeout = qp->ack_timeout;
1999 out_params->rnr_retry = qp->rnr_retry_cnt;
2000 out_params->retry_cnt = qp->retry_cnt;
2001 out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
2002 out_params->pkey_index = 0;
2003 out_params->max_rd_atomic = qp->max_rd_atomic_req;
2004 out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
2005 out_params->sqd_async = qp->sqd_async;
2006
2007 rc = qed_roce_query_qp(p_hwfn, qp, out_params);
2008
2009 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
2010 return rc;
2011}
2012
2013int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
2014{
2015 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2016 int rc = 0;
2017
2018 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
2019
2020 rc = qed_roce_destroy_qp(p_hwfn, qp);
2021
2022 /* free qp params struct */
2023 kfree(qp);
2024
2025 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
2026 return rc;
2027}
2028
2029struct qed_rdma_qp *
2030qed_rdma_create_qp(void *rdma_cxt,
2031 struct qed_rdma_create_qp_in_params *in_params,
2032 struct qed_rdma_create_qp_out_params *out_params)
2033{
2034 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2035 struct qed_rdma_qp *qp;
2036 u8 max_stats_queues;
2037 int rc;
2038
2039 if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
2040 DP_ERR(p_hwfn->cdev,
2041 "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
2042 rdma_cxt, in_params, out_params);
2043 return NULL;
2044 }
2045
2046 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2047 "qed rdma create qp called with qp_handle = %08x%08x\n",
2048 in_params->qp_handle_hi, in_params->qp_handle_lo);
2049
2050 /* Some sanity checks... */
2051 max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
2052 if (in_params->stats_queue >= max_stats_queues) {
2053 DP_ERR(p_hwfn->cdev,
2054 "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
2055 in_params->stats_queue, max_stats_queues);
2056 return NULL;
2057 }
2058
2059 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
2060 if (!qp) {
2061 DP_NOTICE(p_hwfn, "Failed to allocate qed_rdma_qp\n");
2062 return NULL;
2063 }
2064
2065 rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
2066 qp->qpid = ((0xFF << 16) | qp->icid);
2067
2068 DP_INFO(p_hwfn, "ROCE qpid=%x\n", qp->qpid);
2069
2070 if (rc) {
2071 kfree(qp);
2072 return NULL;
2073 }
2074
2075 qp->cur_state = QED_ROCE_QP_STATE_RESET;
2076 qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
2077 qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
2078 qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
2079 qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
2080 qp->use_srq = in_params->use_srq;
2081 qp->signal_all = in_params->signal_all;
2082 qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
2083 qp->pd = in_params->pd;
2084 qp->dpi = in_params->dpi;
2085 qp->sq_cq_id = in_params->sq_cq_id;
2086 qp->sq_num_pages = in_params->sq_num_pages;
2087 qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
2088 qp->rq_cq_id = in_params->rq_cq_id;
2089 qp->rq_num_pages = in_params->rq_num_pages;
2090 qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
2091 qp->srq_id = in_params->srq_id;
2092 qp->req_offloaded = false;
2093 qp->resp_offloaded = false;
2094 qp->e2e_flow_control_en = qp->use_srq ? false : true;
2095 qp->stats_queue = in_params->stats_queue;
2096
2097 out_params->icid = qp->icid;
2098 out_params->qp_id = qp->qpid;
2099
2100 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
2101 return qp;
2102}
2103
/* Drive the firmware side of a QP state transition.
 *
 * Called after qed_rdma_modify_qp() has already updated qp->cur_state;
 * prev_state carries the state the QP was in before. Each recognized
 * (prev_state, cur_state) pair maps to a specific ramrod sequence; the
 * ordering within each branch (responder before requester, create before
 * modify) is significant and must not be rearranged. Unrecognized
 * transitions are a quiet no-op returning 0.
 */
static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
			      struct qed_rdma_qp *qp,
			      enum qed_roce_qp_state prev_state,
			      struct qed_rdma_modify_qp_in_params *params)
{
	u32 num_invalidated_mw = 0, num_bound_mw = 0;
	int rc = 0;

	/* Perform additional operations according to the current state and the
	 * next state
	 */
	if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
	     (prev_state == QED_ROCE_QP_STATE_RESET)) &&
	    (qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
		/* Init->RTR or Reset->RTR */
		rc = qed_roce_sp_create_responder(p_hwfn, qp);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* RTR-> RTS */
		rc = qed_roce_sp_create_requester(p_hwfn, qp);
		if (rc)
			return rc;

		/* Send modify responder ramrod */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* RTS->RTS */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
		/* RTS->SQD: only the requester is drained (move_to_sqd=true) */
		rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
		/* SQD->SQD */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* SQD->RTS */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);

		return rc;
	} else if (qp->cur_state == QED_ROCE_QP_STATE_ERR ||
		   qp->cur_state == QED_ROCE_QP_STATE_SQE) {
		/* ->ERR: responder moves to error (move_to_err=true),
		 * requester is flushed (fail_sq/err flag set).
		 */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
						  params->modify_flags);
		return rc;
	} else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
		/* Any state -> RESET */

		/* Destroy both offloaded halves; each side reports its
		 * memory-window count so the pairing can be sanity-checked.
		 */
		rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
						      &num_invalidated_mw);
		if (rc)
			return rc;

		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
						      &num_bound_mw);

		/* Every MW bound by the requester must have been
		 * invalidated on the responder side by now.
		 */
		if (num_invalidated_mw != num_bound_mw) {
			DP_NOTICE(p_hwfn,
				  "number of invalidate memory windows is different from bounded ones\n");
			return -EINVAL;
		}
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
	}

	return rc;
}
2205
2206int qed_rdma_modify_qp(void *rdma_cxt,
2207 struct qed_rdma_qp *qp,
2208 struct qed_rdma_modify_qp_in_params *params)
2209{
2210 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2211 enum qed_roce_qp_state prev_state;
2212 int rc = 0;
2213
2214 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
2215 qp->icid, params->new_state);
2216
2217 if (rc) {
2218 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
2219 return rc;
2220 }
2221
2222 if (GET_FIELD(params->modify_flags,
2223 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
2224 qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
2225 qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
2226 qp->incoming_atomic_en = params->incoming_atomic_en;
2227 }
2228
2229 /* Update QP structure with the updated values */
2230 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
2231 qp->roce_mode = params->roce_mode;
2232 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
2233 qp->pkey = params->pkey;
2234 if (GET_FIELD(params->modify_flags,
2235 QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
2236 qp->e2e_flow_control_en = params->e2e_flow_control_en;
2237 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
2238 qp->dest_qp = params->dest_qp;
2239 if (GET_FIELD(params->modify_flags,
2240 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
2241 /* Indicates that the following parameters have changed:
2242 * Traffic class, flow label, hop limit, source GID,
2243 * destination GID, loopback indicator
2244 */
2245 qp->traffic_class_tos = params->traffic_class_tos;
2246 qp->flow_label = params->flow_label;
2247 qp->hop_limit_ttl = params->hop_limit_ttl;
2248
2249 qp->sgid = params->sgid;
2250 qp->dgid = params->dgid;
2251 qp->udp_src_port = 0;
2252 qp->vlan_id = params->vlan_id;
2253 qp->mtu = params->mtu;
2254 qp->lb_indication = params->lb_indication;
2255 memcpy((u8 *)&qp->remote_mac_addr[0],
2256 (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
2257 if (params->use_local_mac) {
2258 memcpy((u8 *)&qp->local_mac_addr[0],
2259 (u8 *)&params->local_mac_addr[0], ETH_ALEN);
2260 } else {
2261 memcpy((u8 *)&qp->local_mac_addr[0],
2262 (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
2263 }
2264 }
2265 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
2266 qp->rq_psn = params->rq_psn;
2267 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
2268 qp->sq_psn = params->sq_psn;
2269 if (GET_FIELD(params->modify_flags,
2270 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
2271 qp->max_rd_atomic_req = params->max_rd_atomic_req;
2272 if (GET_FIELD(params->modify_flags,
2273 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
2274 qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
2275 if (GET_FIELD(params->modify_flags,
2276 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
2277 qp->ack_timeout = params->ack_timeout;
2278 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
2279 qp->retry_cnt = params->retry_cnt;
2280 if (GET_FIELD(params->modify_flags,
2281 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
2282 qp->rnr_retry_cnt = params->rnr_retry_cnt;
2283 if (GET_FIELD(params->modify_flags,
2284 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
2285 qp->min_rnr_nak_timer = params->min_rnr_nak_timer;
2286
2287 qp->sqd_async = params->sqd_async;
2288
2289 prev_state = qp->cur_state;
2290 if (GET_FIELD(params->modify_flags,
2291 QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
2292 qp->cur_state = params->new_state;
2293 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
2294 qp->cur_state);
2295 }
2296
2297 rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
2298
2299 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
2300 return rc;
2301}
2302
1110static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev) 2303static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
1111{ 2304{
1112 return QED_LEADING_HWFN(cdev); 2305 return QED_LEADING_HWFN(cdev);
@@ -1201,6 +2394,10 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = {
1201 .rdma_dealloc_pd = &qed_rdma_free_pd, 2394 .rdma_dealloc_pd = &qed_rdma_free_pd,
1202 .rdma_create_cq = &qed_rdma_create_cq, 2395 .rdma_create_cq = &qed_rdma_create_cq,
1203 .rdma_destroy_cq = &qed_rdma_destroy_cq, 2396 .rdma_destroy_cq = &qed_rdma_destroy_cq,
2397 .rdma_create_qp = &qed_rdma_create_qp,
2398 .rdma_modify_qp = &qed_rdma_modify_qp,
2399 .rdma_query_qp = &qed_rdma_query_qp,
2400 .rdma_destroy_qp = &qed_rdma_destroy_qp,
1204}; 2401};
1205 2402
1206const struct qed_rdma_ops *qed_get_rdma_ops() 2403const struct qed_rdma_ops *qed_get_rdma_ops()
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
index 1fe73707e0b5..b8ddda456101 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -114,6 +114,72 @@ struct qed_rdma_resize_cnq_in_params {
114 u64 pbl_ptr; 114 u64 pbl_ptr;
115}; 115};
116 116
/* Driver-side shadow of a RoCE queue pair. Holds the attributes set at
 * create/modify time plus the DMA resources for the offloaded requester
 * and responder halves.
 */
struct qed_rdma_qp {
	struct regpair qp_handle;
	struct regpair qp_handle_async;
	u32 qpid;		/* derived from icid in qed_rdma_create_qp() */
	u16 icid;
	enum qed_roce_qp_state cur_state;
	bool use_srq;
	bool signal_all;
	bool fmr_and_reserved_lkey;

	/* incoming-operation enables */
	bool incoming_rdma_read_en;
	bool incoming_rdma_write_en;
	bool incoming_atomic_en;
	bool e2e_flow_control_en;	/* forced off when use_srq is set */

	u16 pd;
	u16 pkey;
	u32 dest_qp;
	u16 mtu;
	u16 srq_id;
	u8 traffic_class_tos;
	u8 hop_limit_ttl;
	u16 dpi;
	u32 flow_label;
	bool lb_indication;
	u16 vlan_id;
	u32 ack_timeout;
	u8 retry_cnt;
	u8 rnr_retry_cnt;
	u8 min_rnr_nak_timer;
	bool sqd_async;
	union qed_gid sgid;
	union qed_gid dgid;
	enum roce_mode roce_mode;
	u16 udp_src_port;	/* cleared on address-vector modify */
	u8 stats_queue;

	/* requester */
	u8 max_rd_atomic_req;
	u32 sq_psn;
	u16 sq_cq_id;
	u16 sq_num_pages;
	dma_addr_t sq_pbl_ptr;
	void *orq;
	dma_addr_t orq_phys_addr;
	u8 orq_num_pages;
	bool req_offloaded;

	/* responder */
	u8 max_rd_atomic_resp;
	u32 rq_psn;
	u16 rq_cq_id;
	u16 rq_num_pages;
	dma_addr_t rq_pbl_ptr;
	void *irq;
	dma_addr_t irq_phys_addr;
	u8 irq_num_pages;
	bool resp_offloaded;

	u8 remote_mac_addr[6];
	u8 local_mac_addr[6];

	void *shared_queue;
	dma_addr_t shared_queue_phys_addr;
};
182
117int 183int
118qed_rdma_add_user(void *rdma_cxt, 184qed_rdma_add_user(void *rdma_cxt,
119 struct qed_rdma_add_user_out_params *out_params); 185 struct qed_rdma_add_user_out_params *out_params);
@@ -135,6 +201,11 @@ void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);
135void qed_rdma_resc_free(struct qed_hwfn *p_hwfn); 201void qed_rdma_resc_free(struct qed_hwfn *p_hwfn);
136void qed_async_roce_event(struct qed_hwfn *p_hwfn, 202void qed_async_roce_event(struct qed_hwfn *p_hwfn,
137 struct event_ring_entry *p_eqe); 203 struct event_ring_entry *p_eqe);
204int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp);
205int qed_rdma_modify_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
206 struct qed_rdma_modify_qp_in_params *params);
207int qed_rdma_query_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
208 struct qed_rdma_query_qp_out_params *out_params);
138 209
139#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) 210#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
140void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); 211void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);