summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorVarun Prakash <varun@chelsio.com>2016-12-01 09:58:29 -0500
committerMartin K. Petersen <martin.petersen@oracle.com>2016-12-14 15:09:13 -0500
commit44830d8fd28a729729d14bb160341a6170631eb7 (patch)
tree1c103c0e31d7454671542ded8c25800af002ec40
parent586be7cb694fdbb3a35cc35c03387ce0fc534572 (diff)
scsi: cxgb4i: libcxgbi: cxgb4: add T6 iSCSI completion feature
T6 adapters reduce the number of completions to the host by generating a single completion for all the directly placed (DDP) iSCSI PDUs in a sequence. This patch adds a new structure for the hardware completion message (struct cpl_rx_iscsi_cmp) and implements the T6 completion feature. Signed-off-by: Varun Prakash <varun@chelsio.com> Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h13
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c219
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c19
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h1
4 files changed, 226 insertions, 26 deletions
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index fba3b2ad382d..a267173f5997 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -76,6 +76,7 @@ enum {
76 CPL_PASS_ESTABLISH = 0x41, 76 CPL_PASS_ESTABLISH = 0x41,
77 CPL_RX_DATA_DDP = 0x42, 77 CPL_RX_DATA_DDP = 0x42,
78 CPL_PASS_ACCEPT_REQ = 0x44, 78 CPL_PASS_ACCEPT_REQ = 0x44,
79 CPL_RX_ISCSI_CMP = 0x45,
79 CPL_TRACE_PKT_T5 = 0x48, 80 CPL_TRACE_PKT_T5 = 0x48,
80 CPL_RX_ISCSI_DDP = 0x49, 81 CPL_RX_ISCSI_DDP = 0x49,
81 82
@@ -934,6 +935,18 @@ struct cpl_iscsi_data {
934 __u8 status; 935 __u8 status;
935}; 936};
936 937
938struct cpl_rx_iscsi_cmp {
939 union opcode_tid ot;
940 __be16 pdu_len_ddp;
941 __be16 len;
942 __be32 seq;
943 __be16 urg;
944 __u8 rsvd;
945 __u8 status;
946 __be32 ulp_crc;
947 __be32 ddpvld;
948};
949
937struct cpl_tx_data_iso { 950struct cpl_tx_data_iso {
938 __be32 op_to_scsi; 951 __be32 op_to_scsi;
939 __u8 reserved1; 952 __u8 reserved1;
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 01a2f2f315f8..57401b58efce 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1232,6 +1232,101 @@ rel_skb:
1232 __kfree_skb(skb); 1232 __kfree_skb(skb);
1233} 1233}
1234 1234
1235static void do_rx_iscsi_data(struct cxgbi_device *cdev, struct sk_buff *skb)
1236{
1237 struct cxgbi_sock *csk;
1238 struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
1239 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1240 struct tid_info *t = lldi->tids;
1241 struct sk_buff *lskb;
1242 u32 tid = GET_TID(cpl);
1243 u16 pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
1244
1245 csk = lookup_tid(t, tid);
1246 if (unlikely(!csk)) {
1247 pr_err("can't find conn. for tid %u.\n", tid);
1248 goto rel_skb;
1249 }
1250
1251 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1252 "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
1253 csk, csk->state, csk->flags, csk->tid, skb,
1254 skb->len, pdu_len_ddp);
1255
1256 spin_lock_bh(&csk->lock);
1257
1258 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
1259 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1260 "csk 0x%p,%u,0x%lx,%u, bad state.\n",
1261 csk, csk->state, csk->flags, csk->tid);
1262
1263 if (csk->state != CTP_ABORTING)
1264 goto abort_conn;
1265 else
1266 goto discard;
1267 }
1268
1269 cxgbi_skcb_tcp_seq(skb) = be32_to_cpu(cpl->seq);
1270 cxgbi_skcb_flags(skb) = 0;
1271
1272 skb_reset_transport_header(skb);
1273 __skb_pull(skb, sizeof(*cpl));
1274 __pskb_trim(skb, ntohs(cpl->len));
1275
1276 if (!csk->skb_ulp_lhdr)
1277 csk->skb_ulp_lhdr = skb;
1278
1279 lskb = csk->skb_ulp_lhdr;
1280 cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
1281
1282 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1283 "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
1284 csk, csk->state, csk->flags, skb, lskb);
1285
1286 __skb_queue_tail(&csk->receive_queue, skb);
1287 spin_unlock_bh(&csk->lock);
1288 return;
1289
1290abort_conn:
1291 send_abort_req(csk);
1292discard:
1293 spin_unlock_bh(&csk->lock);
1294rel_skb:
1295 __kfree_skb(skb);
1296}
1297
1298static void
1299cxgb4i_process_ddpvld(struct cxgbi_sock *csk,
1300 struct sk_buff *skb, u32 ddpvld)
1301{
1302 if (ddpvld & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
1303 pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
1304 csk, skb, ddpvld, cxgbi_skcb_flags(skb));
1305 cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
1306 }
1307
1308 if (ddpvld & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
1309 pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
1310 csk, skb, ddpvld, cxgbi_skcb_flags(skb));
1311 cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
1312 }
1313
1314 if (ddpvld & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
1315 log_debug(1 << CXGBI_DBG_PDU_RX,
1316 "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
1317 csk, skb, ddpvld);
1318 cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);
1319 }
1320
1321 if ((ddpvld & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
1322 !cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
1323 log_debug(1 << CXGBI_DBG_PDU_RX,
1324 "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
1325 csk, skb, ddpvld);
1326 cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);
1327 }
1328}
1329
1235static void do_rx_data_ddp(struct cxgbi_device *cdev, 1330static void do_rx_data_ddp(struct cxgbi_device *cdev,
1236 struct sk_buff *skb) 1331 struct sk_buff *skb)
1237{ 1332{
@@ -1241,7 +1336,7 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev,
1241 unsigned int tid = GET_TID(rpl); 1336 unsigned int tid = GET_TID(rpl);
1242 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 1337 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1243 struct tid_info *t = lldi->tids; 1338 struct tid_info *t = lldi->tids;
1244 unsigned int status = ntohl(rpl->ddpvld); 1339 u32 ddpvld = be32_to_cpu(rpl->ddpvld);
1245 1340
1246 csk = lookup_tid(t, tid); 1341 csk = lookup_tid(t, tid);
1247 if (unlikely(!csk)) { 1342 if (unlikely(!csk)) {
@@ -1251,7 +1346,7 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev,
1251 1346
1252 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, 1347 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1253 "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n", 1348 "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
1254 csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr); 1349 csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr);
1255 1350
1256 spin_lock_bh(&csk->lock); 1351 spin_lock_bh(&csk->lock);
1257 1352
@@ -1279,29 +1374,8 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev,
1279 pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n", 1374 pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
1280 csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb)); 1375 csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));
1281 1376
1282 if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) { 1377 cxgb4i_process_ddpvld(csk, lskb, ddpvld);
1283 pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n", 1378
1284 csk, lskb, status, cxgbi_skcb_flags(lskb));
1285 cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR);
1286 }
1287 if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
1288 pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
1289 csk, lskb, status, cxgbi_skcb_flags(lskb));
1290 cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR);
1291 }
1292 if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
1293 log_debug(1 << CXGBI_DBG_PDU_RX,
1294 "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
1295 csk, lskb, status);
1296 cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR);
1297 }
1298 if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
1299 !cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) {
1300 log_debug(1 << CXGBI_DBG_PDU_RX,
1301 "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
1302 csk, lskb, status);
1303 cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD);
1304 }
1305 log_debug(1 << CXGBI_DBG_PDU_RX, 1379 log_debug(1 << CXGBI_DBG_PDU_RX,
1306 "csk 0x%p, lskb 0x%p, f 0x%lx.\n", 1380 "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
1307 csk, lskb, cxgbi_skcb_flags(lskb)); 1381 csk, lskb, cxgbi_skcb_flags(lskb));
@@ -1319,6 +1393,98 @@ rel_skb:
1319 __kfree_skb(skb); 1393 __kfree_skb(skb);
1320} 1394}
1321 1395
1396static void
1397do_rx_iscsi_cmp(struct cxgbi_device *cdev, struct sk_buff *skb)
1398{
1399 struct cxgbi_sock *csk;
1400 struct cpl_rx_iscsi_cmp *rpl = (struct cpl_rx_iscsi_cmp *)skb->data;
1401 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1402 struct tid_info *t = lldi->tids;
1403 struct sk_buff *data_skb = NULL;
1404 u32 tid = GET_TID(rpl);
1405 u32 ddpvld = be32_to_cpu(rpl->ddpvld);
1406 u32 seq = be32_to_cpu(rpl->seq);
1407 u16 pdu_len_ddp = be16_to_cpu(rpl->pdu_len_ddp);
1408
1409 csk = lookup_tid(t, tid);
1410 if (unlikely(!csk)) {
1411 pr_err("can't find connection for tid %u.\n", tid);
1412 goto rel_skb;
1413 }
1414
1415 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1416 "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p, len %u, "
1417 "pdu_len_ddp %u, status %u.\n",
1418 csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr,
1419 ntohs(rpl->len), pdu_len_ddp, rpl->status);
1420
1421 spin_lock_bh(&csk->lock);
1422
1423 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
1424 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1425 "csk 0x%p,%u,0x%lx,%u, bad state.\n",
1426 csk, csk->state, csk->flags, csk->tid);
1427
1428 if (csk->state != CTP_ABORTING)
1429 goto abort_conn;
1430 else
1431 goto discard;
1432 }
1433
1434 cxgbi_skcb_tcp_seq(skb) = seq;
1435 cxgbi_skcb_flags(skb) = 0;
1436 cxgbi_skcb_rx_pdulen(skb) = 0;
1437
1438 skb_reset_transport_header(skb);
1439 __skb_pull(skb, sizeof(*rpl));
1440 __pskb_trim(skb, be16_to_cpu(rpl->len));
1441
1442 csk->rcv_nxt = seq + pdu_len_ddp;
1443
1444 if (csk->skb_ulp_lhdr) {
1445 data_skb = skb_peek(&csk->receive_queue);
1446 if (!data_skb ||
1447 !cxgbi_skcb_test_flag(data_skb, SKCBF_RX_DATA)) {
1448 pr_err("Error! freelist data not found 0x%p, tid %u\n",
1449 data_skb, tid);
1450
1451 goto abort_conn;
1452 }
1453 __skb_unlink(data_skb, &csk->receive_queue);
1454
1455 cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA);
1456
1457 __skb_queue_tail(&csk->receive_queue, skb);
1458 __skb_queue_tail(&csk->receive_queue, data_skb);
1459 } else {
1460 __skb_queue_tail(&csk->receive_queue, skb);
1461 }
1462
1463 csk->skb_ulp_lhdr = NULL;
1464
1465 cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
1466 cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
1467 cxgbi_skcb_set_flag(skb, SKCBF_RX_ISCSI_COMPL);
1468 cxgbi_skcb_rx_ddigest(skb) = be32_to_cpu(rpl->ulp_crc);
1469
1470 cxgb4i_process_ddpvld(csk, skb, ddpvld);
1471
1472 log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p, f 0x%lx.\n",
1473 csk, skb, cxgbi_skcb_flags(skb));
1474
1475 cxgbi_conn_pdu_ready(csk);
1476 spin_unlock_bh(&csk->lock);
1477
1478 return;
1479
1480abort_conn:
1481 send_abort_req(csk);
1482discard:
1483 spin_unlock_bh(&csk->lock);
1484rel_skb:
1485 __kfree_skb(skb);
1486}
1487
1322static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb) 1488static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
1323{ 1489{
1324 struct cxgbi_sock *csk; 1490 struct cxgbi_sock *csk;
@@ -1582,10 +1748,11 @@ static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
1582 [CPL_CLOSE_CON_RPL] = do_close_con_rpl, 1748 [CPL_CLOSE_CON_RPL] = do_close_con_rpl,
1583 [CPL_FW4_ACK] = do_fw4_ack, 1749 [CPL_FW4_ACK] = do_fw4_ack,
1584 [CPL_ISCSI_HDR] = do_rx_iscsi_hdr, 1750 [CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
1585 [CPL_ISCSI_DATA] = do_rx_iscsi_hdr, 1751 [CPL_ISCSI_DATA] = do_rx_iscsi_data,
1586 [CPL_SET_TCB_RPL] = do_set_tcb_rpl, 1752 [CPL_SET_TCB_RPL] = do_set_tcb_rpl,
1587 [CPL_RX_DATA_DDP] = do_rx_data_ddp, 1753 [CPL_RX_DATA_DDP] = do_rx_data_ddp,
1588 [CPL_RX_ISCSI_DDP] = do_rx_data_ddp, 1754 [CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
1755 [CPL_RX_ISCSI_CMP] = do_rx_iscsi_cmp,
1589 [CPL_RX_DATA] = do_rx_data, 1756 [CPL_RX_DATA] = do_rx_data,
1590}; 1757};
1591 1758
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 542337889224..eb4af124d5cd 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -1574,6 +1574,25 @@ static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb)
1574 return -EIO; 1574 return -EIO;
1575 } 1575 }
1576 1576
1577 if (cxgbi_skcb_test_flag(skb, SKCBF_RX_ISCSI_COMPL) &&
1578 cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA_DDPD)) {
1579 /* If completion flag is set and data is directly
1580 * placed in to the host memory then update
1581 * task->exp_datasn to the datasn in completion
1582 * iSCSI hdr as T6 adapter generates completion only
1583 * for the last pdu of a sequence.
1584 */
1585 itt_t itt = ((struct iscsi_data *)skb->data)->itt;
1586 struct iscsi_task *task = iscsi_itt_to_ctask(conn, itt);
1587 u32 data_sn = be32_to_cpu(((struct iscsi_data *)
1588 skb->data)->datasn);
1589 if (task && task->sc) {
1590 struct iscsi_tcp_task *tcp_task = task->dd_data;
1591
1592 tcp_task->exp_datasn = data_sn;
1593 }
1594 }
1595
1577 return read_pdu_skb(conn, skb, 0, 0); 1596 return read_pdu_skb(conn, skb, 0, 0);
1578} 1597}
1579 1598
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index e7802738f5d2..85bae613d860 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -207,6 +207,7 @@ enum cxgbi_skcb_flags {
207 SKCBF_RX_HDR, /* received pdu header */ 207 SKCBF_RX_HDR, /* received pdu header */
208 SKCBF_RX_DATA, /* received pdu payload */ 208 SKCBF_RX_DATA, /* received pdu payload */
209 SKCBF_RX_STATUS, /* received ddp status */ 209 SKCBF_RX_STATUS, /* received ddp status */
210 SKCBF_RX_ISCSI_COMPL, /* received iscsi completion */
210 SKCBF_RX_DATA_DDPD, /* pdu payload ddp'd */ 211 SKCBF_RX_DATA_DDPD, /* pdu payload ddp'd */
211 SKCBF_RX_HCRC_ERR, /* header digest error */ 212 SKCBF_RX_HCRC_ERR, /* header digest error */
212 SKCBF_RX_DCRC_ERR, /* data digest error */ 213 SKCBF_RX_DCRC_ERR, /* data digest error */