author	Atul Gupta <atul.gupta@chelsio.com>	2018-03-31 12:12:01 -0400
committer	David S. Miller <davem@davemloft.net>	2018-03-31 23:37:33 -0400
commit	b647993fca1460937bd05f07c87a8234054a68f7 (patch)
tree	259e2a85ed0760a763d39bf558ce53b034165d9d
parent	36bedb3f2e5b81832b5895363ed3fedb9ff1e8d0 (diff)
crypto: chtls - Inline TLS record Rx

Add a handler for inline TLS record receive; decrypted plain text is
copied to the user buffer.

Signed-off-by: Atul Gupta <atul.gupta@chelsio.com>
Signed-off-by: Michael Werner <werner@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
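For context: nothing TLS-specific is needed in the application on this
path. Once an Rx key is programmed, chtls_recvmsg() detects it via
is_tls_rx() and takes the chtls_pt_recvmsg() path, so an ordinary
recv() returns decrypted plain text. A minimal user-space sketch (fd,
buffer, and sizes are illustrative; assumes the connection was set up
through the chtls ULP with an Rx key installed):

	#include <stdio.h>
	#include <sys/socket.h>

	/* Read decrypted record payload from a chtls-offloaded socket.
	 * The kernel side strips and decrypts TLS records inline, so
	 * this looks identical to reading a plain TCP socket.
	 */
	static ssize_t read_plaintext(int fd, char *buf, size_t cap)
	{
		ssize_t n = recv(fd, buf, cap, 0);

		if (n < 0)
			perror("recv");
		return n;
	}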
-rw-r--r--	drivers/crypto/chelsio/chtls/chtls_io.c		602
-rw-r--r--	drivers/crypto/chelsio/chtls/chtls_main.c	1
2 files changed, 602 insertions(+), 1 deletion(-)
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index 6974d3ed0ff5..5a75be43950f 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -30,6 +30,11 @@ static bool is_tls_tx(struct chtls_sock *csk)
 	return csk->tlshws.txkey >= 0;
 }
 
+static bool is_tls_rx(struct chtls_sock *csk)
+{
+	return csk->tlshws.rxkey >= 0;
+}
+
 static int data_sgl_len(const struct sk_buff *skb)
 {
 	unsigned int cnt;
@@ -106,10 +111,12 @@ static int send_flowc_wr(struct sock *sk, struct fw_flowc_wr *flowc,
 {
 	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
-	int flowclen16 = flowclen / 16;
 	struct sk_buff *skb;
+	int flowclen16;
 	int ret;
 
+	flowclen16 = flowclen / 16;
+
 	if (csk_flag(sk, CSK_TX_DATA_SENT)) {
 		skb = create_flowc_wr_skb(sk, flowc, flowclen);
 		if (!skb)
@@ -1220,3 +1227,596 @@ out_err:
 	copied = sk_stream_error(sk, flags, err);
 	goto done;
 }
+
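+/* Recompute the advertised receive window, clamped to
+ * [MIN_RCV_WND, MAX_RCV_WND], so that a receive buffer grown at
+ * run time is reflected in the window we offer.
+ */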
+static void chtls_select_window(struct sock *sk)
+{
+	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+	unsigned int wnd = tp->rcv_wnd;
+
+	wnd = max_t(unsigned int, wnd, tcp_full_space(sk));
+	wnd = max_t(unsigned int, MIN_RCV_WND, wnd);
+
+	if (wnd > MAX_RCV_WND)
+		wnd = MAX_RCV_WND;
+
+/*
+ * Check if we need to grow the receive window in response to an increase in
+ * the socket's receive buffer size.  Some applications increase the buffer
+ * size dynamically and rely on the window to grow accordingly.
+ */
+
+	if (wnd > tp->rcv_wnd) {
+		tp->rcv_wup -= wnd - tp->rcv_wnd;
+		tp->rcv_wnd = wnd;
+		/* Mark the receive window as updated */
+		csk_reset_flag(csk, CSK_UPDATE_RCV_WND);
+	}
+}
+
+/*
+ * Send RX credits through an RX_DATA_ACK CPL message.  We are permitted
+ * to return without sending the message in case we cannot allocate
+ * an sk_buff.  Returns the number of credits sent.
+ */
+static u32 send_rx_credits(struct chtls_sock *csk, u32 credits)
+{
+	struct cpl_rx_data_ack *req;
+	struct sk_buff *skb;
+
+	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
+	if (!skb)
+		return 0;
+	__skb_put(skb, sizeof(*req));
+	req = (struct cpl_rx_data_ack *)skb->head;
+
+	set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
+	INIT_TP_WR(req, csk->tid);
+	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
+						    csk->tid));
+	req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits) |
+				       RX_FORCE_ACK_F);
+	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
+	return credits;
+}
+
+#define CREDIT_RETURN_STATE (TCPF_ESTABLISHED | \
+			     TCPF_FIN_WAIT1 | \
+			     TCPF_FIN_WAIT2)
+
+/*
+ * Called after some received data has been read.  It returns RX credits
+ * to the HW for the amount of data processed.
+ */
+static void chtls_cleanup_rbuf(struct sock *sk, int copied)
+{
+	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+	struct tcp_sock *tp;
+	int must_send;
+	u32 credits;
+	u32 thres;
+
+	thres = 15 * 1024;
+
+	if (!sk_in_state(sk, CREDIT_RETURN_STATE))
+		return;
+
+	chtls_select_window(sk);
+	tp = tcp_sk(sk);
+	credits = tp->copied_seq - tp->rcv_wup;
+	if (unlikely(!credits))
+		return;
+
+/*
+ * For coalescing to work effectively ensure the receive window has
+ * at least 16KB left.
+ */
+	must_send = credits + 16384 >= tp->rcv_wnd;
+
+	if (must_send || credits >= thres)
+		tp->rcv_wup += send_rx_credits(csk, credits);
+}
+
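+/* Receive for connections with an inline TLS Rx key: the hardware
+ * delivers the TLS header and the decrypted payload separately, and
+ * hws->rstate tracks whether the next skb is a header
+ * (TLS_RCV_ST_READ_HEADER) or a record body (TLS_RCV_ST_READ_BODY).
+ * Plain text is copied to the user buffer.
+ */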
+static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+			    int nonblock, int flags, int *addr_len)
+{
+	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+	struct net_device *dev = csk->egress_dev;
+	struct chtls_hws *hws = &csk->tlshws;
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct adapter *adap;
+	unsigned long avail;
+	int buffers_freed;
+	int copied = 0;
+	int request;
+	int target;
+	long timeo;
+
+	adap = netdev2adap(dev);
+	buffers_freed = 0;
+
+	timeo = sock_rcvtimeo(sk, nonblock);
+	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+	request = len;
+
+	if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
+		chtls_cleanup_rbuf(sk, copied);
+
+	do {
+		struct sk_buff *skb;
+		u32 offset = 0;
+
+		if (unlikely(tp->urg_data &&
+			     tp->urg_seq == tp->copied_seq)) {
+			if (copied)
+				break;
+			if (signal_pending(current)) {
+				copied = timeo ? sock_intr_errno(timeo) :
+					-EAGAIN;
+				break;
+			}
+		}
+		skb = skb_peek(&sk->sk_receive_queue);
+		if (skb)
+			goto found_ok_skb;
+		if (csk->wr_credits &&
+		    skb_queue_len(&csk->txq) &&
+		    chtls_push_frames(csk, csk->wr_credits ==
+				      csk->wr_max_credits))
+			sk->sk_write_space(sk);
+
+		if (copied >= target && !sk->sk_backlog.tail)
+			break;
+
+		if (copied) {
+			if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
+			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
+			    signal_pending(current))
+				break;
+
+			if (!timeo)
+				break;
+		} else {
+			if (sock_flag(sk, SOCK_DONE))
+				break;
+			if (sk->sk_err) {
+				copied = sock_error(sk);
+				break;
+			}
+			if (sk->sk_shutdown & RCV_SHUTDOWN)
+				break;
+			if (sk->sk_state == TCP_CLOSE) {
+				copied = -ENOTCONN;
+				break;
+			}
+			if (!timeo) {
+				copied = -EAGAIN;
+				break;
+			}
+			if (signal_pending(current)) {
+				copied = sock_intr_errno(timeo);
+				break;
+			}
+		}
+		if (sk->sk_backlog.tail) {
+			release_sock(sk);
+			lock_sock(sk);
+			chtls_cleanup_rbuf(sk, copied);
+			continue;
+		}
+
+		if (copied >= target)
+			break;
+		chtls_cleanup_rbuf(sk, copied);
+		sk_wait_data(sk, &timeo, NULL);
+		continue;
+found_ok_skb:
+		if (!skb->len) {
+			skb_dst_set(skb, NULL);
+			__skb_unlink(skb, &sk->sk_receive_queue);
+			kfree_skb(skb);
+
+			if (!copied && !timeo) {
+				copied = -EAGAIN;
+				break;
+			}
+
+			if (copied < target) {
+				release_sock(sk);
+				lock_sock(sk);
+				continue;
+			}
+			break;
+		}
+		offset = hws->copied_seq;
+		avail = skb->len - offset;
+		if (len < avail)
+			avail = len;
+
+		if (unlikely(tp->urg_data)) {
+			u32 urg_offset = tp->urg_seq - tp->copied_seq;
+
+			if (urg_offset < avail) {
+				if (urg_offset) {
+					avail = urg_offset;
+				} else if (!sock_flag(sk, SOCK_URGINLINE)) {
+					/* First byte is urgent, skip */
+					tp->copied_seq++;
+					offset++;
+					avail--;
+					if (!avail)
+						goto skip_copy;
+				}
+			}
+		}
+		if (hws->rstate == TLS_RCV_ST_READ_BODY) {
+			if (skb_copy_datagram_msg(skb, offset,
+						  msg, avail)) {
+				if (!copied) {
+					copied = -EFAULT;
+					break;
+				}
+			}
+		} else {
+			struct tlsrx_cmp_hdr *tls_hdr_pkt =
+				(struct tlsrx_cmp_hdr *)skb->data;
+
+			if ((tls_hdr_pkt->res_to_mac_error &
+			    TLSRX_HDR_PKT_ERROR_M))
+				tls_hdr_pkt->type = 0x7F;
+
+			/* CMP pld len is for recv seq */
+			hws->rcvpld = skb->hdr_len;
+			if (skb_copy_datagram_msg(skb, offset, msg, avail)) {
+				if (!copied) {
+					copied = -EFAULT;
+					break;
+				}
+			}
+		}
+		copied += avail;
+		len -= avail;
+		hws->copied_seq += avail;
+skip_copy:
+		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq))
+			tp->urg_data = 0;
+
+		if (hws->rstate == TLS_RCV_ST_READ_BODY &&
+		    (avail + offset) >= skb->len) {
+			if (likely(skb))
+				chtls_free_skb(sk, skb);
+			buffers_freed++;
+			hws->rstate = TLS_RCV_ST_READ_HEADER;
+			atomic_inc(&adap->chcr_stats.tls_pdu_rx);
+			tp->copied_seq += hws->rcvpld;
+			hws->copied_seq = 0;
+			if (copied >= target &&
+			    !skb_peek(&sk->sk_receive_queue))
+				break;
+		} else {
+			if (likely(skb)) {
+				if (ULP_SKB_CB(skb)->flags &
+				    ULPCB_FLAG_TLS_ND)
+					hws->rstate =
+						TLS_RCV_ST_READ_HEADER;
+				else
+					hws->rstate =
+						TLS_RCV_ST_READ_BODY;
+				chtls_free_skb(sk, skb);
+			}
+			buffers_freed++;
+			tp->copied_seq += avail;
+			hws->copied_seq = 0;
+		}
+	} while (len > 0);
+
+	if (buffers_freed)
+		chtls_cleanup_rbuf(sk, copied);
+	release_sock(sk);
+	return copied;
+}
+
+/*
+ * Peek at data in a socket's receive buffer.
+ */
+static int peekmsg(struct sock *sk, struct msghdr *msg,
+		   size_t len, int nonblock, int flags)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	u32 peek_seq, offset;
+	struct sk_buff *skb;
+	int copied = 0;
+	size_t avail;		/* amount of available data in current skb */
+	long timeo;
+
+	lock_sock(sk);
+	timeo = sock_rcvtimeo(sk, nonblock);
+	peek_seq = tp->copied_seq;
+
+	do {
+		if (unlikely(tp->urg_data && tp->urg_seq == peek_seq)) {
+			if (copied)
+				break;
+			if (signal_pending(current)) {
+				copied = timeo ? sock_intr_errno(timeo) :
+					-EAGAIN;
+				break;
+			}
+		}
+
+		skb_queue_walk(&sk->sk_receive_queue, skb) {
+			offset = peek_seq - ULP_SKB_CB(skb)->seq;
+			if (offset < skb->len)
+				goto found_ok_skb;
+		}
+
+		/* empty receive queue */
+		if (copied)
+			break;
+		if (sock_flag(sk, SOCK_DONE))
+			break;
+		if (sk->sk_err) {
+			copied = sock_error(sk);
+			break;
+		}
+		if (sk->sk_shutdown & RCV_SHUTDOWN)
+			break;
+		if (sk->sk_state == TCP_CLOSE) {
+			copied = -ENOTCONN;
+			break;
+		}
+		if (!timeo) {
+			copied = -EAGAIN;
+			break;
+		}
+		if (signal_pending(current)) {
+			copied = sock_intr_errno(timeo);
+			break;
+		}
+
+		if (sk->sk_backlog.tail) {
+			/* Do not sleep, just process backlog. */
+			release_sock(sk);
+			lock_sock(sk);
+		} else {
+			sk_wait_data(sk, &timeo, NULL);
+		}
+
+		if (unlikely(peek_seq != tp->copied_seq)) {
+			if (net_ratelimit())
+				pr_info("TCP(%s:%d), race in MSG_PEEK.\n",
+					current->comm, current->pid);
+			peek_seq = tp->copied_seq;
+		}
+		continue;
+
+found_ok_skb:
+		avail = skb->len - offset;
+		if (len < avail)
+			avail = len;
+		/*
+		 * Do we have urgent data here?  We need to skip over the
+		 * urgent byte.
+		 */
+		if (unlikely(tp->urg_data)) {
+			u32 urg_offset = tp->urg_seq - peek_seq;
+
+			if (urg_offset < avail) {
+				/*
+				 * The amount of data we are preparing to copy
+				 * contains urgent data.
+				 */
+				if (!urg_offset) { /* First byte is urgent */
+					if (!sock_flag(sk, SOCK_URGINLINE)) {
+						peek_seq++;
+						offset++;
+						avail--;
+					}
+					if (!avail)
+						continue;
+				} else {
+					/* stop short of the urgent data */
+					avail = urg_offset;
+				}
+			}
+		}
+
+		/*
+		 * If MSG_TRUNC is specified the data is discarded.
+		 */
+		if (likely(!(flags & MSG_TRUNC)))
+			if (skb_copy_datagram_msg(skb, offset, msg, len)) {
+				if (!copied) {
+					copied = -EFAULT;
+					break;
+				}
+			}
+		peek_seq += avail;
+		copied += avail;
+		len -= avail;
+	} while (len > 0);
+
+	release_sock(sk);
+	return copied;
+}
+
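+/* Top-level recvmsg: MSG_OOB falls back to tcp_prot.recvmsg(),
+ * MSG_PEEK is served by peekmsg(), sockets with an inline TLS Rx key
+ * take chtls_pt_recvmsg(), and plain offloaded data is copied here.
+ */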
+int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+		  int nonblock, int flags, int *addr_len)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct chtls_sock *csk;
+	struct chtls_hws *hws;
+	unsigned long avail;	/* amount of available data in current skb */
+	int buffers_freed;
+	int copied = 0;
+	int request;
+	long timeo;
+	int target;		/* Read at least this many bytes */
+
+	buffers_freed = 0;
+
+	if (unlikely(flags & MSG_OOB))
+		return tcp_prot.recvmsg(sk, msg, len, nonblock, flags,
+					addr_len);
+
+	if (unlikely(flags & MSG_PEEK))
+		return peekmsg(sk, msg, len, nonblock, flags);
+
+	if (sk_can_busy_loop(sk) &&
+	    skb_queue_empty(&sk->sk_receive_queue) &&
+	    sk->sk_state == TCP_ESTABLISHED)
+		sk_busy_loop(sk, nonblock);
+
+	lock_sock(sk);
+	csk = rcu_dereference_sk_user_data(sk);
+	hws = &csk->tlshws;
+
+	if (is_tls_rx(csk))
+		return chtls_pt_recvmsg(sk, msg, len, nonblock,
+					flags, addr_len);
+
+	timeo = sock_rcvtimeo(sk, nonblock);
+	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+	request = len;
+
+	if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
+		chtls_cleanup_rbuf(sk, copied);
+
+	do {
+		struct sk_buff *skb;
+		u32 offset;
+
+		if (unlikely(tp->urg_data && tp->urg_seq == tp->copied_seq)) {
+			if (copied)
+				break;
+			if (signal_pending(current)) {
+				copied = timeo ? sock_intr_errno(timeo) :
+					-EAGAIN;
+				break;
+			}
+		}
+
+		skb = skb_peek(&sk->sk_receive_queue);
+		if (skb)
+			goto found_ok_skb;
+
+		if (csk->wr_credits &&
+		    skb_queue_len(&csk->txq) &&
+		    chtls_push_frames(csk, csk->wr_credits ==
+				      csk->wr_max_credits))
+			sk->sk_write_space(sk);
+
+		if (copied >= target && !sk->sk_backlog.tail)
+			break;
+
+		if (copied) {
+			if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
+			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
+			    signal_pending(current))
+				break;
+		} else {
+			if (sock_flag(sk, SOCK_DONE))
+				break;
+			if (sk->sk_err) {
+				copied = sock_error(sk);
+				break;
+			}
+			if (sk->sk_shutdown & RCV_SHUTDOWN)
+				break;
+			if (sk->sk_state == TCP_CLOSE) {
+				copied = -ENOTCONN;
+				break;
+			}
+			if (!timeo) {
+				copied = -EAGAIN;
+				break;
+			}
+			if (signal_pending(current)) {
+				copied = sock_intr_errno(timeo);
+				break;
+			}
+		}
+
+		if (sk->sk_backlog.tail) {
+			release_sock(sk);
+			lock_sock(sk);
+			chtls_cleanup_rbuf(sk, copied);
+			continue;
+		}
+
+		if (copied >= target)
+			break;
+		chtls_cleanup_rbuf(sk, copied);
+		sk_wait_data(sk, &timeo, NULL);
+		continue;
+
+found_ok_skb:
+		if (!skb->len) {
+			chtls_kfree_skb(sk, skb);
+			if (!copied && !timeo) {
+				copied = -EAGAIN;
+				break;
+			}
+
+			if (copied < target)
+				continue;
+
+			break;
+		}
+
+		offset = tp->copied_seq - ULP_SKB_CB(skb)->seq;
+		avail = skb->len - offset;
+		if (len < avail)
+			avail = len;
+
+		if (unlikely(tp->urg_data)) {
+			u32 urg_offset = tp->urg_seq - tp->copied_seq;
+
+			if (urg_offset < avail) {
+				if (urg_offset) {
+					avail = urg_offset;
+				} else if (!sock_flag(sk, SOCK_URGINLINE)) {
+					tp->copied_seq++;
+					offset++;
+					avail--;
+					if (!avail)
+						goto skip_copy;
+				}
+			}
+		}
+
+		if (likely(!(flags & MSG_TRUNC))) {
+			if (skb_copy_datagram_msg(skb, offset,
+						  msg, avail)) {
+				if (!copied) {
+					copied = -EFAULT;
+					break;
+				}
+			}
+		}
+
+		tp->copied_seq += avail;
+		copied += avail;
+		len -= avail;
+
+skip_copy:
+		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq))
+			tp->urg_data = 0;
+
+		if (avail + offset >= skb->len) {
+			if (likely(skb))
+				chtls_free_skb(sk, skb);
+			buffers_freed++;
+
+			if (copied >= target &&
+			    !skb_peek(&sk->sk_receive_queue))
+				break;
+		}
+	} while (len > 0);
+
+	if (buffers_freed)
+		chtls_cleanup_rbuf(sk, copied);
+
+	release_sock(sk);
+	return copied;
+}
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index e5e543a45542..007c45c38fc7 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -549,6 +549,7 @@ static void __init chtls_init_ulp_ops(void)
 	chtls_cpl_prot.shutdown		= chtls_shutdown;
 	chtls_cpl_prot.sendmsg		= chtls_sendmsg;
 	chtls_cpl_prot.sendpage		= chtls_sendpage;
+	chtls_cpl_prot.recvmsg		= chtls_recvmsg;
 	chtls_cpl_prot.setsockopt	= chtls_setsockopt;
 	chtls_cpl_prot.getsockopt	= chtls_getsockopt;
 }
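The chtls_main.c hunk follows the usual pattern for overriding struct
proto handlers: the driver starts from the kernel's TCP proto ops and
replaces the operations it implements itself. A minimal sketch of that
pattern (only chtls_cpl_prot and chtls_recvmsg appear in this patch;
the inherit step and the remaining assignments are abbreviated):

	static struct proto chtls_cpl_prot;

	static void __init chtls_init_ulp_ops(void)
	{
		/* Inherit every default TCP operation, then override
		 * the handlers chtls provides, such as the Rx path
		 * added by this patch.
		 */
		chtls_cpl_prot = tcp_prot;
		chtls_cpl_prot.recvmsg = chtls_recvmsg;
	}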