about summary refs log tree commit diff stats
path: root/net/ipv4/tcp.c
diff options
context:
space:
mode:
author: Dan Williams <dan.j.williams@intel.com> 2013-12-30 15:37:29 -0500
committer: Dan Williams <dan.j.williams@intel.com> 2014-09-28 10:05:16 -0400
commit: 7bced397510ab569d31de4c70b39e13355046387 (patch)
tree: faa4067a53e42acffc752e9a153e7dbaed4126e5 /net/ipv4/tcp.c
parent: 08223d80df38e666a42d7c82eb340db55c6e03bd (diff)
net_dma: simple removal
Per commit "77873803363c net_dma: mark broken" net_dma is no longer used
and there is no plan to fix it.

This is the mechanical removal of bits in CONFIG_NET_DMA ifdef guards.
Reverting the remainder of the net_dma induced changes is deferred to
subsequent patches.

Marked for stable due to Roman's report of a memory leak in
dma_pin_iovec_pages():

    https://lkml.org/lkml/2014/9/3/177

Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Vinod Koul <vinod.koul@intel.com>
Cc: David Whipple <whipple@securedatainnovations.ch>
Cc: Alexander Duyck <alexander.h.duyck@intel.com>
Cc: <stable@vger.kernel.org>
Reported-by: Roman Gushchin <klamm@yandex-team.ru>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--net/ipv4/tcp.c147
1 files changed, 14 insertions, 133 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 97c8f5620c43..28595a364f09 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -274,7 +274,6 @@
274#include <net/tcp.h> 274#include <net/tcp.h>
275#include <net/xfrm.h> 275#include <net/xfrm.h>
276#include <net/ip.h> 276#include <net/ip.h>
277#include <net/netdma.h>
278#include <net/sock.h> 277#include <net/sock.h>
279 278
280#include <asm/uaccess.h> 279#include <asm/uaccess.h>
@@ -1454,39 +1453,6 @@ static void tcp_prequeue_process(struct sock *sk)
1454 tp->ucopy.memory = 0; 1453 tp->ucopy.memory = 0;
1455} 1454}
1456 1455
1457#ifdef CONFIG_NET_DMA
1458static void tcp_service_net_dma(struct sock *sk, bool wait)
1459{
1460 dma_cookie_t done, used;
1461 dma_cookie_t last_issued;
1462 struct tcp_sock *tp = tcp_sk(sk);
1463
1464 if (!tp->ucopy.dma_chan)
1465 return;
1466
1467 last_issued = tp->ucopy.dma_cookie;
1468 dma_async_issue_pending(tp->ucopy.dma_chan);
1469
1470 do {
1471 if (dma_async_is_tx_complete(tp->ucopy.dma_chan,
1472 last_issued, &done,
1473 &used) == DMA_COMPLETE) {
1474 /* Safe to free early-copied skbs now */
1475 __skb_queue_purge(&sk->sk_async_wait_queue);
1476 break;
1477 } else {
1478 struct sk_buff *skb;
1479 while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1480 (dma_async_is_complete(skb->dma_cookie, done,
1481 used) == DMA_COMPLETE)) {
1482 __skb_dequeue(&sk->sk_async_wait_queue);
1483 kfree_skb(skb);
1484 }
1485 }
1486 } while (wait);
1487}
1488#endif
1489
1490static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) 1456static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1491{ 1457{
1492 struct sk_buff *skb; 1458 struct sk_buff *skb;
@@ -1504,7 +1470,7 @@ static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1504 * splitted a fat GRO packet, while we released socket lock 1470 * splitted a fat GRO packet, while we released socket lock
1505 * in skb_splice_bits() 1471 * in skb_splice_bits()
1506 */ 1472 */
1507 sk_eat_skb(sk, skb, false); 1473 sk_eat_skb(sk, skb);
1508 } 1474 }
1509 return NULL; 1475 return NULL;
1510} 1476}
@@ -1570,11 +1536,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1570 continue; 1536 continue;
1571 } 1537 }
1572 if (tcp_hdr(skb)->fin) { 1538 if (tcp_hdr(skb)->fin) {
1573 sk_eat_skb(sk, skb, false); 1539 sk_eat_skb(sk, skb);
1574 ++seq; 1540 ++seq;
1575 break; 1541 break;
1576 } 1542 }
1577 sk_eat_skb(sk, skb, false); 1543 sk_eat_skb(sk, skb);
1578 if (!desc->count) 1544 if (!desc->count)
1579 break; 1545 break;
1580 tp->copied_seq = seq; 1546 tp->copied_seq = seq;
@@ -1612,7 +1578,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1612 int target; /* Read at least this many bytes */ 1578 int target; /* Read at least this many bytes */
1613 long timeo; 1579 long timeo;
1614 struct task_struct *user_recv = NULL; 1580 struct task_struct *user_recv = NULL;
1615 bool copied_early = false;
1616 struct sk_buff *skb; 1581 struct sk_buff *skb;
1617 u32 urg_hole = 0; 1582 u32 urg_hole = 0;
1618 1583
@@ -1655,28 +1620,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1655 1620
1656 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 1621 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1657 1622
1658#ifdef CONFIG_NET_DMA
1659 tp->ucopy.dma_chan = NULL;
1660 preempt_disable();
1661 skb = skb_peek_tail(&sk->sk_receive_queue);
1662 {
1663 int available = 0;
1664
1665 if (skb)
1666 available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
1667 if ((available < target) &&
1668 (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
1669 !sysctl_tcp_low_latency &&
1670 net_dma_find_channel()) {
1671 preempt_enable();
1672 tp->ucopy.pinned_list =
1673 dma_pin_iovec_pages(msg->msg_iov, len);
1674 } else {
1675 preempt_enable();
1676 }
1677 }
1678#endif
1679
1680 do { 1623 do {
1681 u32 offset; 1624 u32 offset;
1682 1625
@@ -1807,16 +1750,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1807 /* __ Set realtime policy in scheduler __ */ 1750 /* __ Set realtime policy in scheduler __ */
1808 } 1751 }
1809 1752
1810#ifdef CONFIG_NET_DMA
1811 if (tp->ucopy.dma_chan) {
1812 if (tp->rcv_wnd == 0 &&
1813 !skb_queue_empty(&sk->sk_async_wait_queue)) {
1814 tcp_service_net_dma(sk, true);
1815 tcp_cleanup_rbuf(sk, copied);
1816 } else
1817 dma_async_issue_pending(tp->ucopy.dma_chan);
1818 }
1819#endif
1820 if (copied >= target) { 1753 if (copied >= target) {
1821 /* Do not sleep, just process backlog. */ 1754 /* Do not sleep, just process backlog. */
1822 release_sock(sk); 1755 release_sock(sk);
@@ -1824,11 +1757,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1824 } else 1757 } else
1825 sk_wait_data(sk, &timeo); 1758 sk_wait_data(sk, &timeo);
1826 1759
1827#ifdef CONFIG_NET_DMA
1828 tcp_service_net_dma(sk, false); /* Don't block */
1829 tp->ucopy.wakeup = 0;
1830#endif
1831
1832 if (user_recv) { 1760 if (user_recv) {
1833 int chunk; 1761 int chunk;
1834 1762
@@ -1886,43 +1814,13 @@ do_prequeue:
1886 } 1814 }
1887 1815
1888 if (!(flags & MSG_TRUNC)) { 1816 if (!(flags & MSG_TRUNC)) {
1889#ifdef CONFIG_NET_DMA 1817 err = skb_copy_datagram_iovec(skb, offset,
1890 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 1818 msg->msg_iov, used);
1891 tp->ucopy.dma_chan = net_dma_find_channel(); 1819 if (err) {
1892 1820 /* Exception. Bailout! */
1893 if (tp->ucopy.dma_chan) { 1821 if (!copied)
1894 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec( 1822 copied = -EFAULT;
1895 tp->ucopy.dma_chan, skb, offset, 1823 break;
1896 msg->msg_iov, used,
1897 tp->ucopy.pinned_list);
1898
1899 if (tp->ucopy.dma_cookie < 0) {
1900
1901 pr_alert("%s: dma_cookie < 0\n",
1902 __func__);
1903
1904 /* Exception. Bailout! */
1905 if (!copied)
1906 copied = -EFAULT;
1907 break;
1908 }
1909
1910 dma_async_issue_pending(tp->ucopy.dma_chan);
1911
1912 if ((offset + used) == skb->len)
1913 copied_early = true;
1914
1915 } else
1916#endif
1917 {
1918 err = skb_copy_datagram_iovec(skb, offset,
1919 msg->msg_iov, used);
1920 if (err) {
1921 /* Exception. Bailout! */
1922 if (!copied)
1923 copied = -EFAULT;
1924 break;
1925 }
1926 } 1824 }
1927 } 1825 }
1928 1826
@@ -1942,19 +1840,15 @@ skip_copy:
1942 1840
1943 if (tcp_hdr(skb)->fin) 1841 if (tcp_hdr(skb)->fin)
1944 goto found_fin_ok; 1842 goto found_fin_ok;
1945 if (!(flags & MSG_PEEK)) { 1843 if (!(flags & MSG_PEEK))
1946 sk_eat_skb(sk, skb, copied_early); 1844 sk_eat_skb(sk, skb);
1947 copied_early = false;
1948 }
1949 continue; 1845 continue;
1950 1846
1951 found_fin_ok: 1847 found_fin_ok:
1952 /* Process the FIN. */ 1848 /* Process the FIN. */
1953 ++*seq; 1849 ++*seq;
1954 if (!(flags & MSG_PEEK)) { 1850 if (!(flags & MSG_PEEK))
1955 sk_eat_skb(sk, skb, copied_early); 1851 sk_eat_skb(sk, skb);
1956 copied_early = false;
1957 }
1958 break; 1852 break;
1959 } while (len > 0); 1853 } while (len > 0);
1960 1854
@@ -1977,16 +1871,6 @@ skip_copy:
1977 tp->ucopy.len = 0; 1871 tp->ucopy.len = 0;
1978 } 1872 }
1979 1873
1980#ifdef CONFIG_NET_DMA
1981 tcp_service_net_dma(sk, true); /* Wait for queue to drain */
1982 tp->ucopy.dma_chan = NULL;
1983
1984 if (tp->ucopy.pinned_list) {
1985 dma_unpin_iovec_pages(tp->ucopy.pinned_list);
1986 tp->ucopy.pinned_list = NULL;
1987 }
1988#endif
1989
1990 /* According to UNIX98, msg_name/msg_namelen are ignored 1874 /* According to UNIX98, msg_name/msg_namelen are ignored
1991 * on connected socket. I was just happy when found this 8) --ANK 1875 * on connected socket. I was just happy when found this 8) --ANK
1992 */ 1876 */
@@ -2330,9 +2214,6 @@ int tcp_disconnect(struct sock *sk, int flags)
2330 __skb_queue_purge(&sk->sk_receive_queue); 2214 __skb_queue_purge(&sk->sk_receive_queue);
2331 tcp_write_queue_purge(sk); 2215 tcp_write_queue_purge(sk);
2332 __skb_queue_purge(&tp->out_of_order_queue); 2216 __skb_queue_purge(&tp->out_of_order_queue);
2333#ifdef CONFIG_NET_DMA
2334 __skb_queue_purge(&sk->sk_async_wait_queue);
2335#endif
2336 2217
2337 inet->inet_dport = 0; 2218 inet->inet_dport = 0;
2338 2219