author	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-07 20:39:25 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-07 20:39:25 -0400
commit	d0cd84817c745655428dbfdb1e3f754230b46bef (patch)
tree	a7b6f422f6ac50f506ffa7a66f8e83387f90f212 /net/ipv4/tcp.c
parent	bdf428feb225229b1d4715b45bbdad4a934cd89c (diff)
parent	3f334078567245429540e6461c81c749fce87f70 (diff)
Merge tag 'dmaengine-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine
Pull dmaengine updates from Dan Williams:
 "Even though this has fixes marked for -stable, given the size and the
  needed conflict resolutions this is 3.18-rc1/merge-window material.

  These patches have been languishing in my tree for a long while.  The
  fact that I do not have the time to do proper/prompt maintenance of
  this tree is a primary factor in the decision to step down as
  dmaengine maintainer.  That and the fact that the bulk of drivers/dma/
  activity is going through Vinod these days.

  The net_dma removal has not been in -next.  It has developed simple
  conflicts against mainline and net-next (for-3.18).

  Continuing thanks to Vinod for staying on top of drivers/dma/.

  Summary:

   1/ Step down as dmaengine maintainer see commit 08223d80df38
      "dmaengine maintainer update"

   2/ Removal of net_dma, as it has been marked 'broken' since 3.13
      (commit 77873803363c "net_dma: mark broken"), without reports of
      performance regression.

   3/ Miscellaneous fixes"

* tag 'dmaengine-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine:
  net: make tcp_cleanup_rbuf private
  net_dma: revert 'copied_early'
  net_dma: simple removal
  dmaengine maintainer update
  dmatest: prevent memory leakage on error path in thread
  ioat: Use time_before_jiffies()
  dmaengine: fix xor sources continuation
  dma: mv_xor: Rename __mv_xor_slot_cleanup() to mv_xor_slot_cleanup()
  dma: mv_xor: Remove all callers of mv_xor_slot_cleanup()
  dma: mv_xor: Remove unneeded mv_xor_clean_completed_slots() call
  ioat: Use pci_enable_msix_exact() instead of pci_enable_msix()
  drivers: dma: Include appropriate header file in dca.c
  drivers: dma: Mark functions as static in dma_v3.c
  dma: mv_xor: Add DMA API error checks
  ioat/dca: Use dev_is_pci() to check whether it is pci device
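For context on item 2: after this series, tcp_recvmsg() always copies
received data to userspace on the CPU via skb_copy_datagram_iovec(), with
no DMA-engine "early copy" branch (see the tcp.c diff below). A rough,
standalone userspace sketch of that copy pattern follows; copy_to_iovec()
is a hypothetical stand-in for the kernel helper, not kernel code.

	/* Sketch only: models the CPU copy that replaces net_dma offload. */
	#include <stdio.h>
	#include <string.h>
	#include <sys/uio.h>

	/* Hypothetical stand-in for skb_copy_datagram_iovec(): copy 'used'
	 * bytes of one segment, starting at 'offset', into the user iovec. */
	static int copy_to_iovec(const char *seg, size_t offset, size_t used,
				 struct iovec *iov, int iovcnt)
	{
		size_t done = 0;

		for (int i = 0; i < iovcnt && done < used; i++) {
			size_t n = used - done < iov[i].iov_len ?
				   used - done : iov[i].iov_len;

			memcpy(iov[i].iov_base, seg + offset + done, n);
			done += n;
		}
		return done == used ? 0 : -1;	/* -EFAULT analogue */
	}

	int main(void)
	{
		const char seg[] = "example payload";
		char buf[32];
		struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };

		if (copy_to_iovec(seg, 0, sizeof(seg), &iov, 1) == 0)
			printf("copied: %s\n", buf);
		return 0;
	}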
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--	net/ipv4/tcp.c	149
1 file changed, 15 insertions(+), 134 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 541f26a67ba2..8ee43ae90396 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -274,7 +274,6 @@
 #include <net/tcp.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
-#include <net/netdma.h>
 #include <net/sock.h>
 
 #include <asm/uaccess.h>
@@ -1394,7 +1393,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
  * calculation of whether or not we must ACK for the sake of
  * a window update.
  */
-void tcp_cleanup_rbuf(struct sock *sk, int copied)
+static void tcp_cleanup_rbuf(struct sock *sk, int copied)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	bool time_to_ack = false;
@@ -1470,39 +1469,6 @@ static void tcp_prequeue_process(struct sock *sk)
 	tp->ucopy.memory = 0;
 }
 
-#ifdef CONFIG_NET_DMA
-static void tcp_service_net_dma(struct sock *sk, bool wait)
-{
-	dma_cookie_t done, used;
-	dma_cookie_t last_issued;
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	if (!tp->ucopy.dma_chan)
-		return;
-
-	last_issued = tp->ucopy.dma_cookie;
-	dma_async_issue_pending(tp->ucopy.dma_chan);
-
-	do {
-		if (dma_async_is_tx_complete(tp->ucopy.dma_chan,
-					     last_issued, &done,
-					     &used) == DMA_COMPLETE) {
-			/* Safe to free early-copied skbs now */
-			__skb_queue_purge(&sk->sk_async_wait_queue);
-			break;
-		} else {
-			struct sk_buff *skb;
-			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
-			       (dma_async_is_complete(skb->dma_cookie, done,
-						      used) == DMA_COMPLETE)) {
-				__skb_dequeue(&sk->sk_async_wait_queue);
-				kfree_skb(skb);
-			}
-		}
-	} while (wait);
-}
-#endif
-
 static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
 {
 	struct sk_buff *skb;
@@ -1520,7 +1486,7 @@ static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
 		 * splitted a fat GRO packet, while we released socket lock
 		 * in skb_splice_bits()
 		 */
-		sk_eat_skb(sk, skb, false);
+		sk_eat_skb(sk, skb);
 	}
 	return NULL;
 }
@@ -1586,11 +1552,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 			continue;
 		}
 		if (tcp_hdr(skb)->fin) {
-			sk_eat_skb(sk, skb, false);
+			sk_eat_skb(sk, skb);
 			++seq;
 			break;
 		}
-		sk_eat_skb(sk, skb, false);
+		sk_eat_skb(sk, skb);
 		if (!desc->count)
 			break;
 		tp->copied_seq = seq;
@@ -1628,7 +1594,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	int target;		/* Read at least this many bytes */
 	long timeo;
 	struct task_struct *user_recv = NULL;
-	bool copied_early = false;
 	struct sk_buff *skb;
 	u32 urg_hole = 0;
 
@@ -1674,28 +1639,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
 
-#ifdef CONFIG_NET_DMA
-	tp->ucopy.dma_chan = NULL;
-	preempt_disable();
-	skb = skb_peek_tail(&sk->sk_receive_queue);
-	{
-		int available = 0;
-
-		if (skb)
-			available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
-		if ((available < target) &&
-		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
-		    !sysctl_tcp_low_latency &&
-		    net_dma_find_channel()) {
-			preempt_enable();
-			tp->ucopy.pinned_list =
-					dma_pin_iovec_pages(msg->msg_iov, len);
-		} else {
-			preempt_enable();
-		}
-	}
-#endif
-
 	do {
 		u32 offset;
 
@@ -1826,16 +1769,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			/* __ Set realtime policy in scheduler __ */
 		}
 
-#ifdef CONFIG_NET_DMA
-		if (tp->ucopy.dma_chan) {
-			if (tp->rcv_wnd == 0 &&
-			    !skb_queue_empty(&sk->sk_async_wait_queue)) {
-				tcp_service_net_dma(sk, true);
-				tcp_cleanup_rbuf(sk, copied);
-			} else
-				dma_async_issue_pending(tp->ucopy.dma_chan);
-		}
-#endif
 		if (copied >= target) {
 			/* Do not sleep, just process backlog. */
 			release_sock(sk);
@@ -1843,11 +1776,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		} else
 			sk_wait_data(sk, &timeo);
 
-#ifdef CONFIG_NET_DMA
-		tcp_service_net_dma(sk, false);  /* Don't block */
-		tp->ucopy.wakeup = 0;
-#endif
-
 		if (user_recv) {
 			int chunk;
 
@@ -1905,43 +1833,13 @@ do_prequeue:
 		}
 
 		if (!(flags & MSG_TRUNC)) {
-#ifdef CONFIG_NET_DMA
-			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-				tp->ucopy.dma_chan = net_dma_find_channel();
-
-			if (tp->ucopy.dma_chan) {
-				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
-					tp->ucopy.dma_chan, skb, offset,
-					msg->msg_iov, used,
-					tp->ucopy.pinned_list);
-
-				if (tp->ucopy.dma_cookie < 0) {
-
-					pr_alert("%s: dma_cookie < 0\n",
-						 __func__);
-
-					/* Exception. Bailout! */
-					if (!copied)
-						copied = -EFAULT;
-					break;
-				}
-
-				dma_async_issue_pending(tp->ucopy.dma_chan);
-
-				if ((offset + used) == skb->len)
-					copied_early = true;
-
-			} else
-#endif
-			{
-				err = skb_copy_datagram_iovec(skb, offset,
-							      msg->msg_iov, used);
-				if (err) {
-					/* Exception. Bailout! */
-					if (!copied)
-						copied = -EFAULT;
-					break;
-				}
+			err = skb_copy_datagram_iovec(skb, offset,
+						      msg->msg_iov, used);
+			if (err) {
+				/* Exception. Bailout! */
+				if (!copied)
+					copied = -EFAULT;
+				break;
 			}
 		}
 
@@ -1961,19 +1859,15 @@ skip_copy:
 
 		if (tcp_hdr(skb)->fin)
 			goto found_fin_ok;
-		if (!(flags & MSG_PEEK)) {
-			sk_eat_skb(sk, skb, copied_early);
-			copied_early = false;
-		}
+		if (!(flags & MSG_PEEK))
+			sk_eat_skb(sk, skb);
 		continue;
 
 	found_fin_ok:
 		/* Process the FIN. */
 		++*seq;
-		if (!(flags & MSG_PEEK)) {
-			sk_eat_skb(sk, skb, copied_early);
-			copied_early = false;
-		}
+		if (!(flags & MSG_PEEK))
+			sk_eat_skb(sk, skb);
 		break;
 	} while (len > 0);
 
@@ -1996,16 +1890,6 @@ skip_copy:
 		tp->ucopy.len = 0;
 	}
 
-#ifdef CONFIG_NET_DMA
-	tcp_service_net_dma(sk, true);  /* Wait for queue to drain */
-	tp->ucopy.dma_chan = NULL;
-
-	if (tp->ucopy.pinned_list) {
-		dma_unpin_iovec_pages(tp->ucopy.pinned_list);
-		tp->ucopy.pinned_list = NULL;
-	}
-#endif
-
 	/* According to UNIX98, msg_name/msg_namelen are ignored
 	 * on connected socket. I was just happy when found this 8) --ANK
 	 */
@@ -2349,9 +2233,6 @@ int tcp_disconnect(struct sock *sk, int flags)
 	__skb_queue_purge(&sk->sk_receive_queue);
 	tcp_write_queue_purge(sk);
 	__skb_queue_purge(&tp->out_of_order_queue);
-#ifdef CONFIG_NET_DMA
-	__skb_queue_purge(&sk->sk_async_wait_queue);
-#endif
 
 	inet->inet_dport = 0;
 