author		Linus Torvalds <torvalds@linux-foundation.org>	2014-10-07 20:39:25 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-07 20:39:25 -0400
commit		d0cd84817c745655428dbfdb1e3f754230b46bef (patch)
tree		a7b6f422f6ac50f506ffa7a66f8e83387f90f212 /net
parent		bdf428feb225229b1d4715b45bbdad4a934cd89c (diff)
parent		3f334078567245429540e6461c81c749fce87f70 (diff)
Merge tag 'dmaengine-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine
Pull dmaengine updates from Dan Williams:
 "Even though this has fixes marked for -stable, given the size and the
  needed conflict resolutions this is 3.18-rc1/merge-window material.

  These patches have been languishing in my tree for a long while. The
  fact that I do not have the time to do proper/prompt maintenance of
  this tree is a primary factor in the decision to step down as
  dmaengine maintainer. That and the fact that the bulk of drivers/dma/
  activity is going through Vinod these days.

  The net_dma removal has not been in -next. It has developed simple
  conflicts against mainline and net-next (for-3.18).

  Continuing thanks to Vinod for staying on top of drivers/dma/.

  Summary:

   1/ Step down as dmaengine maintainer see commit 08223d80df38
      "dmaengine maintainer update"

   2/ Removal of net_dma, as it has been marked 'broken' since 3.13
      (commit 77873803363c "net_dma: mark broken"), without reports of
      performance regression.

   3/ Miscellaneous fixes"

* tag 'dmaengine-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine:
  net: make tcp_cleanup_rbuf private
  net_dma: revert 'copied_early'
  net_dma: simple removal
  dmaengine maintainer update
  dmatest: prevent memory leakage on error path in thread
  ioat: Use time_before_jiffies()
  dmaengine: fix xor sources continuation
  dma: mv_xor: Rename __mv_xor_slot_cleanup() to mv_xor_slot_cleanup()
  dma: mv_xor: Remove all callers of mv_xor_slot_cleanup()
  dma: mv_xor: Remove unneeded mv_xor_clean_completed_slots() call
  ioat: Use pci_enable_msix_exact() instead of pci_enable_msix()
  drivers: dma: Include appropriate header file in dca.c
  drivers: dma: Mark functions as static in dma_v3.c
  dma: mv_xor: Add DMA API error checks
  ioat/dca: Use dev_is_pci() to check whether it is pci device
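The "net_dma: revert 'copied_early'" patch in the shortlog above is what drops the third argument from sk_eat_skb() at every call site in the diff below (dccp, tcp, llc). The helper itself is defined in include/net/sock.h, which is outside the 'net' diffstat shown here, so the following is only a rough sketch of its post-removal shape, not part of this diff:

/* Sketch of the two-argument helper after the net_dma removal; the exact
 * definition lives in include/net/sock.h and is not shown in this diff.
 * The old variant took a third 'copied_early' flag and, under
 * CONFIG_NET_DMA, parked the skb on sk_async_wait_queue instead of
 * freeing it immediately.
 */
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}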
Diffstat (limited to 'net')
-rw-r--r--	net/core/Makefile	1
-rw-r--r--	net/core/dev.c	10
-rw-r--r--	net/core/sock.c	6
-rw-r--r--	net/core/user_dma.c	131
-rw-r--r--	net/dccp/proto.c	4
-rw-r--r--	net/ipv4/sysctl_net_ipv4.c	9
-rw-r--r--	net/ipv4/tcp.c	149
-rw-r--r--	net/ipv4/tcp_input.c	83
-rw-r--r--	net/ipv4/tcp_ipv4.c	18
-rw-r--r--	net/ipv6/tcp_ipv6.c	13
-rw-r--r--	net/llc/af_llc.c	10
11 files changed, 32 insertions(+), 402 deletions(-)
diff --git a/net/core/Makefile b/net/core/Makefile
index 71093d94ad2b..235e6c50708d 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -16,7 +16,6 @@ obj-y += net-sysfs.o
 obj-$(CONFIG_PROC_FS) += net-procfs.o
 obj-$(CONFIG_NET_PKTGEN) += pktgen.o
 obj-$(CONFIG_NETPOLL) += netpoll.o
-obj-$(CONFIG_NET_DMA) += user_dma.o
 obj-$(CONFIG_FIB_RULES) += fib_rules.o
 obj-$(CONFIG_TRACEPOINTS) += net-traces.o
 obj-$(CONFIG_NET_DROP_MONITOR) += drop_monitor.o
diff --git a/net/core/dev.c b/net/core/dev.c
index cf8a95f48cff..130d64220229 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1284,7 +1284,6 @@ static int __dev_open(struct net_device *dev)
 		clear_bit(__LINK_STATE_START, &dev->state);
 	else {
 		dev->flags |= IFF_UP;
-		net_dmaengine_get();
 		dev_set_rx_mode(dev);
 		dev_activate(dev);
 		add_device_randomness(dev->dev_addr, dev->addr_len);
@@ -1363,7 +1362,6 @@ static int __dev_close_many(struct list_head *head)
 			ops->ndo_stop(dev);

 		dev->flags &= ~IFF_UP;
-		net_dmaengine_put();
 		netpoll_poll_enable(dev);
 	}

@@ -4505,14 +4503,6 @@ static void net_rx_action(struct softirq_action *h)
 out:
 	net_rps_action_and_irq_enable(sd);

-#ifdef CONFIG_NET_DMA
-	/*
-	 * There may not be any more sk_buffs coming right now, so push
-	 * any pending DMA copies to hardware
-	 */
-	dma_issue_pending_all();
-#endif
-
 	return;

 softnet_break:
diff --git a/net/core/sock.c b/net/core/sock.c
index 9c3f823e76a9..611f424fb76b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1489,9 +1489,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		atomic_set(&newsk->sk_omem_alloc, 0);
 		skb_queue_head_init(&newsk->sk_receive_queue);
 		skb_queue_head_init(&newsk->sk_write_queue);
-#ifdef CONFIG_NET_DMA
-		skb_queue_head_init(&newsk->sk_async_wait_queue);
-#endif

 		spin_lock_init(&newsk->sk_dst_lock);
 		rwlock_init(&newsk->sk_callback_lock);
@@ -2308,9 +2305,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 	skb_queue_head_init(&sk->sk_receive_queue);
 	skb_queue_head_init(&sk->sk_write_queue);
 	skb_queue_head_init(&sk->sk_error_queue);
-#ifdef CONFIG_NET_DMA
-	skb_queue_head_init(&sk->sk_async_wait_queue);
-#endif

 	sk->sk_send_head = NULL;

diff --git a/net/core/user_dma.c b/net/core/user_dma.c
deleted file mode 100644
index 1b5fefdb8198..000000000000
--- a/net/core/user_dma.c
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
- * Portions based on net/core/datagram.c and copyrighted by their authors.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
- */
-
-/*
- * This code allows the net stack to make use of a DMA engine for
- * skb to iovec copies.
- */
-
-#include <linux/dmaengine.h>
-#include <linux/socket.h>
-#include <linux/export.h>
-#include <net/tcp.h>
-#include <net/netdma.h>
-
-#define NET_DMA_DEFAULT_COPYBREAK 4096
-
-int sysctl_tcp_dma_copybreak = NET_DMA_DEFAULT_COPYBREAK;
-EXPORT_SYMBOL(sysctl_tcp_dma_copybreak);
-
-/**
- *	dma_skb_copy_datagram_iovec - Copy a datagram to an iovec.
- *	@skb - buffer to copy
- *	@offset - offset in the buffer to start copying from
- *	@iovec - io vector to copy to
- *	@len - amount of data to copy from buffer to iovec
- *	@pinned_list - locked iovec buffer data
- *
- *	Note: the iovec is modified during the copy.
- */
-int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
-			struct sk_buff *skb, int offset, struct iovec *to,
-			size_t len, struct dma_pinned_list *pinned_list)
-{
-	int start = skb_headlen(skb);
-	int i, copy = start - offset;
-	struct sk_buff *frag_iter;
-	dma_cookie_t cookie = 0;
-
-	/* Copy header. */
-	if (copy > 0) {
-		if (copy > len)
-			copy = len;
-		cookie = dma_memcpy_to_iovec(chan, to, pinned_list,
-					     skb->data + offset, copy);
-		if (cookie < 0)
-			goto fault;
-		len -= copy;
-		if (len == 0)
-			goto end;
-		offset += copy;
-	}
-
-	/* Copy paged appendix. Hmm... why does this look so complicated? */
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		int end;
-		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
-		WARN_ON(start > offset + len);
-
-		end = start + skb_frag_size(frag);
-		copy = end - offset;
-		if (copy > 0) {
-			struct page *page = skb_frag_page(frag);
-
-			if (copy > len)
-				copy = len;
-
-			cookie = dma_memcpy_pg_to_iovec(chan, to, pinned_list, page,
-					frag->page_offset + offset - start, copy);
-			if (cookie < 0)
-				goto fault;
-			len -= copy;
-			if (len == 0)
-				goto end;
-			offset += copy;
-		}
-		start = end;
-	}
-
-	skb_walk_frags(skb, frag_iter) {
-		int end;
-
-		WARN_ON(start > offset + len);
-
-		end = start + frag_iter->len;
-		copy = end - offset;
-		if (copy > 0) {
-			if (copy > len)
-				copy = len;
-			cookie = dma_skb_copy_datagram_iovec(chan, frag_iter,
-							     offset - start,
-							     to, copy,
-							     pinned_list);
-			if (cookie < 0)
-				goto fault;
-			len -= copy;
-			if (len == 0)
-				goto end;
-			offset += copy;
-		}
-		start = end;
-	}
-
-end:
-	if (!len) {
-		skb->dma_cookie = cookie;
-		return cookie;
-	}
-
-fault:
-	return -EFAULT;
-}
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index de2c1e719305..f440cc7c9f72 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -848,7 +848,7 @@ int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		default:
 			dccp_pr_debug("packet_type=%s\n",
 				      dccp_packet_name(dh->dccph_type));
-			sk_eat_skb(sk, skb, false);
+			sk_eat_skb(sk, skb);
 		}
 verify_sock_status:
 		if (sock_flag(sk, SOCK_DONE)) {
@@ -905,7 +905,7 @@ verify_sock_status:
 		len = skb->len;
 	found_fin_ok:
 		if (!(flags & MSG_PEEK))
-			sk_eat_skb(sk, skb, false);
+			sk_eat_skb(sk, skb);
 		break;
 	} while (1);
 out:
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 79a007c52558..a9fde0eef77c 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -628,15 +628,6 @@ static struct ctl_table ipv4_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec
 	},
-#ifdef CONFIG_NET_DMA
-	{
-		.procname	= "tcp_dma_copybreak",
-		.data		= &sysctl_tcp_dma_copybreak,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec
-	},
-#endif
 	{
 		.procname	= "tcp_slow_start_after_idle",
 		.data		= &sysctl_tcp_slow_start_after_idle,
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 541f26a67ba2..8ee43ae90396 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -274,7 +274,6 @@
 #include <net/tcp.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
-#include <net/netdma.h>
 #include <net/sock.h>

 #include <asm/uaccess.h>
@@ -1394,7 +1393,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
  * calculation of whether or not we must ACK for the sake of
  * a window update.
  */
-void tcp_cleanup_rbuf(struct sock *sk, int copied)
+static void tcp_cleanup_rbuf(struct sock *sk, int copied)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	bool time_to_ack = false;
@@ -1470,39 +1469,6 @@ static void tcp_prequeue_process(struct sock *sk)
 	tp->ucopy.memory = 0;
 }

-#ifdef CONFIG_NET_DMA
-static void tcp_service_net_dma(struct sock *sk, bool wait)
-{
-	dma_cookie_t done, used;
-	dma_cookie_t last_issued;
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	if (!tp->ucopy.dma_chan)
-		return;
-
-	last_issued = tp->ucopy.dma_cookie;
-	dma_async_issue_pending(tp->ucopy.dma_chan);
-
-	do {
-		if (dma_async_is_tx_complete(tp->ucopy.dma_chan,
-					     last_issued, &done,
-					     &used) == DMA_COMPLETE) {
-			/* Safe to free early-copied skbs now */
-			__skb_queue_purge(&sk->sk_async_wait_queue);
-			break;
-		} else {
-			struct sk_buff *skb;
-			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
-			       (dma_async_is_complete(skb->dma_cookie, done,
-						      used) == DMA_COMPLETE)) {
-				__skb_dequeue(&sk->sk_async_wait_queue);
-				kfree_skb(skb);
-			}
-		}
-	} while (wait);
-}
-#endif
-
 static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
 {
 	struct sk_buff *skb;
@@ -1520,7 +1486,7 @@ static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
 		 * splitted a fat GRO packet, while we released socket lock
 		 * in skb_splice_bits()
 		 */
-		sk_eat_skb(sk, skb, false);
+		sk_eat_skb(sk, skb);
 	}
 	return NULL;
 }
@@ -1586,11 +1552,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 				continue;
 			}
 			if (tcp_hdr(skb)->fin) {
-				sk_eat_skb(sk, skb, false);
+				sk_eat_skb(sk, skb);
 				++seq;
 				break;
 			}
-			sk_eat_skb(sk, skb, false);
+			sk_eat_skb(sk, skb);
 			if (!desc->count)
 				break;
 			tp->copied_seq = seq;
@@ -1628,7 +1594,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	int target;		/* Read at least this many bytes */
 	long timeo;
 	struct task_struct *user_recv = NULL;
-	bool copied_early = false;
 	struct sk_buff *skb;
 	u32 urg_hole = 0;

@@ -1674,28 +1639,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,

 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

-#ifdef CONFIG_NET_DMA
-	tp->ucopy.dma_chan = NULL;
-	preempt_disable();
-	skb = skb_peek_tail(&sk->sk_receive_queue);
-	{
-		int available = 0;
-
-		if (skb)
-			available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
-		if ((available < target) &&
-		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
-		    !sysctl_tcp_low_latency &&
-		    net_dma_find_channel()) {
-			preempt_enable();
-			tp->ucopy.pinned_list =
-					dma_pin_iovec_pages(msg->msg_iov, len);
-		} else {
-			preempt_enable();
-		}
-	}
-#endif
-
 	do {
 		u32 offset;

@@ -1826,16 +1769,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			/* __ Set realtime policy in scheduler __ */
 		}

-#ifdef CONFIG_NET_DMA
-		if (tp->ucopy.dma_chan) {
-			if (tp->rcv_wnd == 0 &&
-			    !skb_queue_empty(&sk->sk_async_wait_queue)) {
-				tcp_service_net_dma(sk, true);
-				tcp_cleanup_rbuf(sk, copied);
-			} else
-				dma_async_issue_pending(tp->ucopy.dma_chan);
-		}
-#endif
 		if (copied >= target) {
 			/* Do not sleep, just process backlog. */
 			release_sock(sk);
@@ -1843,11 +1776,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		} else
 			sk_wait_data(sk, &timeo);

-#ifdef CONFIG_NET_DMA
-		tcp_service_net_dma(sk, false);	/* Don't block */
-		tp->ucopy.wakeup = 0;
-#endif
-
 		if (user_recv) {
 			int chunk;

@@ -1905,43 +1833,13 @@ do_prequeue:
 		}

 		if (!(flags & MSG_TRUNC)) {
-#ifdef CONFIG_NET_DMA
-			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-				tp->ucopy.dma_chan = net_dma_find_channel();
-
-			if (tp->ucopy.dma_chan) {
-				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
-					tp->ucopy.dma_chan, skb, offset,
-					msg->msg_iov, used,
-					tp->ucopy.pinned_list);
-
-				if (tp->ucopy.dma_cookie < 0) {
-
-					pr_alert("%s: dma_cookie < 0\n",
-						 __func__);
-
-					/* Exception. Bailout! */
-					if (!copied)
-						copied = -EFAULT;
-					break;
-				}
-
-				dma_async_issue_pending(tp->ucopy.dma_chan);
-
-				if ((offset + used) == skb->len)
-					copied_early = true;
-
-			} else
-#endif
-			{
-				err = skb_copy_datagram_iovec(skb, offset,
-						msg->msg_iov, used);
-				if (err) {
-					/* Exception. Bailout! */
-					if (!copied)
-						copied = -EFAULT;
-					break;
-				}
+			err = skb_copy_datagram_iovec(skb, offset,
+						      msg->msg_iov, used);
+			if (err) {
+				/* Exception. Bailout! */
+				if (!copied)
+					copied = -EFAULT;
+				break;
 			}
 		}

@@ -1961,19 +1859,15 @@ skip_copy:

 		if (tcp_hdr(skb)->fin)
 			goto found_fin_ok;
-		if (!(flags & MSG_PEEK)) {
-			sk_eat_skb(sk, skb, copied_early);
-			copied_early = false;
-		}
+		if (!(flags & MSG_PEEK))
+			sk_eat_skb(sk, skb);
 		continue;

 found_fin_ok:
 		/* Process the FIN. */
 		++*seq;
-		if (!(flags & MSG_PEEK)) {
-			sk_eat_skb(sk, skb, copied_early);
-			copied_early = false;
-		}
+		if (!(flags & MSG_PEEK))
+			sk_eat_skb(sk, skb);
 		break;
 	} while (len > 0);

@@ -1996,16 +1890,6 @@ skip_copy:
 		tp->ucopy.len = 0;
 	}

-#ifdef CONFIG_NET_DMA
-	tcp_service_net_dma(sk, true);	/* Wait for queue to drain */
-	tp->ucopy.dma_chan = NULL;
-
-	if (tp->ucopy.pinned_list) {
-		dma_unpin_iovec_pages(tp->ucopy.pinned_list);
-		tp->ucopy.pinned_list = NULL;
-	}
-#endif
-
 	/* According to UNIX98, msg_name/msg_namelen are ignored
 	 * on connected socket. I was just happy when found this 8) --ANK
 	 */
@@ -2349,9 +2233,6 @@ int tcp_disconnect(struct sock *sk, int flags)
 	__skb_queue_purge(&sk->sk_receive_queue);
 	tcp_write_queue_purge(sk);
 	__skb_queue_purge(&tp->out_of_order_queue);
-#ifdef CONFIG_NET_DMA
-	__skb_queue_purge(&sk->sk_async_wait_queue);
-#endif

 	inet->inet_dport = 0;

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a906e0200ff2..0185eea59342 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -73,7 +73,6 @@
 #include <net/inet_common.h>
 #include <linux/ipsec.h>
 #include <asm/unaligned.h>
-#include <net/netdma.h>
 #include <linux/errqueue.h>

 int sysctl_tcp_timestamps __read_mostly = 1;
@@ -4951,53 +4950,6 @@ static inline bool tcp_checksum_complete_user(struct sock *sk,
 		__tcp_checksum_complete_user(sk, skb);
 }

-#ifdef CONFIG_NET_DMA
-static bool tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
-				   int hlen)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-	int chunk = skb->len - hlen;
-	int dma_cookie;
-	bool copied_early = false;
-
-	if (tp->ucopy.wakeup)
-		return false;
-
-	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-		tp->ucopy.dma_chan = net_dma_find_channel();
-
-	if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
-
-		dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan,
-							 skb, hlen,
-							 tp->ucopy.iov, chunk,
-							 tp->ucopy.pinned_list);
-
-		if (dma_cookie < 0)
-			goto out;
-
-		tp->ucopy.dma_cookie = dma_cookie;
-		copied_early = true;
-
-		tp->ucopy.len -= chunk;
-		tp->copied_seq += chunk;
-		tcp_rcv_space_adjust(sk);
-
-		if ((tp->ucopy.len == 0) ||
-		    (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) ||
-		    (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) {
-			tp->ucopy.wakeup = 1;
-			sk->sk_data_ready(sk);
-		}
-	} else if (chunk > 0) {
-		tp->ucopy.wakeup = 1;
-		sk->sk_data_ready(sk);
-	}
-out:
-	return copied_early;
-}
-#endif /* CONFIG_NET_DMA */
-
 /* Does PAWS and seqno based validation of an incoming segment, flags will
  * play significant role here.
  */
@@ -5177,27 +5129,15 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 		}
 	} else {
 		int eaten = 0;
-		int copied_early = 0;
 		bool fragstolen = false;

-		if (tp->copied_seq == tp->rcv_nxt &&
-		    len - tcp_header_len <= tp->ucopy.len) {
-#ifdef CONFIG_NET_DMA
-			if (tp->ucopy.task == current &&
-			    sock_owned_by_user(sk) &&
-			    tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
-				copied_early = 1;
-				eaten = 1;
-			}
-#endif
-			if (tp->ucopy.task == current &&
-			    sock_owned_by_user(sk) && !copied_early) {
-				__set_current_state(TASK_RUNNING);
+		if (tp->ucopy.task == current &&
+		    tp->copied_seq == tp->rcv_nxt &&
+		    len - tcp_header_len <= tp->ucopy.len &&
+		    sock_owned_by_user(sk)) {
+			__set_current_state(TASK_RUNNING);

-				if (!tcp_copy_to_iovec(sk, skb, tcp_header_len))
-					eaten = 1;
-			}
-			if (eaten) {
+			if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) {
 				/* Predicted packet is in window by definition.
 				 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
 				 * Hence, check seq<=rcv_wup reduces to:
@@ -5213,9 +5153,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 				__skb_pull(skb, tcp_header_len);
 				tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
 				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER);
+				eaten = 1;
 			}
-			if (copied_early)
-				tcp_cleanup_rbuf(sk, skb->len);
 		}
 		if (!eaten) {
 			if (tcp_checksum_complete_user(sk, skb))
@@ -5252,14 +5191,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 				goto no_ack;
 		}

-		if (!copied_early || tp->rcv_nxt != tp->rcv_wup)
-			__tcp_ack_snd_check(sk, 0);
+		__tcp_ack_snd_check(sk, 0);
 no_ack:
-#ifdef CONFIG_NET_DMA
-		if (copied_early)
-			__skb_queue_tail(&sk->sk_async_wait_queue, skb);
-		else
-#endif
 		if (eaten)
 			kfree_skb_partial(skb, fragstolen);
 		sk->sk_data_ready(sk);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index cd17f009aede..fbea536cf5c0 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -72,7 +72,6 @@
 #include <net/inet_common.h>
 #include <net/timewait_sock.h>
 #include <net/xfrm.h>
-#include <net/netdma.h>
 #include <net/secure_seq.h>
 #include <net/tcp_memcontrol.h>
 #include <net/busy_poll.h>
@@ -1670,18 +1669,8 @@ process:
 	bh_lock_sock_nested(sk);
 	ret = 0;
 	if (!sock_owned_by_user(sk)) {
-#ifdef CONFIG_NET_DMA
-		struct tcp_sock *tp = tcp_sk(sk);
-		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = net_dma_find_channel();
-		if (tp->ucopy.dma_chan)
+		if (!tcp_prequeue(sk, skb))
 			ret = tcp_v4_do_rcv(sk, skb);
-		else
-#endif
-		{
-			if (!tcp_prequeue(sk, skb))
-				ret = tcp_v4_do_rcv(sk, skb);
-		}
 	} else if (unlikely(sk_add_backlog(sk, skb,
 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
 		bh_unlock_sock(sk);
@@ -1841,11 +1830,6 @@ void tcp_v4_destroy_sock(struct sock *sk)
 	}
 #endif

-#ifdef CONFIG_NET_DMA
-	/* Cleans up our sk_async_wait_queue */
-	__skb_queue_purge(&sk->sk_async_wait_queue);
-#endif
-
 	/* Clean prequeue, it must be empty really */
 	__skb_queue_purge(&tp->ucopy.prequeue);

diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 29964c3d363c..03a5d1ed3340 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -59,7 +59,6 @@
 #include <net/snmp.h>
 #include <net/dsfield.h>
 #include <net/timewait_sock.h>
-#include <net/netdma.h>
 #include <net/inet_common.h>
 #include <net/secure_seq.h>
 #include <net/tcp_memcontrol.h>
@@ -1446,18 +1445,8 @@ process:
 	bh_lock_sock_nested(sk);
 	ret = 0;
 	if (!sock_owned_by_user(sk)) {
-#ifdef CONFIG_NET_DMA
-		struct tcp_sock *tp = tcp_sk(sk);
-		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = net_dma_find_channel();
-		if (tp->ucopy.dma_chan)
+		if (!tcp_prequeue(sk, skb))
 			ret = tcp_v6_do_rcv(sk, skb);
-		else
-#endif
-		{
-			if (!tcp_prequeue(sk, skb))
-				ret = tcp_v6_do_rcv(sk, skb);
-		}
 	} else if (unlikely(sk_add_backlog(sk, skb,
 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
 		bh_unlock_sock(sk);
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 0080d2b0a8ae..bb9cbc17d926 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -839,7 +839,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,

 	if (!(flags & MSG_PEEK)) {
 		spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
-		sk_eat_skb(sk, skb, false);
+		sk_eat_skb(sk, skb);
 		spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
 		*seq = 0;
 	}
@@ -861,10 +861,10 @@ copy_uaddr:
 		llc_cmsg_rcv(msg, skb);

 	if (!(flags & MSG_PEEK)) {
 		spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
-		sk_eat_skb(sk, skb, false);
+		sk_eat_skb(sk, skb);
 		spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
 		*seq = 0;
 	}

 	goto out;