author	Dan Williams <dan.j.williams@intel.com>	2013-12-30 15:37:29 -0500
committer	Dan Williams <dan.j.williams@intel.com>	2014-09-28 10:05:16 -0400
commit	7bced397510ab569d31de4c70b39e13355046387 (patch)
tree	faa4067a53e42acffc752e9a153e7dbaed4126e5 /net
parent	08223d80df38e666a42d7c82eb340db55c6e03bd (diff)
net_dma: simple removal
Per commit "77873803363c net_dma: mark broken" net_dma is no longer used
and there is no plan to fix it.

This is the mechanical removal of bits in CONFIG_NET_DMA ifdef guards.
Reverting the remainder of the net_dma induced changes is deferred to
subsequent patches.

Marked for stable due to Roman's report of a memory leak in
dma_pin_iovec_pages(): https://lkml.org/lkml/2014/9/3/177

Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Vinod Koul <vinod.koul@intel.com>
Cc: David Whipple <whipple@securedatainnovations.ch>
Cc: Alexander Duyck <alexander.h.duyck@intel.com>
Cc: <stable@vger.kernel.org>
Reported-by: Roman Gushchin <klamm@yandex-team.ru>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
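For orientation only (not part of the patch itself), the removal follows one pattern throughout: the CONFIG_NET_DMA branch that handed the skb-to-iovec copy to a DMA channel is dropped, and only the ordinary CPU copy remains. A condensed before/after sketch of the tcp_recvmsg() hunk from net/ipv4/tcp.c below:

	/* Before: offload the copy to a DMA channel when one is available. */
	#ifdef CONFIG_NET_DMA
		if (tp->ucopy.dma_chan)
			tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
					tp->ucopy.dma_chan, skb, offset,
					msg->msg_iov, used, tp->ucopy.pinned_list);
		else
	#endif
		{
			err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, used);
		}

	/* After: the plain CPU copy into the user iovec is the only path left. */
		err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, used);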
Diffstat (limited to 'net')
-rw-r--r--  net/core/Makefile           |   1
-rw-r--r--  net/core/dev.c              |  10
-rw-r--r--  net/core/sock.c             |   6
-rw-r--r--  net/core/user_dma.c         | 131
-rw-r--r--  net/dccp/proto.c            |   4
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c  |   9
-rw-r--r--  net/ipv4/tcp.c              | 147
-rw-r--r--  net/ipv4/tcp_input.c        |  61
-rw-r--r--  net/ipv4/tcp_ipv4.c         |  18
-rw-r--r--  net/ipv6/tcp_ipv6.c         |  13
-rw-r--r--  net/llc/af_llc.c            |  10
11 files changed, 23 insertions, 387 deletions
diff --git a/net/core/Makefile b/net/core/Makefile
index 9628c20acff6..5038f1ea0349 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -16,7 +16,6 @@ obj-y += net-sysfs.o
 obj-$(CONFIG_PROC_FS) += net-procfs.o
 obj-$(CONFIG_NET_PKTGEN) += pktgen.o
 obj-$(CONFIG_NETPOLL) += netpoll.o
-obj-$(CONFIG_NET_DMA) += user_dma.o
 obj-$(CONFIG_FIB_RULES) += fib_rules.o
 obj-$(CONFIG_TRACEPOINTS) += net-traces.o
 obj-$(CONFIG_NET_DROP_MONITOR) += drop_monitor.o
diff --git a/net/core/dev.c b/net/core/dev.c
index b1b0c8d4d7df..5e37e9abe8c5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1266,7 +1266,6 @@ static int __dev_open(struct net_device *dev)
 		clear_bit(__LINK_STATE_START, &dev->state);
 	else {
 		dev->flags |= IFF_UP;
-		net_dmaengine_get();
 		dev_set_rx_mode(dev);
 		dev_activate(dev);
 		add_device_randomness(dev->dev_addr, dev->addr_len);
@@ -1342,7 +1341,6 @@ static int __dev_close_many(struct list_head *head)
 			ops->ndo_stop(dev);
 
 		dev->flags &= ~IFF_UP;
-		net_dmaengine_put();
 	}
 
 	return 0;
@@ -4405,14 +4403,6 @@ static void net_rx_action(struct softirq_action *h)
 out:
 	net_rps_action_and_irq_enable(sd);
 
-#ifdef CONFIG_NET_DMA
-	/*
-	 * There may not be any more sk_buffs coming right now, so push
-	 * any pending DMA copies to hardware
-	 */
-	dma_issue_pending_all();
-#endif
-
 	return;
 
 softnet_break:
diff --git a/net/core/sock.c b/net/core/sock.c
index c0fc6bdad1e3..2f143c3b190a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1452,9 +1452,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		atomic_set(&newsk->sk_omem_alloc, 0);
 		skb_queue_head_init(&newsk->sk_receive_queue);
 		skb_queue_head_init(&newsk->sk_write_queue);
-#ifdef CONFIG_NET_DMA
-		skb_queue_head_init(&newsk->sk_async_wait_queue);
-#endif
 
 		spin_lock_init(&newsk->sk_dst_lock);
 		rwlock_init(&newsk->sk_callback_lock);
@@ -2265,9 +2262,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 	skb_queue_head_init(&sk->sk_receive_queue);
 	skb_queue_head_init(&sk->sk_write_queue);
 	skb_queue_head_init(&sk->sk_error_queue);
-#ifdef CONFIG_NET_DMA
-	skb_queue_head_init(&sk->sk_async_wait_queue);
-#endif
 
 	sk->sk_send_head = NULL;
 
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
deleted file mode 100644
index 1b5fefdb8198..000000000000
--- a/net/core/user_dma.c
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
- * Portions based on net/core/datagram.c and copyrighted by their authors.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
- */
-
-/*
- * This code allows the net stack to make use of a DMA engine for
- * skb to iovec copies.
- */
-
-#include <linux/dmaengine.h>
-#include <linux/socket.h>
-#include <linux/export.h>
-#include <net/tcp.h>
-#include <net/netdma.h>
-
-#define NET_DMA_DEFAULT_COPYBREAK 4096
-
-int sysctl_tcp_dma_copybreak = NET_DMA_DEFAULT_COPYBREAK;
-EXPORT_SYMBOL(sysctl_tcp_dma_copybreak);
-
-/**
- * dma_skb_copy_datagram_iovec - Copy a datagram to an iovec.
- * @skb - buffer to copy
- * @offset - offset in the buffer to start copying from
- * @iovec - io vector to copy to
- * @len - amount of data to copy from buffer to iovec
- * @pinned_list - locked iovec buffer data
- *
- * Note: the iovec is modified during the copy.
- */
-int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
-			struct sk_buff *skb, int offset, struct iovec *to,
-			size_t len, struct dma_pinned_list *pinned_list)
-{
-	int start = skb_headlen(skb);
-	int i, copy = start - offset;
-	struct sk_buff *frag_iter;
-	dma_cookie_t cookie = 0;
-
-	/* Copy header. */
-	if (copy > 0) {
-		if (copy > len)
-			copy = len;
-		cookie = dma_memcpy_to_iovec(chan, to, pinned_list,
-					     skb->data + offset, copy);
-		if (cookie < 0)
-			goto fault;
-		len -= copy;
-		if (len == 0)
-			goto end;
-		offset += copy;
-	}
-
-	/* Copy paged appendix. Hmm... why does this look so complicated? */
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		int end;
-		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
-		WARN_ON(start > offset + len);
-
-		end = start + skb_frag_size(frag);
-		copy = end - offset;
-		if (copy > 0) {
-			struct page *page = skb_frag_page(frag);
-
-			if (copy > len)
-				copy = len;
-
-			cookie = dma_memcpy_pg_to_iovec(chan, to, pinned_list, page,
-					frag->page_offset + offset - start, copy);
-			if (cookie < 0)
-				goto fault;
-			len -= copy;
-			if (len == 0)
-				goto end;
-			offset += copy;
-		}
-		start = end;
-	}
-
-	skb_walk_frags(skb, frag_iter) {
-		int end;
-
-		WARN_ON(start > offset + len);
-
-		end = start + frag_iter->len;
-		copy = end - offset;
-		if (copy > 0) {
-			if (copy > len)
-				copy = len;
-			cookie = dma_skb_copy_datagram_iovec(chan, frag_iter,
-							     offset - start,
-							     to, copy,
-							     pinned_list);
-			if (cookie < 0)
-				goto fault;
-			len -= copy;
-			if (len == 0)
-				goto end;
-			offset += copy;
-		}
-		start = end;
-	}
-
-end:
-	if (!len) {
-		skb->dma_cookie = cookie;
-		return cookie;
-	}
-
-fault:
-	return -EFAULT;
-}
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index eb892b4f4814..f9076f295b13 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -848,7 +848,7 @@ int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		default:
 			dccp_pr_debug("packet_type=%s\n",
 				      dccp_packet_name(dh->dccph_type));
-			sk_eat_skb(sk, skb, false);
+			sk_eat_skb(sk, skb);
 		}
 verify_sock_status:
 		if (sock_flag(sk, SOCK_DONE)) {
@@ -905,7 +905,7 @@ verify_sock_status:
 			len = skb->len;
 	found_fin_ok:
 		if (!(flags & MSG_PEEK))
-			sk_eat_skb(sk, skb, false);
+			sk_eat_skb(sk, skb);
 		break;
 	} while (1);
 out:
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 44eba052b43d..c3d2a48481f1 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -635,15 +635,6 @@ static struct ctl_table ipv4_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec
 	},
-#ifdef CONFIG_NET_DMA
-	{
-		.procname	= "tcp_dma_copybreak",
-		.data		= &sysctl_tcp_dma_copybreak,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec
-	},
-#endif
 	{
 		.procname	= "tcp_slow_start_after_idle",
 		.data		= &sysctl_tcp_slow_start_after_idle,
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 97c8f5620c43..28595a364f09 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -274,7 +274,6 @@
 #include <net/tcp.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
-#include <net/netdma.h>
 #include <net/sock.h>
 
 #include <asm/uaccess.h>
@@ -1454,39 +1453,6 @@ static void tcp_prequeue_process(struct sock *sk)
 	tp->ucopy.memory = 0;
 }
 
-#ifdef CONFIG_NET_DMA
-static void tcp_service_net_dma(struct sock *sk, bool wait)
-{
-	dma_cookie_t done, used;
-	dma_cookie_t last_issued;
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	if (!tp->ucopy.dma_chan)
-		return;
-
-	last_issued = tp->ucopy.dma_cookie;
-	dma_async_issue_pending(tp->ucopy.dma_chan);
-
-	do {
-		if (dma_async_is_tx_complete(tp->ucopy.dma_chan,
-					     last_issued, &done,
-					     &used) == DMA_COMPLETE) {
-			/* Safe to free early-copied skbs now */
-			__skb_queue_purge(&sk->sk_async_wait_queue);
-			break;
-		} else {
-			struct sk_buff *skb;
-			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
-			       (dma_async_is_complete(skb->dma_cookie, done,
-						      used) == DMA_COMPLETE)) {
-				__skb_dequeue(&sk->sk_async_wait_queue);
-				kfree_skb(skb);
-			}
-		}
-	} while (wait);
-}
-#endif
-
 static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
 {
 	struct sk_buff *skb;
@@ -1504,7 +1470,7 @@ static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
 		 * splitted a fat GRO packet, while we released socket lock
 		 * in skb_splice_bits()
 		 */
-		sk_eat_skb(sk, skb, false);
+		sk_eat_skb(sk, skb);
 	}
 	return NULL;
 }
@@ -1570,11 +1536,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 				continue;
 		}
 		if (tcp_hdr(skb)->fin) {
-			sk_eat_skb(sk, skb, false);
+			sk_eat_skb(sk, skb);
 			++seq;
 			break;
 		}
-		sk_eat_skb(sk, skb, false);
+		sk_eat_skb(sk, skb);
 		if (!desc->count)
 			break;
 		tp->copied_seq = seq;
@@ -1612,7 +1578,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	int target;		/* Read at least this many bytes */
 	long timeo;
 	struct task_struct *user_recv = NULL;
-	bool copied_early = false;
 	struct sk_buff *skb;
 	u32 urg_hole = 0;
 
@@ -1655,28 +1620,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
 
-#ifdef CONFIG_NET_DMA
-	tp->ucopy.dma_chan = NULL;
-	preempt_disable();
-	skb = skb_peek_tail(&sk->sk_receive_queue);
-	{
-		int available = 0;
-
-		if (skb)
-			available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
-		if ((available < target) &&
-		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
-		    !sysctl_tcp_low_latency &&
-		    net_dma_find_channel()) {
-			preempt_enable();
-			tp->ucopy.pinned_list =
-					dma_pin_iovec_pages(msg->msg_iov, len);
-		} else {
-			preempt_enable();
-		}
-	}
-#endif
-
 	do {
 		u32 offset;
 
@@ -1807,16 +1750,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			/* __ Set realtime policy in scheduler __ */
 		}
 
-#ifdef CONFIG_NET_DMA
-		if (tp->ucopy.dma_chan) {
-			if (tp->rcv_wnd == 0 &&
-			    !skb_queue_empty(&sk->sk_async_wait_queue)) {
-				tcp_service_net_dma(sk, true);
-				tcp_cleanup_rbuf(sk, copied);
-			} else
-				dma_async_issue_pending(tp->ucopy.dma_chan);
-		}
-#endif
 		if (copied >= target) {
 			/* Do not sleep, just process backlog. */
 			release_sock(sk);
@@ -1824,11 +1757,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		} else
 			sk_wait_data(sk, &timeo);
 
-#ifdef CONFIG_NET_DMA
-		tcp_service_net_dma(sk, false);  /* Don't block */
-		tp->ucopy.wakeup = 0;
-#endif
-
 		if (user_recv) {
 			int chunk;
 
@@ -1886,43 +1814,13 @@ do_prequeue:
 		}
 
 		if (!(flags & MSG_TRUNC)) {
-#ifdef CONFIG_NET_DMA
-			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-				tp->ucopy.dma_chan = net_dma_find_channel();
-
-			if (tp->ucopy.dma_chan) {
-				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
-					tp->ucopy.dma_chan, skb, offset,
-					msg->msg_iov, used,
-					tp->ucopy.pinned_list);
-
-				if (tp->ucopy.dma_cookie < 0) {
-
-					pr_alert("%s: dma_cookie < 0\n",
-						 __func__);
-
-					/* Exception. Bailout! */
-					if (!copied)
-						copied = -EFAULT;
-					break;
-				}
-
-				dma_async_issue_pending(tp->ucopy.dma_chan);
-
-				if ((offset + used) == skb->len)
-					copied_early = true;
-
-			} else
-#endif
-			{
-				err = skb_copy_datagram_iovec(skb, offset,
-							      msg->msg_iov, used);
-				if (err) {
-					/* Exception. Bailout! */
-					if (!copied)
-						copied = -EFAULT;
-					break;
-				}
-			}
+			err = skb_copy_datagram_iovec(skb, offset,
+						      msg->msg_iov, used);
+			if (err) {
+				/* Exception. Bailout! */
+				if (!copied)
+					copied = -EFAULT;
+				break;
+			}
 		}
 
@@ -1942,19 +1840,15 @@ skip_copy:
 
 		if (tcp_hdr(skb)->fin)
 			goto found_fin_ok;
-		if (!(flags & MSG_PEEK)) {
-			sk_eat_skb(sk, skb, copied_early);
-			copied_early = false;
-		}
+		if (!(flags & MSG_PEEK))
+			sk_eat_skb(sk, skb);
 		continue;
 
 	found_fin_ok:
 		/* Process the FIN. */
 		++*seq;
-		if (!(flags & MSG_PEEK)) {
-			sk_eat_skb(sk, skb, copied_early);
-			copied_early = false;
-		}
+		if (!(flags & MSG_PEEK))
+			sk_eat_skb(sk, skb);
 		break;
 	} while (len > 0);
 
@@ -1977,16 +1871,6 @@ skip_copy:
 		tp->ucopy.len = 0;
 	}
 
-#ifdef CONFIG_NET_DMA
-	tcp_service_net_dma(sk, true);	/* Wait for queue to drain */
-	tp->ucopy.dma_chan = NULL;
-
-	if (tp->ucopy.pinned_list) {
-		dma_unpin_iovec_pages(tp->ucopy.pinned_list);
-		tp->ucopy.pinned_list = NULL;
-	}
-#endif
-
 	/* According to UNIX98, msg_name/msg_namelen are ignored
 	 * on connected socket. I was just happy when found this 8) --ANK
 	 */
@@ -2330,9 +2214,6 @@ int tcp_disconnect(struct sock *sk, int flags)
 	__skb_queue_purge(&sk->sk_receive_queue);
 	tcp_write_queue_purge(sk);
 	__skb_queue_purge(&tp->out_of_order_queue);
-#ifdef CONFIG_NET_DMA
-	__skb_queue_purge(&sk->sk_async_wait_queue);
-#endif
 
 	inet->inet_dport = 0;
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index eeaac399420d..1342e9851f97 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -73,7 +73,6 @@
 #include <net/inet_common.h>
 #include <linux/ipsec.h>
 #include <asm/unaligned.h>
-#include <net/netdma.h>
 
 int sysctl_tcp_timestamps __read_mostly = 1;
 int sysctl_tcp_window_scaling __read_mostly = 1;
@@ -4970,53 +4969,6 @@ static inline bool tcp_checksum_complete_user(struct sock *sk,
 		__tcp_checksum_complete_user(sk, skb);
 }
 
-#ifdef CONFIG_NET_DMA
-static bool tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
-				   int hlen)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-	int chunk = skb->len - hlen;
-	int dma_cookie;
-	bool copied_early = false;
-
-	if (tp->ucopy.wakeup)
-		return false;
-
-	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-		tp->ucopy.dma_chan = net_dma_find_channel();
-
-	if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
-
-		dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan,
-							 skb, hlen,
-							 tp->ucopy.iov, chunk,
-							 tp->ucopy.pinned_list);
-
-		if (dma_cookie < 0)
-			goto out;
-
-		tp->ucopy.dma_cookie = dma_cookie;
-		copied_early = true;
-
-		tp->ucopy.len -= chunk;
-		tp->copied_seq += chunk;
-		tcp_rcv_space_adjust(sk);
-
-		if ((tp->ucopy.len == 0) ||
-		    (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) ||
-		    (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) {
-			tp->ucopy.wakeup = 1;
-			sk->sk_data_ready(sk, 0);
-		}
-	} else if (chunk > 0) {
-		tp->ucopy.wakeup = 1;
-		sk->sk_data_ready(sk, 0);
-	}
-out:
-	return copied_early;
-}
-#endif /* CONFIG_NET_DMA */
-
 /* Does PAWS and seqno based validation of an incoming segment, flags will
  * play significant role here.
  */
@@ -5201,14 +5153,6 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
 			if (tp->copied_seq == tp->rcv_nxt &&
 			    len - tcp_header_len <= tp->ucopy.len) {
-#ifdef CONFIG_NET_DMA
-				if (tp->ucopy.task == current &&
-				    sock_owned_by_user(sk) &&
-				    tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
-					copied_early = 1;
-					eaten = 1;
-				}
-#endif
 				if (tp->ucopy.task == current &&
 				    sock_owned_by_user(sk) && !copied_early) {
 					__set_current_state(TASK_RUNNING);
@@ -5274,11 +5218,6 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			if (!copied_early || tp->rcv_nxt != tp->rcv_wup)
 				__tcp_ack_snd_check(sk, 0);
 no_ack:
-#ifdef CONFIG_NET_DMA
-			if (copied_early)
-				__skb_queue_tail(&sk->sk_async_wait_queue, skb);
-			else
-#endif
 			if (eaten)
 				kfree_skb_partial(skb, fragstolen);
 			sk->sk_data_ready(sk, 0);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3cf976510497..737c2e270ee3 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -72,7 +72,6 @@
 #include <net/inet_common.h>
 #include <net/timewait_sock.h>
 #include <net/xfrm.h>
-#include <net/netdma.h>
 #include <net/secure_seq.h>
 #include <net/tcp_memcontrol.h>
 #include <net/busy_poll.h>
@@ -1999,18 +1998,8 @@ process:
 	bh_lock_sock_nested(sk);
 	ret = 0;
 	if (!sock_owned_by_user(sk)) {
-#ifdef CONFIG_NET_DMA
-		struct tcp_sock *tp = tcp_sk(sk);
-		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = net_dma_find_channel();
-		if (tp->ucopy.dma_chan)
-			ret = tcp_v4_do_rcv(sk, skb);
-		else
-#endif
-		{
-			if (!tcp_prequeue(sk, skb))
-				ret = tcp_v4_do_rcv(sk, skb);
-		}
+		if (!tcp_prequeue(sk, skb))
+			ret = tcp_v4_do_rcv(sk, skb);
 	} else if (unlikely(sk_add_backlog(sk, skb,
 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
 		bh_unlock_sock(sk);
@@ -2169,11 +2158,6 @@ void tcp_v4_destroy_sock(struct sock *sk)
 	}
 #endif
 
-#ifdef CONFIG_NET_DMA
-	/* Cleans up our sk_async_wait_queue */
-	__skb_queue_purge(&sk->sk_async_wait_queue);
-#endif
-
 	/* Clean prequeue, it must be empty really */
 	__skb_queue_purge(&tp->ucopy.prequeue);
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 889079b2ea85..cb21fccf2089 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -59,7 +59,6 @@
 #include <net/snmp.h>
 #include <net/dsfield.h>
 #include <net/timewait_sock.h>
-#include <net/netdma.h>
 #include <net/inet_common.h>
 #include <net/secure_seq.h>
 #include <net/tcp_memcontrol.h>
@@ -1520,18 +1519,8 @@ process:
 	bh_lock_sock_nested(sk);
 	ret = 0;
 	if (!sock_owned_by_user(sk)) {
-#ifdef CONFIG_NET_DMA
-		struct tcp_sock *tp = tcp_sk(sk);
-		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = net_dma_find_channel();
-		if (tp->ucopy.dma_chan)
-			ret = tcp_v6_do_rcv(sk, skb);
-		else
-#endif
-		{
-			if (!tcp_prequeue(sk, skb))
-				ret = tcp_v6_do_rcv(sk, skb);
-		}
+		if (!tcp_prequeue(sk, skb))
+			ret = tcp_v6_do_rcv(sk, skb);
 	} else if (unlikely(sk_add_backlog(sk, skb,
 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
 		bh_unlock_sock(sk);
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 0080d2b0a8ae..bb9cbc17d926 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -839,7 +839,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
 
 	if (!(flags & MSG_PEEK)) {
 		spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
-		sk_eat_skb(sk, skb, false);
+		sk_eat_skb(sk, skb);
 		spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
 		*seq = 0;
 	}
@@ -861,10 +861,10 @@ copy_uaddr:
 		llc_cmsg_rcv(msg, skb);
 
 	if (!(flags & MSG_PEEK)) {
 		spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
-		sk_eat_skb(sk, skb, false);
+		sk_eat_skb(sk, skb);
 		spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
 		*seq = 0;
 	}
 
 	goto out;