-rw-r--r--  Documentation/ABI/removed/net_dma          |    8
-rw-r--r--  Documentation/networking/ip-sysctl.txt     |    6
-rw-r--r--  MAINTAINERS                                |   19
-rw-r--r--  crypto/async_tx/async_xor.c                |    3
-rw-r--r--  drivers/dma/Kconfig                        |   12
-rw-r--r--  drivers/dma/Makefile                       |    1
-rw-r--r--  drivers/dma/dmaengine.c                    |  104
-rw-r--r--  drivers/dma/dmatest.c                      |    4
-rw-r--r--  drivers/dma/ioat/dca.c                     |   13
-rw-r--r--  drivers/dma/ioat/dma.c                     |    3
-rw-r--r--  drivers/dma/ioat/dma.h                     |    7
-rw-r--r--  drivers/dma/ioat/dma_v2.c                  |    4
-rw-r--r--  drivers/dma/ioat/dma_v3.c                  |    7
-rw-r--r--  drivers/dma/iovlock.c                      |  280
-rw-r--r--  drivers/dma/mv_xor.c                       |   80
-rw-r--r--  include/linux/dmaengine.h                  |   22
-rw-r--r--  include/linux/skbuff.h                     |    8
-rw-r--r--  include/linux/tcp.h                        |    8
-rw-r--r--  include/net/netdma.h                       |   32
-rw-r--r--  include/net/sock.h                         |   19
-rw-r--r--  include/net/tcp.h                          |    9
-rw-r--r--  kernel/sysctl_binary.c                     |    1
-rw-r--r--  net/core/Makefile                          |    1
-rw-r--r--  net/core/dev.c                             |   10
-rw-r--r--  net/core/sock.c                            |    6
-rw-r--r--  net/core/user_dma.c                        |  131
-rw-r--r--  net/dccp/proto.c                           |    4
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c                 |    9
-rw-r--r--  net/ipv4/tcp.c                             |  149
-rw-r--r--  net/ipv4/tcp_input.c                       |   83
-rw-r--r--  net/ipv4/tcp_ipv4.c                        |   18
-rw-r--r--  net/ipv6/tcp_ipv6.c                        |   13
-rw-r--r--  net/llc/af_llc.c                           |   10
33 files changed, 129 insertions(+), 955 deletions(-)
diff --git a/Documentation/ABI/removed/net_dma b/Documentation/ABI/removed/net_dma
new file mode 100644
index 000000000000..a173aecc2f18
--- /dev/null
+++ b/Documentation/ABI/removed/net_dma
@@ -0,0 +1,8 @@
+What:		tcp_dma_copybreak sysctl
+Date:		Removed in kernel v3.13
+Contact:	Dan Williams <dan.j.williams@intel.com>
+Description:
+	Formerly the lower limit, in bytes, of the size of socket reads
+	that will be offloaded to a DMA copy engine. Removed due to
+	coherency issues of the cpu potentially touching the buffers
+	while dma is in flight.
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 29a93518bf18..caedb18d4564 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -580,12 +580,6 @@ tcp_workaround_signed_windows - BOOLEAN
 	not receive a window scaling option from them.
 	Default: 0
 
-tcp_dma_copybreak - INTEGER
-	Lower limit, in bytes, of the size of socket reads that will be
-	offloaded to a DMA copy engine, if one is present in the system
-	and CONFIG_NET_DMA is enabled.
-	Default: 4096
-
 tcp_thin_linear_timeouts - BOOLEAN
 	Enable dynamic triggering of linear timeouts for thin streams.
 	If set, a check is performed upon retransmission by timeout to
diff --git a/MAINTAINERS b/MAINTAINERS
index f10ed3914ea8..f107230fc73d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1025,24 +1025,20 @@ F: arch/arm/mach-pxa/colibri-pxa270-income.c
 
 ARM/INTEL IOP32X ARM ARCHITECTURE
 M:	Lennert Buytenhek <kernel@wantstofly.org>
-M:	Dan Williams <dan.j.williams@intel.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 
 ARM/INTEL IOP33X ARM ARCHITECTURE
-M:	Dan Williams <dan.j.williams@intel.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:	Maintained
+S:	Orphan
 
 ARM/INTEL IOP13XX ARM ARCHITECTURE
 M:	Lennert Buytenhek <kernel@wantstofly.org>
-M:	Dan Williams <dan.j.williams@intel.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 
 ARM/INTEL IQ81342EX MACHINE SUPPORT
 M:	Lennert Buytenhek <kernel@wantstofly.org>
-M:	Dan Williams <dan.j.williams@intel.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 
@@ -1067,7 +1063,6 @@ F: drivers/pcmcia/pxa2xx_stargate2.c
 
 ARM/INTEL XSC3 (MANZANO) ARM CORE
 M:	Lennert Buytenhek <kernel@wantstofly.org>
-M:	Dan Williams <dan.j.williams@intel.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 
@@ -1562,9 +1557,9 @@ F: drivers/platform/x86/asus*.c
 F:	drivers/platform/x86/eeepc*.c
 
 ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API
-M:	Dan Williams <dan.j.williams@intel.com>
+R:	Dan Williams <dan.j.williams@intel.com>
 W:	http://sourceforge.net/projects/xscaleiop
-S:	Maintained
+S:	Odd fixes
 F:	Documentation/crypto/async-tx-api.txt
 F:	crypto/async_tx/
 F:	drivers/dma/
@@ -2995,13 +2990,11 @@ T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
 
 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
 M:	Vinod Koul <vinod.koul@intel.com>
-M:	Dan Williams <dan.j.williams@intel.com>
 L:	dmaengine@vger.kernel.org
 Q:	https://patchwork.kernel.org/project/linux-dmaengine/list/
-S:	Supported
+S:	Maintained
 F:	drivers/dma/
 F:	include/linux/dma*
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx.git
 T:	git git://git.infradead.org/users/vkoul/slave-dma.git (slave-dma)
 
 DME1737 HARDWARE MONITOR DRIVER
@@ -4754,8 +4747,8 @@ F: arch/x86/kernel/cpu/microcode/core*
 F:	arch/x86/kernel/cpu/microcode/intel*
 
 INTEL I/OAT DMA DRIVER
-M:	Dan Williams <dan.j.williams@intel.com>
 M:	Dave Jiang <dave.jiang@intel.com>
+R:	Dan Williams <dan.j.williams@intel.com>
 L:	dmaengine@vger.kernel.org
 Q:	https://patchwork.kernel.org/project/linux-dmaengine/list/
 S:	Supported
@@ -4770,7 +4763,7 @@ F: drivers/iommu/intel-iommu.c
 F:	include/linux/intel-iommu.h
 
 INTEL IOP-ADMA DMA DRIVER
-M:	Dan Williams <dan.j.williams@intel.com>
+R:	Dan Williams <dan.j.williams@intel.com>
 S:	Odd fixes
 F:	drivers/dma/iop-adma.c
 
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 3c562f5a60bb..e1bce26cd4f9 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -78,8 +78,6 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
 		tx = dma->device_prep_dma_xor(chan, dma_dest, src_list,
 					      xor_src_cnt, unmap->len,
 					      dma_flags);
-		src_list[0] = tmp;
-
 
 		if (unlikely(!tx))
 			async_tx_quiesce(&submit->depend_tx);
@@ -92,6 +90,7 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
 					      xor_src_cnt, unmap->len,
 					      dma_flags);
 	}
+	src_list[0] = tmp;
 
 	dma_set_unmap(tx, unmap);
 	async_tx_submit(chan, tx, submit);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 9b1ea0ef59af..a016490c95ae 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -427,18 +427,6 @@ config DMA_OF
 comment "DMA Clients"
 	depends on DMA_ENGINE
 
-config NET_DMA
-	bool "Network: TCP receive copy offload"
-	depends on DMA_ENGINE && NET
-	default (INTEL_IOATDMA || FSL_DMA)
-	depends on BROKEN
-	help
-	  This enables the use of DMA engines in the network stack to
-	  offload receive copy-to-user operations, freeing CPU cycles.
-
-	  Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise
-	  say N.
-
 config ASYNC_TX_DMA
 	bool "Async_tx: Offload support for the async_tx api"
 	depends on DMA_ENGINE
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index c6adb925f0b9..cb626c179911 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -6,7 +6,6 @@ obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
 obj-$(CONFIG_DMA_ACPI) += acpi-dma.o
 obj-$(CONFIG_DMA_OF) += of-dma.o
 
-obj-$(CONFIG_NET_DMA) += iovlock.o
 obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
 obj-$(CONFIG_DMATEST) += dmatest.o
 obj-$(CONFIG_INTEL_IOATDMA) += ioat/
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index d5d30ed863ce..24bfaf0b92ba 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1081,110 +1081,6 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
 }
 EXPORT_SYMBOL(dmaengine_get_unmap_data);
 
-/**
- * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
- * @chan: DMA channel to offload copy to
- * @dest_pg: destination page
- * @dest_off: offset in page to copy to
- * @src_pg: source page
- * @src_off: offset in page to copy from
- * @len: length
- *
- * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
- * address according to the DMA mapping API rules for streaming mappings.
- * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
- * (kernel memory or locked user space pages).
- */
-dma_cookie_t
-dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
-	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
-	size_t len)
-{
-	struct dma_device *dev = chan->device;
-	struct dma_async_tx_descriptor *tx;
-	struct dmaengine_unmap_data *unmap;
-	dma_cookie_t cookie;
-	unsigned long flags;
-
-	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
-	if (!unmap)
-		return -ENOMEM;
-
-	unmap->to_cnt = 1;
-	unmap->from_cnt = 1;
-	unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len,
-				      DMA_TO_DEVICE);
-	unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
-				      DMA_FROM_DEVICE);
-	unmap->len = len;
-	flags = DMA_CTRL_ACK;
-	tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
-					 len, flags);
-
-	if (!tx) {
-		dmaengine_unmap_put(unmap);
-		return -ENOMEM;
-	}
-
-	dma_set_unmap(tx, unmap);
-	cookie = tx->tx_submit(tx);
-	dmaengine_unmap_put(unmap);
-
-	preempt_disable();
-	__this_cpu_add(chan->local->bytes_transferred, len);
-	__this_cpu_inc(chan->local->memcpy_count);
-	preempt_enable();
-
-	return cookie;
-}
-EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
-
-/**
- * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
- * @chan: DMA channel to offload copy to
- * @dest: destination address (virtual)
- * @src: source address (virtual)
- * @len: length
- *
- * Both @dest and @src must be mappable to a bus address according to the
- * DMA mapping API rules for streaming mappings.
- * Both @dest and @src must stay memory resident (kernel memory or locked
- * user space pages).
- */
-dma_cookie_t
-dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
-			    void *src, size_t len)
-{
-	return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest),
-					 (unsigned long) dest & ~PAGE_MASK,
-					 virt_to_page(src),
-					 (unsigned long) src & ~PAGE_MASK, len);
-}
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
-
-/**
- * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
- * @chan: DMA channel to offload copy to
- * @page: destination page
- * @offset: offset in page to copy to
- * @kdata: source address (virtual)
- * @len: length
- *
- * Both @page/@offset and @kdata must be mappable to a bus address according
- * to the DMA mapping API rules for streaming mappings.
- * Both @page/@offset and @kdata must stay memory resident (kernel memory or
- * locked user space pages)
- */
-dma_cookie_t
-dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
-			   unsigned int offset, void *kdata, size_t len)
-{
-	return dma_async_memcpy_pg_to_pg(chan, page, offset,
-					 virt_to_page(kdata),
-					 (unsigned long) kdata & ~PAGE_MASK, len);
-}
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
-
 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 				  struct dma_chan *chan)
 {
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index e27cec25c59e..a8d7809e2f4c 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -688,14 +688,14 @@ static int dmatest_func(void *data)
 	runtime = ktime_us_delta(ktime_get(), ktime);
 
 	ret = 0;
+err_dstbuf:
 	for (i = 0; thread->dsts[i]; i++)
 		kfree(thread->dsts[i]);
-err_dstbuf:
 	kfree(thread->dsts);
 err_dsts:
+err_srcbuf:
 	for (i = 0; thread->srcs[i]; i++)
 		kfree(thread->srcs[i]);
-err_srcbuf:
 	kfree(thread->srcs);
 err_srcs:
 	kfree(pq_coefs);
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 9e84d5bc9307..3b55bb8d969a 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -35,6 +35,7 @@
 
 #include "dma.h"
 #include "registers.h"
+#include "dma_v2.h"
 
 /*
  * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
@@ -147,7 +148,7 @@ static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
 	u16 id;
 
 	/* This implementation only supports PCI-Express */
-	if (dev->bus != &pci_bus_type)
+	if (!dev_is_pci(dev))
 		return -ENODEV;
 	pdev = to_pci_dev(dev);
 	id = dcaid_from_pcidev(pdev);
@@ -179,7 +180,7 @@ static int ioat_dca_remove_requester(struct dca_provider *dca,
 	int i;
 
 	/* This implementation only supports PCI-Express */
-	if (dev->bus != &pci_bus_type)
+	if (!dev_is_pci(dev))
 		return -ENODEV;
 	pdev = to_pci_dev(dev);
 
@@ -320,7 +321,7 @@ static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev)
 	u16 global_req_table;
 
 	/* This implementation only supports PCI-Express */
-	if (dev->bus != &pci_bus_type)
+	if (!dev_is_pci(dev))
 		return -ENODEV;
 	pdev = to_pci_dev(dev);
 	id = dcaid_from_pcidev(pdev);
@@ -354,7 +355,7 @@ static int ioat2_dca_remove_requester(struct dca_provider *dca,
 	u16 global_req_table;
 
 	/* This implementation only supports PCI-Express */
-	if (dev->bus != &pci_bus_type)
+	if (!dev_is_pci(dev))
 		return -ENODEV;
 	pdev = to_pci_dev(dev);
 
@@ -496,7 +497,7 @@ static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
 	u16 global_req_table;
 
 	/* This implementation only supports PCI-Express */
-	if (dev->bus != &pci_bus_type)
+	if (!dev_is_pci(dev))
 		return -ENODEV;
 	pdev = to_pci_dev(dev);
 	id = dcaid_from_pcidev(pdev);
@@ -530,7 +531,7 @@ static int ioat3_dca_remove_requester(struct dca_provider *dca,
 	u16 global_req_table;
 
 	/* This implementation only supports PCI-Express */
-	if (dev->bus != &pci_bus_type)
+	if (!dev_is_pci(dev))
 		return -ENODEV;
 	pdev = to_pci_dev(dev);
 
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 4e3549a16132..940c1502a8b5 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -947,7 +947,7 @@ msix:
 	for (i = 0; i < msixcnt; i++)
 		device->msix_entries[i].entry = i;
 
-	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
+	err = pci_enable_msix_exact(pdev, device->msix_entries, msixcnt);
 	if (err)
 		goto msi;
 
@@ -1222,7 +1222,6 @@ int ioat1_dma_probe(struct ioatdma_device *device, int dca)
 	err = ioat_probe(device);
 	if (err)
 		return err;
-	ioat_set_tcp_copy_break(4096);
 	err = ioat_register(device);
 	if (err)
 		return err;
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index e982f00a9843..d63f68b1aa35 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -214,13 +214,6 @@ __dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw,
 #define dump_desc_dbg(c, d) \
 	({ if (d) __dump_desc_dbg(&c->base, d->hw, &d->txd, desc_id(d)); 0; })
 
-static inline void ioat_set_tcp_copy_break(unsigned long copybreak)
-{
-	#ifdef CONFIG_NET_DMA
-	sysctl_tcp_dma_copybreak = copybreak;
-	#endif
-}
-
 static inline struct ioat_chan_common *
 ioat_chan_by_index(struct ioatdma_device *device, int index)
 {
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 8d1058085eeb..695483e6be32 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -735,7 +735,8 @@ int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs)
 	 * called under bh_disabled so we need to trigger the timer
 	 * event directly
 	 */
-	if (jiffies > chan->timer.expires && timer_pending(&chan->timer)) {
+	if (time_is_before_jiffies(chan->timer.expires)
+	    && timer_pending(&chan->timer)) {
 		struct ioatdma_device *device = chan->device;
 
 		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
@@ -899,7 +900,6 @@ int ioat2_dma_probe(struct ioatdma_device *device, int dca)
 	err = ioat_probe(device);
 	if (err)
 		return err;
-	ioat_set_tcp_copy_break(2048);
 
 	list_for_each_entry(c, &dma->channels, device_node) {
 		chan = to_chan_common(c);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index b9b38a1cf92f..895f869d6c2c 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -740,7 +740,7 @@ ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 	return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
 }
 
-struct dma_async_tx_descriptor *
+static struct dma_async_tx_descriptor *
 ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
 		    unsigned int src_cnt, size_t len,
 		    enum sum_check_flags *result, unsigned long flags)
@@ -1091,7 +1091,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
 	}
 }
 
-struct dma_async_tx_descriptor *
+static struct dma_async_tx_descriptor *
 ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
 		  unsigned int src_cnt, const unsigned char *scf, size_t len,
 		  enum sum_check_flags *pqres, unsigned long flags)
@@ -1133,7 +1133,7 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
 			 flags);
 }
 
-struct dma_async_tx_descriptor *
+static struct dma_async_tx_descriptor *
 ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
 		     unsigned int src_cnt, size_t len,
 		     enum sum_check_flags *result, unsigned long flags)
@@ -1655,7 +1655,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	err = ioat_probe(device);
 	if (err)
 		return err;
-	ioat_set_tcp_copy_break(262144);
 
 	list_for_each_entry(c, &dma->channels, device_node) {
 		chan = to_chan_common(c);
diff --git a/drivers/dma/iovlock.c b/drivers/dma/iovlock.c
deleted file mode 100644
index bb48a57c2fc1..000000000000
--- a/drivers/dma/iovlock.c
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
- * Portions based on net/core/datagram.c and copyrighted by their authors.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
- */
-
-/*
- * This code allows the net stack to make use of a DMA engine for
- * skb to iovec copies.
- */
-
-#include <linux/dmaengine.h>
-#include <linux/pagemap.h>
-#include <linux/slab.h>
-#include <net/tcp.h> /* for memcpy_toiovec */
-#include <asm/io.h>
-#include <asm/uaccess.h>
-
-static int num_pages_spanned(struct iovec *iov)
-{
-	return
-	((PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
-	((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT);
-}
-
-/*
- * Pin down all the iovec pages needed for len bytes.
- * Return a struct dma_pinned_list to keep track of pages pinned down.
- *
- * We are allocating a single chunk of memory, and then carving it up into
- * 3 sections, the latter 2 whose size depends on the number of iovecs and the
- * total number of pages, respectively.
- */
-struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
-{
-	struct dma_pinned_list *local_list;
-	struct page **pages;
-	int i;
-	int ret;
-	int nr_iovecs = 0;
-	int iovec_len_used = 0;
-	int iovec_pages_used = 0;
-
-	/* don't pin down non-user-based iovecs */
-	if (segment_eq(get_fs(), KERNEL_DS))
-		return NULL;
-
-	/* determine how many iovecs/pages there are, up front */
-	do {
-		iovec_len_used += iov[nr_iovecs].iov_len;
-		iovec_pages_used += num_pages_spanned(&iov[nr_iovecs]);
-		nr_iovecs++;
-	} while (iovec_len_used < len);
-
-	/* single kmalloc for pinned list, page_list[], and the page arrays */
-	local_list = kmalloc(sizeof(*local_list)
-		+ (nr_iovecs * sizeof (struct dma_page_list))
-		+ (iovec_pages_used * sizeof (struct page*)), GFP_KERNEL);
-	if (!local_list)
-		goto out;
-
-	/* list of pages starts right after the page list array */
-	pages = (struct page **) &local_list->page_list[nr_iovecs];
-
-	local_list->nr_iovecs = 0;
-
-	for (i = 0; i < nr_iovecs; i++) {
-		struct dma_page_list *page_list = &local_list->page_list[i];
-
-		len -= iov[i].iov_len;
-
-		if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len))
-			goto unpin;
-
-		page_list->nr_pages = num_pages_spanned(&iov[i]);
-		page_list->base_address = iov[i].iov_base;
-
-		page_list->pages = pages;
-		pages += page_list->nr_pages;
-
-		/* pin pages down */
-		down_read(&current->mm->mmap_sem);
-		ret = get_user_pages(
-			current,
-			current->mm,
-			(unsigned long) iov[i].iov_base,
-			page_list->nr_pages,
-			1,	/* write */
-			0,	/* force */
-			page_list->pages,
-			NULL);
-		up_read(&current->mm->mmap_sem);
-
-		if (ret != page_list->nr_pages)
-			goto unpin;
-
-		local_list->nr_iovecs = i + 1;
-	}
-
-	return local_list;
-
-unpin:
-	dma_unpin_iovec_pages(local_list);
-out:
-	return NULL;
-}
-
-void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list)
-{
-	int i, j;
-
-	if (!pinned_list)
-		return;
-
-	for (i = 0; i < pinned_list->nr_iovecs; i++) {
-		struct dma_page_list *page_list = &pinned_list->page_list[i];
-		for (j = 0; j < page_list->nr_pages; j++) {
-			set_page_dirty_lock(page_list->pages[j]);
-			page_cache_release(page_list->pages[j]);
-		}
-	}
-
-	kfree(pinned_list);
-}
-
-
-/*
- * We have already pinned down the pages we will be using in the iovecs.
- * Each entry in iov array has corresponding entry in pinned_list->page_list.
- * Using array indexing to keep iov[] and page_list[] in sync.
- * Initial elements in iov array's iov->iov_len will be 0 if already copied into
- *   by another call.
- * iov array length remaining guaranteed to be bigger than len.
- */
-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
-	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len)
-{
-	int iov_byte_offset;
-	int copy;
-	dma_cookie_t dma_cookie = 0;
-	int iovec_idx;
-	int page_idx;
-
-	if (!chan)
-		return memcpy_toiovec(iov, kdata, len);
-
-	iovec_idx = 0;
-	while (iovec_idx < pinned_list->nr_iovecs) {
-		struct dma_page_list *page_list;
-
-		/* skip already used-up iovecs */
-		while (!iov[iovec_idx].iov_len)
-			iovec_idx++;
-
-		page_list = &pinned_list->page_list[iovec_idx];
-
-		iov_byte_offset = ((unsigned long)iov[iovec_idx].iov_base & ~PAGE_MASK);
-		page_idx = (((unsigned long)iov[iovec_idx].iov_base & PAGE_MASK)
-			 - ((unsigned long)page_list->base_address & PAGE_MASK)) >> PAGE_SHIFT;
-
-		/* break up copies to not cross page boundary */
-		while (iov[iovec_idx].iov_len) {
-			copy = min_t(int, PAGE_SIZE - iov_byte_offset, len);
-			copy = min_t(int, copy, iov[iovec_idx].iov_len);
-
-			dma_cookie = dma_async_memcpy_buf_to_pg(chan,
-					page_list->pages[page_idx],
-					iov_byte_offset,
-					kdata,
-					copy);
-			/* poll for a descriptor slot */
-			if (unlikely(dma_cookie < 0)) {
-				dma_async_issue_pending(chan);
-				continue;
-			}
-
-			len -= copy;
-			iov[iovec_idx].iov_len -= copy;
-			iov[iovec_idx].iov_base += copy;
-
-			if (!len)
-				return dma_cookie;
-
-			kdata += copy;
-			iov_byte_offset = 0;
-			page_idx++;
-		}
-		iovec_idx++;
-	}
-
-	/* really bad if we ever run out of iovecs */
-	BUG();
-	return -EFAULT;
-}
-
-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
-	struct dma_pinned_list *pinned_list, struct page *page,
-	unsigned int offset, size_t len)
-{
-	int iov_byte_offset;
-	int copy;
-	dma_cookie_t dma_cookie = 0;
-	int iovec_idx;
-	int page_idx;
-	int err;
-
-	/* this needs as-yet-unimplemented buf-to-buff, so punt. */
-	/* TODO: use dma for this */
-	if (!chan || !pinned_list) {
-		u8 *vaddr = kmap(page);
-		err = memcpy_toiovec(iov, vaddr + offset, len);
-		kunmap(page);
-		return err;
-	}
-
-	iovec_idx = 0;
-	while (iovec_idx < pinned_list->nr_iovecs) {
-		struct dma_page_list *page_list;
-
-		/* skip already used-up iovecs */
-		while (!iov[iovec_idx].iov_len)
-			iovec_idx++;
-
-		page_list = &pinned_list->page_list[iovec_idx];
-
-		iov_byte_offset = ((unsigned long)iov[iovec_idx].iov_base & ~PAGE_MASK);
-		page_idx = (((unsigned long)iov[iovec_idx].iov_base & PAGE_MASK)
-			 - ((unsigned long)page_list->base_address & PAGE_MASK)) >> PAGE_SHIFT;
-
-		/* break up copies to not cross page boundary */
-		while (iov[iovec_idx].iov_len) {
-			copy = min_t(int, PAGE_SIZE - iov_byte_offset, len);
-			copy = min_t(int, copy, iov[iovec_idx].iov_len);
-
-			dma_cookie = dma_async_memcpy_pg_to_pg(chan,
-					page_list->pages[page_idx],
-					iov_byte_offset,
-					page,
-					offset,
-					copy);
-			/* poll for a descriptor slot */
-			if (unlikely(dma_cookie < 0)) {
-				dma_async_issue_pending(chan);
-				continue;
-			}
-
-			len -= copy;
-			iov[iovec_idx].iov_len -= copy;
-			iov[iovec_idx].iov_base += copy;
-
-			if (!len)
-				return dma_cookie;
-
-			offset += copy;
-			iov_byte_offset = 0;
-			page_idx++;
-		}
-		iovec_idx++;
-	}
-
-	/* really bad if we ever run out of iovecs */
-	BUG();
-	return -EFAULT;
-}
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 394cbc5c93e3..7938272f2edf 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -310,7 +310,8 @@ mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
 		return 0;
 }
 
-static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
+/* This function must be called with the mv_xor_chan spinlock held */
+static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
 {
 	struct mv_xor_desc_slot *iter, *_iter;
 	dma_cookie_t cookie = 0;
@@ -366,18 +367,13 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
 	mv_chan->dmachan.completed_cookie = cookie;
 }
 
-static void
-mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
-{
-	spin_lock_bh(&mv_chan->lock);
-	__mv_xor_slot_cleanup(mv_chan);
-	spin_unlock_bh(&mv_chan->lock);
-}
-
 static void mv_xor_tasklet(unsigned long data)
 {
 	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
+
+	spin_lock_bh(&chan->lock);
 	mv_xor_slot_cleanup(chan);
+	spin_unlock_bh(&chan->lock);
 }
 
 static struct mv_xor_desc_slot *
@@ -656,9 +652,10 @@ static void mv_xor_free_chan_resources(struct dma_chan *chan)
 	struct mv_xor_desc_slot *iter, *_iter;
 	int in_use_descs = 0;
 
+	spin_lock_bh(&mv_chan->lock);
+
 	mv_xor_slot_cleanup(mv_chan);
 
-	spin_lock_bh(&mv_chan->lock);
 	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
 					chain_node) {
 		in_use_descs++;
@@ -700,11 +697,12 @@ static enum dma_status mv_xor_status(struct dma_chan *chan,
 	enum dma_status ret;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret == DMA_COMPLETE) {
-		mv_xor_clean_completed_slots(mv_chan);
+	if (ret == DMA_COMPLETE)
 		return ret;
-	}
+
+	spin_lock_bh(&mv_chan->lock);
 	mv_xor_slot_cleanup(mv_chan);
+	spin_unlock_bh(&mv_chan->lock);
 
 	return dma_cookie_status(chan, cookie, txstate);
 }
@@ -782,7 +780,7 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
 
 static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 {
-	int i;
+	int i, ret;
 	void *src, *dest;
 	dma_addr_t src_dma, dest_dma;
 	struct dma_chan *dma_chan;
@@ -819,19 +817,44 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 
 	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
 				 PAGE_SIZE, DMA_TO_DEVICE);
-	unmap->to_cnt = 1;
 	unmap->addr[0] = src_dma;
 
+	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
+	if (ret) {
+		err = -ENOMEM;
+		goto free_resources;
+	}
+	unmap->to_cnt = 1;
+
 	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
 				  PAGE_SIZE, DMA_FROM_DEVICE);
-	unmap->from_cnt = 1;
 	unmap->addr[1] = dest_dma;
 
+	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
+	if (ret) {
+		err = -ENOMEM;
+		goto free_resources;
+	}
+	unmap->from_cnt = 1;
 	unmap->len = PAGE_SIZE;
 
 	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
 				    PAGE_SIZE, 0);
+	if (!tx) {
+		dev_err(dma_chan->device->dev,
+			"Self-test cannot prepare operation, disabling\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
+
 	cookie = mv_xor_tx_submit(tx);
+	if (dma_submit_error(cookie)) {
+		dev_err(dma_chan->device->dev,
+			"Self-test submit error, disabling\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
+
 	mv_xor_issue_pending(dma_chan);
 	async_tx_ack(tx);
 	msleep(1);
@@ -866,7 +889,7 @@ out:
 static int
 mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 {
-	int i, src_idx;
+	int i, src_idx, ret;
 	struct page *dest;
 	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
 	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
@@ -929,19 +952,42 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
 					      0, PAGE_SIZE, DMA_TO_DEVICE);
 		dma_srcs[i] = unmap->addr[i];
+		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
+		if (ret) {
+			err = -ENOMEM;
+			goto free_resources;
+		}
 		unmap->to_cnt++;
 	}
 
 	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
 				      DMA_FROM_DEVICE);
 	dest_dma = unmap->addr[src_count];
+	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
+	if (ret) {
+		err = -ENOMEM;
+		goto free_resources;
+	}
 	unmap->from_cnt = 1;
 	unmap->len = PAGE_SIZE;
 
 	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
 				 src_count, PAGE_SIZE, 0);
+	if (!tx) {
+		dev_err(dma_chan->device->dev,
+			"Self-test cannot prepare operation, disabling\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
 
 	cookie = mv_xor_tx_submit(tx);
+	if (dma_submit_error(cookie)) {
+		dev_err(dma_chan->device->dev,
+			"Self-test submit error, disabling\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
+
 	mv_xor_issue_pending(dma_chan);
 	async_tx_ack(tx);
 	msleep(8);
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 1f9e642c66ad..212c5b9ac106 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -900,18 +900,6 @@ static inline void dmaengine_put(void)
 }
 #endif
 
-#ifdef CONFIG_NET_DMA
-#define net_dmaengine_get()	dmaengine_get()
-#define net_dmaengine_put()	dmaengine_put()
-#else
-static inline void net_dmaengine_get(void)
-{
-}
-static inline void net_dmaengine_put(void)
-{
-}
-#endif
-
 #ifdef CONFIG_ASYNC_TX_DMA
 #define async_dmaengine_get()	dmaengine_get()
 #define async_dmaengine_put()	dmaengine_put()
@@ -933,16 +921,8 @@ async_dma_find_channel(enum dma_transaction_type type)
 	return NULL;
 }
 #endif /* CONFIG_ASYNC_TX_DMA */
-
-dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
-	void *dest, void *src, size_t len);
-dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
-	struct page *page, unsigned int offset, void *kdata, size_t len);
-dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
-	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
-	unsigned int src_off, size_t len);
 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 	struct dma_chan *chan);
 
 static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
 {
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index abde271c18ae..a17ba0881afb 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -28,7 +28,6 @@
 #include <linux/textsearch.h>
 #include <net/checksum.h>
 #include <linux/rcupdate.h>
-#include <linux/dmaengine.h>
 #include <linux/hrtimer.h>
 #include <linux/dma-mapping.h>
 #include <linux/netdev_features.h>
@@ -581,11 +580,8 @@ struct sk_buff {
 	/* 2/4 bit hole (depending on ndisc_nodetype presence) */
 	kmemcheck_bitfield_end(flags2);
 
-#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
-	union {
-		unsigned int	napi_id;
-		dma_cookie_t	dma_cookie;
-	};
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	unsigned int	napi_id;
 #endif
 #ifdef CONFIG_NETWORK_SECMARK
 	__u32 secmark;
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index fa5258f322e7..ac82c5ea955b 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -19,7 +19,6 @@
 
 
 #include <linux/skbuff.h>
-#include <linux/dmaengine.h>
 #include <net/sock.h>
 #include <net/inet_connection_sock.h>
 #include <net/inet_timewait_sock.h>
@@ -166,13 +165,6 @@ struct tcp_sock {
 		struct iovec		*iov;
 		int			memory;
 		int			len;
-#ifdef CONFIG_NET_DMA
-		/* members for async copy */
-		struct dma_chan		*dma_chan;
-		int			wakeup;
-		struct dma_pinned_list	*pinned_list;
-		dma_cookie_t		dma_cookie;
-#endif
 	} ucopy;
 
 	u32	snd_wl1;	/* Sequence for window update */
diff --git a/include/net/netdma.h b/include/net/netdma.h
deleted file mode 100644
index 8ba8ce284eeb..000000000000
--- a/include/net/netdma.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
- */
-#ifndef NETDMA_H
-#define NETDMA_H
-#ifdef CONFIG_NET_DMA
-#include <linux/dmaengine.h>
-#include <linux/skbuff.h>
-
-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
-		struct sk_buff *skb, int offset, struct iovec *to,
-		size_t len, struct dma_pinned_list *pinned_list);
-
-#endif /* CONFIG_NET_DMA */
-#endif /* NETDMA_H */
diff --git a/include/net/sock.h b/include/net/sock.h
index b9a5bd0ed9f3..591e607cca35 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -233,7 +233,6 @@ struct cg_proto;
  *	@sk_receive_queue: incoming packets
  *	@sk_wmem_alloc: transmit queue bytes committed
  *	@sk_write_queue: Packet sending queue
- *	@sk_async_wait_queue: DMA copied packets
  *	@sk_omem_alloc: "o" is "option" or "other"
  *	@sk_wmem_queued: persistent queue size
  *	@sk_forward_alloc: space allocated forward
@@ -362,10 +361,6 @@ struct sock {
 	struct sk_filter __rcu	*sk_filter;
 	struct socket_wq __rcu	*sk_wq;
 
-#ifdef CONFIG_NET_DMA
-	struct sk_buff_head	sk_async_wait_queue;
-#endif
-
 #ifdef CONFIG_XFRM
 	struct xfrm_policy	*sk_policy[2];
 #endif
@@ -2206,27 +2201,15 @@ void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags);
  * sk_eat_skb - Release a skb if it is no longer needed
  * @sk: socket to eat this skb from
  * @skb: socket buffer to eat
- * @copied_early: flag indicating whether DMA operations copied this data early
  *
  * This routine must be called with interrupts disabled or with the socket
  * locked so that the sk_buff queue operation is ok.
 */
-#ifdef CONFIG_NET_DMA
-static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early)
-{
-	__skb_unlink(skb, &sk->sk_receive_queue);
-	if (!copied_early)
-		__kfree_skb(skb);
-	else
-		__skb_queue_tail(&sk->sk_async_wait_queue, skb);
-}
-#else
-static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early)
+static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
 {
 	__skb_unlink(skb, &sk->sk_receive_queue);
 	__kfree_skb(skb);
 }
-#endif
 
 static inline
 struct net *sock_net(const struct sock *sk)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 590e01a476ac..7523c325673e 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -27,7 +27,6 @@
 #include <linux/cache.h>
 #include <linux/percpu.h>
 #include <linux/skbuff.h>
-#include <linux/dmaengine.h>
 #include <linux/crypto.h>
 #include <linux/cryptohash.h>
 #include <linux/kref.h>
@@ -262,7 +261,6 @@ extern int sysctl_tcp_adv_win_scale;
 extern int sysctl_tcp_tw_reuse;
 extern int sysctl_tcp_frto;
 extern int sysctl_tcp_low_latency;
-extern int sysctl_tcp_dma_copybreak;
 extern int sysctl_tcp_nometrics_save;
 extern int sysctl_tcp_moderate_rcvbuf;
 extern int sysctl_tcp_tso_win_divisor;
@@ -368,7 +366,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			 const struct tcphdr *th, unsigned int len);
 void tcp_rcv_space_adjust(struct sock *sk);
-void tcp_cleanup_rbuf(struct sock *sk, int copied);
 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
 void tcp_twsk_destructor(struct sock *sk);
 ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
@@ -1031,12 +1028,6 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
 	tp->ucopy.len = 0;
 	tp->ucopy.memory = 0;
 	skb_queue_head_init(&tp->ucopy.prequeue);
-#ifdef CONFIG_NET_DMA
-	tp->ucopy.dma_chan = NULL;
-	tp->ucopy.wakeup = 0;
-	tp->ucopy.pinned_list = NULL;
-	tp->ucopy.dma_cookie = 0;
-#endif
 }
 
 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index e4ba9a5a5ccb..9a4f750a2963 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -390,7 +390,6 @@ static const struct bin_table bin_net_ipv4_table[] = {
 	{ CTL_INT,	NET_TCP_MTU_PROBING,		"tcp_mtu_probing" },
 	{ CTL_INT,	NET_TCP_BASE_MSS,		"tcp_base_mss" },
 	{ CTL_INT,	NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS,	"tcp_workaround_signed_windows" },
-	{ CTL_INT,	NET_TCP_DMA_COPYBREAK,		"tcp_dma_copybreak" },
 	{ CTL_INT,	NET_TCP_SLOW_START_AFTER_IDLE,	"tcp_slow_start_after_idle" },
 	{ CTL_INT,	NET_CIPSOV4_CACHE_ENABLE,	"cipso_cache_enable" },
 	{ CTL_INT,	NET_CIPSOV4_CACHE_BUCKET_SIZE,	"cipso_cache_bucket_size" },
diff --git a/net/core/Makefile b/net/core/Makefile
index 71093d94ad2b..235e6c50708d 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -16,7 +16,6 @@ obj-y += net-sysfs.o
 obj-$(CONFIG_PROC_FS) += net-procfs.o
 obj-$(CONFIG_NET_PKTGEN) += pktgen.o
 obj-$(CONFIG_NETPOLL) += netpoll.o
-obj-$(CONFIG_NET_DMA) += user_dma.o
 obj-$(CONFIG_FIB_RULES) += fib_rules.o
 obj-$(CONFIG_TRACEPOINTS) += net-traces.o
 obj-$(CONFIG_NET_DROP_MONITOR) += drop_monitor.o
diff --git a/net/core/dev.c b/net/core/dev.c
index cf8a95f48cff..130d64220229 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1284,7 +1284,6 @@ static int __dev_open(struct net_device *dev)
 		clear_bit(__LINK_STATE_START, &dev->state);
 	else {
 		dev->flags |= IFF_UP;
-		net_dmaengine_get();
 		dev_set_rx_mode(dev);
 		dev_activate(dev);
 		add_device_randomness(dev->dev_addr, dev->addr_len);
@@ -1363,7 +1362,6 @@ static int __dev_close_many(struct list_head *head)
 			ops->ndo_stop(dev);
 
 		dev->flags &= ~IFF_UP;
-		net_dmaengine_put();
 		netpoll_poll_enable(dev);
 	}
 
@@ -4505,14 +4503,6 @@ static void net_rx_action(struct softirq_action *h)
 out:
 	net_rps_action_and_irq_enable(sd);
 
-#ifdef CONFIG_NET_DMA
-	/*
-	 * There may not be any more sk_buffs coming right now, so push
-	 * any pending DMA copies to hardware
-	 */
-	dma_issue_pending_all();
-#endif
-
 	return;
 
 softnet_break:
diff --git a/net/core/sock.c b/net/core/sock.c
index 9c3f823e76a9..611f424fb76b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1489,9 +1489,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		atomic_set(&newsk->sk_omem_alloc, 0);
 		skb_queue_head_init(&newsk->sk_receive_queue);
 		skb_queue_head_init(&newsk->sk_write_queue);
-#ifdef CONFIG_NET_DMA
-		skb_queue_head_init(&newsk->sk_async_wait_queue);
-#endif
 
 		spin_lock_init(&newsk->sk_dst_lock);
 		rwlock_init(&newsk->sk_callback_lock);
@@ -2308,9 +2305,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 	skb_queue_head_init(&sk->sk_receive_queue);
 	skb_queue_head_init(&sk->sk_write_queue);
 	skb_queue_head_init(&sk->sk_error_queue);
-#ifdef CONFIG_NET_DMA
-	skb_queue_head_init(&sk->sk_async_wait_queue);
-#endif
 
 	sk->sk_send_head	=	NULL;
 
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
deleted file mode 100644
index 1b5fefdb8198..000000000000
--- a/net/core/user_dma.c
+++ /dev/null
@@ -1,131 +0,0 @@
1/*
2 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
3 * Portions based on net/core/datagram.c and copyrighted by their authors.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the Free
7 * Software Foundation; either version 2 of the License, or (at your option)
8 * any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc., 59
17 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 *
19 * The full GNU General Public License is included in this distribution in the
20 * file called COPYING.
21 */
22
23/*
24 * This code allows the net stack to make use of a DMA engine for
25 * skb to iovec copies.
26 */
27
28#include <linux/dmaengine.h>
29#include <linux/socket.h>
30#include <linux/export.h>
31#include <net/tcp.h>
32#include <net/netdma.h>
33
34#define NET_DMA_DEFAULT_COPYBREAK 4096
35
36int sysctl_tcp_dma_copybreak = NET_DMA_DEFAULT_COPYBREAK;
37EXPORT_SYMBOL(sysctl_tcp_dma_copybreak);
38
39/**
40 * dma_skb_copy_datagram_iovec - Copy a datagram to an iovec.
41 * @skb - buffer to copy
42 * @offset - offset in the buffer to start copying from
43 * @iovec - io vector to copy to
44 * @len - amount of data to copy from buffer to iovec
45 * @pinned_list - locked iovec buffer data
46 *
47 * Note: the iovec is modified during the copy.
48 */
49int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
50 struct sk_buff *skb, int offset, struct iovec *to,
51 size_t len, struct dma_pinned_list *pinned_list)
52{
53 int start = skb_headlen(skb);
54 int i, copy = start - offset;
55 struct sk_buff *frag_iter;
56 dma_cookie_t cookie = 0;
57
58 /* Copy header. */
59 if (copy > 0) {
60 if (copy > len)
61 copy = len;
62 cookie = dma_memcpy_to_iovec(chan, to, pinned_list,
63 skb->data + offset, copy);
64 if (cookie < 0)
65 goto fault;
66 len -= copy;
67 if (len == 0)
68 goto end;
69 offset += copy;
70 }
71
72 /* Copy paged appendix. Hmm... why does this look so complicated? */
73 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
74 int end;
75 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
76
77 WARN_ON(start > offset + len);
78
79 end = start + skb_frag_size(frag);
80 copy = end - offset;
81 if (copy > 0) {
82 struct page *page = skb_frag_page(frag);
83
84 if (copy > len)
85 copy = len;
86
87 cookie = dma_memcpy_pg_to_iovec(chan, to, pinned_list, page,
88 frag->page_offset + offset - start, copy);
89 if (cookie < 0)
90 goto fault;
91 len -= copy;
92 if (len == 0)
93 goto end;
94 offset += copy;
95 }
96 start = end;
97 }
98
99 skb_walk_frags(skb, frag_iter) {
100 int end;
101
102 WARN_ON(start > offset + len);
103
104 end = start + frag_iter->len;
105 copy = end - offset;
106 if (copy > 0) {
107 if (copy > len)
108 copy = len;
109 cookie = dma_skb_copy_datagram_iovec(chan, frag_iter,
110 offset - start,
111 to, copy,
112 pinned_list);
113 if (cookie < 0)
114 goto fault;
115 len -= copy;
116 if (len == 0)
117 goto end;
118 offset += copy;
119 }
120 start = end;
121 }
122
123end:
124 if (!len) {
125 skb->dma_cookie = cookie;
126 return cookie;
127 }
128
129fault:
130 return -EFAULT;
131}
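With net/core/user_dma.c gone, every skb-to-iovec copy on the receive path is performed synchronously by the CPU (skb_copy_datagram_iovec and friends) rather than being handed to a DMA channel. The stand-alone sketch below is illustrative only: a hypothetical userspace toy_skb with the same layout the removed helper walked (linear head plus paged fragments; the frag_list recursion is omitted), copied with plain memcpy where dma_skb_copy_datagram_iovec() used dma_memcpy_to_iovec(). All names here are invented for the example and are not kernel APIs.

/* Illustrative userspace sketch, not kernel code: copy a toy "skb"
 * (linear head + paged fragments) into a flat buffer, mirroring the
 * offset/len bookkeeping of the removed DMA copy helper. */
#include <stdio.h>
#include <string.h>

struct toy_frag { const char *data; size_t len; };

struct toy_skb {
	const char *head;           /* linear header area */
	size_t headlen;
	struct toy_frag frags[4];   /* paged "appendix" */
	int nr_frags;
};

/* Copy up to 'len' bytes starting at 'offset' into 'to'; returns bytes copied. */
static size_t toy_copy(const struct toy_skb *skb, size_t offset,
		       char *to, size_t len)
{
	size_t copied = 0, start = skb->headlen;
	int i;

	/* 1. Linear head, as in the "Copy header" step above. */
	if (offset < skb->headlen && len) {
		size_t chunk = skb->headlen - offset;
		if (chunk > len)
			chunk = len;
		memcpy(to, skb->head + offset, chunk);
		copied += chunk;
		len -= chunk;
		offset += chunk;
	}

	/* 2. Paged fragments, as in the "Copy paged appendix" loop above.
	 * (A real skb may also chain further skbs via frag_list; the kernel
	 * helper recurses into those, which is omitted here.) */
	for (i = 0; i < skb->nr_frags && len; i++) {
		size_t end = start + skb->frags[i].len;

		if (offset < end) {
			size_t chunk = end - offset;
			if (chunk > len)
				chunk = len;
			memcpy(to + copied,
			       skb->frags[i].data + (offset - start), chunk);
			copied += chunk;
			len -= chunk;
			offset += chunk;
		}
		start = end;
	}
	return copied;
}

int main(void)
{
	struct toy_skb skb = {
		.head = "HDR:", .headlen = 4,
		.frags = { { "hello ", 6 }, { "world", 5 } },
		.nr_frags = 2,
	};
	char buf[32] = { 0 };

	toy_copy(&skb, 4, buf, sizeof(buf) - 1); /* skip the 4-byte header */
	printf("%s\n", buf);                     /* prints "hello world" */
	return 0;
}

Running it prints "hello world", the payload past the 4-byte header, which is the same per-region offset/len accounting the kernel helper performed before issuing DMA descriptors.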
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index de2c1e719305..f440cc7c9f72 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -848,7 +848,7 @@ int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
848 default: 848 default:
849 dccp_pr_debug("packet_type=%s\n", 849 dccp_pr_debug("packet_type=%s\n",
850 dccp_packet_name(dh->dccph_type)); 850 dccp_packet_name(dh->dccph_type));
851 sk_eat_skb(sk, skb, false); 851 sk_eat_skb(sk, skb);
852 } 852 }
853verify_sock_status: 853verify_sock_status:
854 if (sock_flag(sk, SOCK_DONE)) { 854 if (sock_flag(sk, SOCK_DONE)) {
@@ -905,7 +905,7 @@ verify_sock_status:
905 len = skb->len; 905 len = skb->len;
906 found_fin_ok: 906 found_fin_ok:
907 if (!(flags & MSG_PEEK)) 907 if (!(flags & MSG_PEEK))
908 sk_eat_skb(sk, skb, false); 908 sk_eat_skb(sk, skb);
909 break; 909 break;
910 } while (1); 910 } while (1);
911out: 911out:
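The call sites above (and the tcp.c and af_llc.c ones later in this diff) drop their third argument because, with no async-wait queue left to park early-copied skbs on, sk_eat_skb() no longer needs a copied_early flag. Roughly, the simplified inline helper reduces to the sketch below; this is an approximation of the post-removal form, not a verbatim quote of include/net/sock.h.

static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Unlink from the receive queue and free immediately; there is no
	 * longer a copied_early case that would defer the free until an
	 * in-flight DMA copy completed. */
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}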
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 79a007c52558..a9fde0eef77c 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -628,15 +628,6 @@ static struct ctl_table ipv4_table[] = {
628 .mode = 0644, 628 .mode = 0644,
629 .proc_handler = proc_dointvec 629 .proc_handler = proc_dointvec
630 }, 630 },
631#ifdef CONFIG_NET_DMA
632 {
633 .procname = "tcp_dma_copybreak",
634 .data = &sysctl_tcp_dma_copybreak,
635 .maxlen = sizeof(int),
636 .mode = 0644,
637 .proc_handler = proc_dointvec
638 },
639#endif
640 { 631 {
641 .procname = "tcp_slow_start_after_idle", 632 .procname = "tcp_slow_start_after_idle",
642 .data = &sysctl_tcp_slow_start_after_idle, 633 .data = &sysctl_tcp_slow_start_after_idle,
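Removing this ctl_table entry also removes the /proc/sys/net/ipv4/tcp_dma_copybreak file itself. A minimal userspace probe, illustrative only, can confirm the knob is gone on kernels carrying this change:

/* Illustrative check: after this commit, opening the sysctl file is
 * expected to fail with ENOENT; on older CONFIG_NET_DMA kernels the
 * knob still exists. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/sys/net/ipv4/tcp_dma_copybreak", O_RDONLY);

	if (fd < 0) {
		printf("tcp_dma_copybreak absent: %s\n", strerror(errno));
		return 0;
	}
	close(fd);
	printf("tcp_dma_copybreak still present\n");
	return 0;
}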
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 541f26a67ba2..8ee43ae90396 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -274,7 +274,6 @@
274#include <net/tcp.h> 274#include <net/tcp.h>
275#include <net/xfrm.h> 275#include <net/xfrm.h>
276#include <net/ip.h> 276#include <net/ip.h>
277#include <net/netdma.h>
278#include <net/sock.h> 277#include <net/sock.h>
279 278
280#include <asm/uaccess.h> 279#include <asm/uaccess.h>
@@ -1394,7 +1393,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
1394 * calculation of whether or not we must ACK for the sake of 1393 * calculation of whether or not we must ACK for the sake of
1395 * a window update. 1394 * a window update.
1396 */ 1395 */
1397void tcp_cleanup_rbuf(struct sock *sk, int copied) 1396static void tcp_cleanup_rbuf(struct sock *sk, int copied)
1398{ 1397{
1399 struct tcp_sock *tp = tcp_sk(sk); 1398 struct tcp_sock *tp = tcp_sk(sk);
1400 bool time_to_ack = false; 1399 bool time_to_ack = false;
@@ -1470,39 +1469,6 @@ static void tcp_prequeue_process(struct sock *sk)
1470 tp->ucopy.memory = 0; 1469 tp->ucopy.memory = 0;
1471} 1470}
1472 1471
1473#ifdef CONFIG_NET_DMA
1474static void tcp_service_net_dma(struct sock *sk, bool wait)
1475{
1476 dma_cookie_t done, used;
1477 dma_cookie_t last_issued;
1478 struct tcp_sock *tp = tcp_sk(sk);
1479
1480 if (!tp->ucopy.dma_chan)
1481 return;
1482
1483 last_issued = tp->ucopy.dma_cookie;
1484 dma_async_issue_pending(tp->ucopy.dma_chan);
1485
1486 do {
1487 if (dma_async_is_tx_complete(tp->ucopy.dma_chan,
1488 last_issued, &done,
1489 &used) == DMA_COMPLETE) {
1490 /* Safe to free early-copied skbs now */
1491 __skb_queue_purge(&sk->sk_async_wait_queue);
1492 break;
1493 } else {
1494 struct sk_buff *skb;
1495 while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1496 (dma_async_is_complete(skb->dma_cookie, done,
1497 used) == DMA_COMPLETE)) {
1498 __skb_dequeue(&sk->sk_async_wait_queue);
1499 kfree_skb(skb);
1500 }
1501 }
1502 } while (wait);
1503}
1504#endif
1505
1506static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) 1472static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1507{ 1473{
1508 struct sk_buff *skb; 1474 struct sk_buff *skb;
@@ -1520,7 +1486,7 @@ static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1520 * split a fat GRO packet, while we released socket lock 1486 * split a fat GRO packet, while we released socket lock
1521 * in skb_splice_bits() 1487 * in skb_splice_bits()
1522 */ 1488 */
1523 sk_eat_skb(sk, skb, false); 1489 sk_eat_skb(sk, skb);
1524 } 1490 }
1525 return NULL; 1491 return NULL;
1526} 1492}
@@ -1586,11 +1552,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1586 continue; 1552 continue;
1587 } 1553 }
1588 if (tcp_hdr(skb)->fin) { 1554 if (tcp_hdr(skb)->fin) {
1589 sk_eat_skb(sk, skb, false); 1555 sk_eat_skb(sk, skb);
1590 ++seq; 1556 ++seq;
1591 break; 1557 break;
1592 } 1558 }
1593 sk_eat_skb(sk, skb, false); 1559 sk_eat_skb(sk, skb);
1594 if (!desc->count) 1560 if (!desc->count)
1595 break; 1561 break;
1596 tp->copied_seq = seq; 1562 tp->copied_seq = seq;
@@ -1628,7 +1594,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1628 int target; /* Read at least this many bytes */ 1594 int target; /* Read at least this many bytes */
1629 long timeo; 1595 long timeo;
1630 struct task_struct *user_recv = NULL; 1596 struct task_struct *user_recv = NULL;
1631 bool copied_early = false;
1632 struct sk_buff *skb; 1597 struct sk_buff *skb;
1633 u32 urg_hole = 0; 1598 u32 urg_hole = 0;
1634 1599
@@ -1674,28 +1639,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1674 1639
1675 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 1640 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1676 1641
1677#ifdef CONFIG_NET_DMA
1678 tp->ucopy.dma_chan = NULL;
1679 preempt_disable();
1680 skb = skb_peek_tail(&sk->sk_receive_queue);
1681 {
1682 int available = 0;
1683
1684 if (skb)
1685 available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
1686 if ((available < target) &&
1687 (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
1688 !sysctl_tcp_low_latency &&
1689 net_dma_find_channel()) {
1690 preempt_enable();
1691 tp->ucopy.pinned_list =
1692 dma_pin_iovec_pages(msg->msg_iov, len);
1693 } else {
1694 preempt_enable();
1695 }
1696 }
1697#endif
1698
1699 do { 1642 do {
1700 u32 offset; 1643 u32 offset;
1701 1644
@@ -1826,16 +1769,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1826 /* __ Set realtime policy in scheduler __ */ 1769 /* __ Set realtime policy in scheduler __ */
1827 } 1770 }
1828 1771
1829#ifdef CONFIG_NET_DMA
1830 if (tp->ucopy.dma_chan) {
1831 if (tp->rcv_wnd == 0 &&
1832 !skb_queue_empty(&sk->sk_async_wait_queue)) {
1833 tcp_service_net_dma(sk, true);
1834 tcp_cleanup_rbuf(sk, copied);
1835 } else
1836 dma_async_issue_pending(tp->ucopy.dma_chan);
1837 }
1838#endif
1839 if (copied >= target) { 1772 if (copied >= target) {
1840 /* Do not sleep, just process backlog. */ 1773 /* Do not sleep, just process backlog. */
1841 release_sock(sk); 1774 release_sock(sk);
@@ -1843,11 +1776,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1843 } else 1776 } else
1844 sk_wait_data(sk, &timeo); 1777 sk_wait_data(sk, &timeo);
1845 1778
1846#ifdef CONFIG_NET_DMA
1847 tcp_service_net_dma(sk, false); /* Don't block */
1848 tp->ucopy.wakeup = 0;
1849#endif
1850
1851 if (user_recv) { 1779 if (user_recv) {
1852 int chunk; 1780 int chunk;
1853 1781
@@ -1905,43 +1833,13 @@ do_prequeue:
1905 } 1833 }
1906 1834
1907 if (!(flags & MSG_TRUNC)) { 1835 if (!(flags & MSG_TRUNC)) {
1908#ifdef CONFIG_NET_DMA 1836 err = skb_copy_datagram_iovec(skb, offset,
1909 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 1837 msg->msg_iov, used);
1910 tp->ucopy.dma_chan = net_dma_find_channel(); 1838 if (err) {
1911 1839 /* Exception. Bailout! */
1912 if (tp->ucopy.dma_chan) { 1840 if (!copied)
1913 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec( 1841 copied = -EFAULT;
1914 tp->ucopy.dma_chan, skb, offset, 1842 break;
1915 msg->msg_iov, used,
1916 tp->ucopy.pinned_list);
1917
1918 if (tp->ucopy.dma_cookie < 0) {
1919
1920 pr_alert("%s: dma_cookie < 0\n",
1921 __func__);
1922
1923 /* Exception. Bailout! */
1924 if (!copied)
1925 copied = -EFAULT;
1926 break;
1927 }
1928
1929 dma_async_issue_pending(tp->ucopy.dma_chan);
1930
1931 if ((offset + used) == skb->len)
1932 copied_early = true;
1933
1934 } else
1935#endif
1936 {
1937 err = skb_copy_datagram_iovec(skb, offset,
1938 msg->msg_iov, used);
1939 if (err) {
1940 /* Exception. Bailout! */
1941 if (!copied)
1942 copied = -EFAULT;
1943 break;
1944 }
1945 } 1843 }
1946 } 1844 }
1947 1845
@@ -1961,19 +1859,15 @@ skip_copy:
1961 1859
1962 if (tcp_hdr(skb)->fin) 1860 if (tcp_hdr(skb)->fin)
1963 goto found_fin_ok; 1861 goto found_fin_ok;
1964 if (!(flags & MSG_PEEK)) { 1862 if (!(flags & MSG_PEEK))
1965 sk_eat_skb(sk, skb, copied_early); 1863 sk_eat_skb(sk, skb);
1966 copied_early = false;
1967 }
1968 continue; 1864 continue;
1969 1865
1970 found_fin_ok: 1866 found_fin_ok:
1971 /* Process the FIN. */ 1867 /* Process the FIN. */
1972 ++*seq; 1868 ++*seq;
1973 if (!(flags & MSG_PEEK)) { 1869 if (!(flags & MSG_PEEK))
1974 sk_eat_skb(sk, skb, copied_early); 1870 sk_eat_skb(sk, skb);
1975 copied_early = false;
1976 }
1977 break; 1871 break;
1978 } while (len > 0); 1872 } while (len > 0);
1979 1873
@@ -1996,16 +1890,6 @@ skip_copy:
1996 tp->ucopy.len = 0; 1890 tp->ucopy.len = 0;
1997 } 1891 }
1998 1892
1999#ifdef CONFIG_NET_DMA
2000 tcp_service_net_dma(sk, true); /* Wait for queue to drain */
2001 tp->ucopy.dma_chan = NULL;
2002
2003 if (tp->ucopy.pinned_list) {
2004 dma_unpin_iovec_pages(tp->ucopy.pinned_list);
2005 tp->ucopy.pinned_list = NULL;
2006 }
2007#endif
2008
2009 /* According to UNIX98, msg_name/msg_namelen are ignored 1893 /* According to UNIX98, msg_name/msg_namelen are ignored
2010 * on connected socket. I was just happy when found this 8) --ANK 1894 * on connected socket. I was just happy when found this 8) --ANK
2011 */ 1895 */
@@ -2349,9 +2233,6 @@ int tcp_disconnect(struct sock *sk, int flags)
2349 __skb_queue_purge(&sk->sk_receive_queue); 2233 __skb_queue_purge(&sk->sk_receive_queue);
2350 tcp_write_queue_purge(sk); 2234 tcp_write_queue_purge(sk);
2351 __skb_queue_purge(&tp->out_of_order_queue); 2235 __skb_queue_purge(&tp->out_of_order_queue);
2352#ifdef CONFIG_NET_DMA
2353 __skb_queue_purge(&sk->sk_async_wait_queue);
2354#endif
2355 2236
2356 inet->inet_dport = 0; 2237 inet->inet_dport = 0;
2357 2238
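Condensing the new-side column of the tcp_recvmsg() hunks above: the receive loop is left with a single, unconditional copy step followed by the two-argument sk_eat_skb(). The fragment below is not a stand-alone example; it is the shape of the code remaining inside the function's do-while loop, with the surrounding sequence/length bookkeeping elided.

		if (!(flags & MSG_TRUNC)) {
			/* Always a synchronous CPU copy into the user iovec. */
			err = skb_copy_datagram_iovec(skb, offset,
						      msg->msg_iov, used);
			if (err) {
				/* Exception. Bailout! */
				if (!copied)
					copied = -EFAULT;
				break;
			}
		}

		/* ... copied/seq accounting and urgent-data handling elided ... */

		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb);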
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a906e0200ff2..0185eea59342 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -73,7 +73,6 @@
73#include <net/inet_common.h> 73#include <net/inet_common.h>
74#include <linux/ipsec.h> 74#include <linux/ipsec.h>
75#include <asm/unaligned.h> 75#include <asm/unaligned.h>
76#include <net/netdma.h>
77#include <linux/errqueue.h> 76#include <linux/errqueue.h>
78 77
79int sysctl_tcp_timestamps __read_mostly = 1; 78int sysctl_tcp_timestamps __read_mostly = 1;
@@ -4951,53 +4950,6 @@ static inline bool tcp_checksum_complete_user(struct sock *sk,
4951 __tcp_checksum_complete_user(sk, skb); 4950 __tcp_checksum_complete_user(sk, skb);
4952} 4951}
4953 4952
4954#ifdef CONFIG_NET_DMA
4955static bool tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
4956 int hlen)
4957{
4958 struct tcp_sock *tp = tcp_sk(sk);
4959 int chunk = skb->len - hlen;
4960 int dma_cookie;
4961 bool copied_early = false;
4962
4963 if (tp->ucopy.wakeup)
4964 return false;
4965
4966 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
4967 tp->ucopy.dma_chan = net_dma_find_channel();
4968
4969 if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
4970
4971 dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan,
4972 skb, hlen,
4973 tp->ucopy.iov, chunk,
4974 tp->ucopy.pinned_list);
4975
4976 if (dma_cookie < 0)
4977 goto out;
4978
4979 tp->ucopy.dma_cookie = dma_cookie;
4980 copied_early = true;
4981
4982 tp->ucopy.len -= chunk;
4983 tp->copied_seq += chunk;
4984 tcp_rcv_space_adjust(sk);
4985
4986 if ((tp->ucopy.len == 0) ||
4987 (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) ||
4988 (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) {
4989 tp->ucopy.wakeup = 1;
4990 sk->sk_data_ready(sk);
4991 }
4992 } else if (chunk > 0) {
4993 tp->ucopy.wakeup = 1;
4994 sk->sk_data_ready(sk);
4995 }
4996out:
4997 return copied_early;
4998}
4999#endif /* CONFIG_NET_DMA */
5000
5001/* Does PAWS and seqno based validation of an incoming segment, flags will 4953/* Does PAWS and seqno based validation of an incoming segment, flags will
5002 * play significant role here. 4954 * play significant role here.
5003 */ 4955 */
@@ -5177,27 +5129,15 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
5177 } 5129 }
5178 } else { 5130 } else {
5179 int eaten = 0; 5131 int eaten = 0;
5180 int copied_early = 0;
5181 bool fragstolen = false; 5132 bool fragstolen = false;
5182 5133
5183 if (tp->copied_seq == tp->rcv_nxt && 5134 if (tp->ucopy.task == current &&
5184 len - tcp_header_len <= tp->ucopy.len) { 5135 tp->copied_seq == tp->rcv_nxt &&
5185#ifdef CONFIG_NET_DMA 5136 len - tcp_header_len <= tp->ucopy.len &&
5186 if (tp->ucopy.task == current && 5137 sock_owned_by_user(sk)) {
5187 sock_owned_by_user(sk) && 5138 __set_current_state(TASK_RUNNING);
5188 tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
5189 copied_early = 1;
5190 eaten = 1;
5191 }
5192#endif
5193 if (tp->ucopy.task == current &&
5194 sock_owned_by_user(sk) && !copied_early) {
5195 __set_current_state(TASK_RUNNING);
5196 5139
5197 if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) 5140 if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) {
5198 eaten = 1;
5199 }
5200 if (eaten) {
5201 /* Predicted packet is in window by definition. 5141 /* Predicted packet is in window by definition.
5202 * seq == rcv_nxt and rcv_wup <= rcv_nxt. 5142 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
5203 * Hence, check seq<=rcv_wup reduces to: 5143 * Hence, check seq<=rcv_wup reduces to:
@@ -5213,9 +5153,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
5213 __skb_pull(skb, tcp_header_len); 5153 __skb_pull(skb, tcp_header_len);
5214 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 5154 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
5215 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER); 5155 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER);
5156 eaten = 1;
5216 } 5157 }
5217 if (copied_early)
5218 tcp_cleanup_rbuf(sk, skb->len);
5219 } 5158 }
5220 if (!eaten) { 5159 if (!eaten) {
5221 if (tcp_checksum_complete_user(sk, skb)) 5160 if (tcp_checksum_complete_user(sk, skb))
@@ -5252,14 +5191,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
5252 goto no_ack; 5191 goto no_ack;
5253 } 5192 }
5254 5193
5255 if (!copied_early || tp->rcv_nxt != tp->rcv_wup) 5194 __tcp_ack_snd_check(sk, 0);
5256 __tcp_ack_snd_check(sk, 0);
5257no_ack: 5195no_ack:
5258#ifdef CONFIG_NET_DMA
5259 if (copied_early)
5260 __skb_queue_tail(&sk->sk_async_wait_queue, skb);
5261 else
5262#endif
5263 if (eaten) 5196 if (eaten)
5264 kfree_skb_partial(skb, fragstolen); 5197 kfree_skb_partial(skb, fragstolen);
5265 sk->sk_data_ready(sk); 5198 sk->sk_data_ready(sk);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index cd17f009aede..fbea536cf5c0 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -72,7 +72,6 @@
72#include <net/inet_common.h> 72#include <net/inet_common.h>
73#include <net/timewait_sock.h> 73#include <net/timewait_sock.h>
74#include <net/xfrm.h> 74#include <net/xfrm.h>
75#include <net/netdma.h>
76#include <net/secure_seq.h> 75#include <net/secure_seq.h>
77#include <net/tcp_memcontrol.h> 76#include <net/tcp_memcontrol.h>
78#include <net/busy_poll.h> 77#include <net/busy_poll.h>
@@ -1670,18 +1669,8 @@ process:
1670 bh_lock_sock_nested(sk); 1669 bh_lock_sock_nested(sk);
1671 ret = 0; 1670 ret = 0;
1672 if (!sock_owned_by_user(sk)) { 1671 if (!sock_owned_by_user(sk)) {
1673#ifdef CONFIG_NET_DMA 1672 if (!tcp_prequeue(sk, skb))
1674 struct tcp_sock *tp = tcp_sk(sk);
1675 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1676 tp->ucopy.dma_chan = net_dma_find_channel();
1677 if (tp->ucopy.dma_chan)
1678 ret = tcp_v4_do_rcv(sk, skb); 1673 ret = tcp_v4_do_rcv(sk, skb);
1679 else
1680#endif
1681 {
1682 if (!tcp_prequeue(sk, skb))
1683 ret = tcp_v4_do_rcv(sk, skb);
1684 }
1685 } else if (unlikely(sk_add_backlog(sk, skb, 1674 } else if (unlikely(sk_add_backlog(sk, skb,
1686 sk->sk_rcvbuf + sk->sk_sndbuf))) { 1675 sk->sk_rcvbuf + sk->sk_sndbuf))) {
1687 bh_unlock_sock(sk); 1676 bh_unlock_sock(sk);
@@ -1841,11 +1830,6 @@ void tcp_v4_destroy_sock(struct sock *sk)
1841 } 1830 }
1842#endif 1831#endif
1843 1832
1844#ifdef CONFIG_NET_DMA
1845 /* Cleans up our sk_async_wait_queue */
1846 __skb_queue_purge(&sk->sk_async_wait_queue);
1847#endif
1848
1849 /* Clean prequeue, it must be empty really */ 1833 /* Clean prequeue, it must be empty really */
1850 __skb_queue_purge(&tp->ucopy.prequeue); 1834 __skb_queue_purge(&tp->ucopy.prequeue);
1851 1835
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 29964c3d363c..03a5d1ed3340 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -59,7 +59,6 @@
59#include <net/snmp.h> 59#include <net/snmp.h>
60#include <net/dsfield.h> 60#include <net/dsfield.h>
61#include <net/timewait_sock.h> 61#include <net/timewait_sock.h>
62#include <net/netdma.h>
63#include <net/inet_common.h> 62#include <net/inet_common.h>
64#include <net/secure_seq.h> 63#include <net/secure_seq.h>
65#include <net/tcp_memcontrol.h> 64#include <net/tcp_memcontrol.h>
@@ -1446,18 +1445,8 @@ process:
1446 bh_lock_sock_nested(sk); 1445 bh_lock_sock_nested(sk);
1447 ret = 0; 1446 ret = 0;
1448 if (!sock_owned_by_user(sk)) { 1447 if (!sock_owned_by_user(sk)) {
1449#ifdef CONFIG_NET_DMA 1448 if (!tcp_prequeue(sk, skb))
1450 struct tcp_sock *tp = tcp_sk(sk);
1451 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1452 tp->ucopy.dma_chan = net_dma_find_channel();
1453 if (tp->ucopy.dma_chan)
1454 ret = tcp_v6_do_rcv(sk, skb); 1449 ret = tcp_v6_do_rcv(sk, skb);
1455 else
1456#endif
1457 {
1458 if (!tcp_prequeue(sk, skb))
1459 ret = tcp_v6_do_rcv(sk, skb);
1460 }
1461 } else if (unlikely(sk_add_backlog(sk, skb, 1450 } else if (unlikely(sk_add_backlog(sk, skb,
1462 sk->sk_rcvbuf + sk->sk_sndbuf))) { 1451 sk->sk_rcvbuf + sk->sk_sndbuf))) {
1463 bh_unlock_sock(sk); 1452 bh_unlock_sock(sk);
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 0080d2b0a8ae..bb9cbc17d926 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -839,7 +839,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
839 839
840 if (!(flags & MSG_PEEK)) { 840 if (!(flags & MSG_PEEK)) {
841 spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); 841 spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
842 sk_eat_skb(sk, skb, false); 842 sk_eat_skb(sk, skb);
843 spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); 843 spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
844 *seq = 0; 844 *seq = 0;
845 } 845 }
@@ -861,10 +861,10 @@ copy_uaddr:
861 llc_cmsg_rcv(msg, skb); 861 llc_cmsg_rcv(msg, skb);
862 862
863 if (!(flags & MSG_PEEK)) { 863 if (!(flags & MSG_PEEK)) {
864 spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); 864 spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
865 sk_eat_skb(sk, skb, false); 865 sk_eat_skb(sk, skb);
866 spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); 866 spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
867 *seq = 0; 867 *seq = 0;
868 } 868 }
869 869
870 goto out; 870 goto out;