author		Linus Torvalds <torvalds@linux-foundation.org>	2014-10-07 20:39:25 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-07 20:39:25 -0400
commit		d0cd84817c745655428dbfdb1e3f754230b46bef (patch)
tree		a7b6f422f6ac50f506ffa7a66f8e83387f90f212 /drivers/dma
parent		bdf428feb225229b1d4715b45bbdad4a934cd89c (diff)
parent		3f334078567245429540e6461c81c749fce87f70 (diff)
Merge tag 'dmaengine-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine
Pull dmaengine updates from Dan Williams:
 "Even though this has fixes marked for -stable, given the size and the
  needed conflict resolutions this is 3.18-rc1/merge-window material.

  These patches have been languishing in my tree for a long while. The
  fact that I do not have the time to do proper/prompt maintenance of
  this tree is a primary factor in the decision to step down as
  dmaengine maintainer. That and the fact that the bulk of drivers/dma/
  activity is going through Vinod these days.

  The net_dma removal has not been in -next. It has developed simple
  conflicts against mainline and net-next (for-3.18).

  Continuing thanks to Vinod for staying on top of drivers/dma/.

  Summary:

   1/ Step down as dmaengine maintainer see commit 08223d80df38
      "dmaengine maintainer update"

   2/ Removal of net_dma, as it has been marked 'broken' since 3.13
      (commit 77873803363c "net_dma: mark broken"), without reports of
      performance regression.

   3/ Miscellaneous fixes"

* tag 'dmaengine-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine:
  net: make tcp_cleanup_rbuf private
  net_dma: revert 'copied_early'
  net_dma: simple removal
  dmaengine maintainer update
  dmatest: prevent memory leakage on error path in thread
  ioat: Use time_before_jiffies()
  dmaengine: fix xor sources continuation
  dma: mv_xor: Rename __mv_xor_slot_cleanup() to mv_xor_slot_cleanup()
  dma: mv_xor: Remove all callers of mv_xor_slot_cleanup()
  dma: mv_xor: Remove unneeded mv_xor_clean_completed_slots() call
  ioat: Use pci_enable_msix_exact() instead of pci_enable_msix()
  drivers: dma: Include appropriate header file in dca.c
  drivers: dma: Mark functions as static in dma_v3.c
  dma: mv_xor: Add DMA API error checks
  ioat/dca: Use dev_is_pci() to check whether it is pci device
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/Kconfig		 12
-rw-r--r--	drivers/dma/Makefile		  1
-rw-r--r--	drivers/dma/dmaengine.c		104
-rw-r--r--	drivers/dma/dmatest.c		  4
-rw-r--r--	drivers/dma/ioat/dca.c		 13
-rw-r--r--	drivers/dma/ioat/dma.c		  3
-rw-r--r--	drivers/dma/ioat/dma.h		  7
-rw-r--r--	drivers/dma/ioat/dma_v2.c	  4
-rw-r--r--	drivers/dma/ioat/dma_v3.c	  7
-rw-r--r--	drivers/dma/iovlock.c		280
-rw-r--r--	drivers/dma/mv_xor.c		 80
11 files changed, 78 insertions, 437 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 9b1ea0ef59af..a016490c95ae 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -427,18 +427,6 @@ config DMA_OF
 comment "DMA Clients"
 	depends on DMA_ENGINE
 
-config NET_DMA
-	bool "Network: TCP receive copy offload"
-	depends on DMA_ENGINE && NET
-	default (INTEL_IOATDMA || FSL_DMA)
-	depends on BROKEN
-	help
-	  This enables the use of DMA engines in the network stack to
-	  offload receive copy-to-user operations, freeing CPU cycles.
-
-	  Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise
-	  say N.
-
 config ASYNC_TX_DMA
 	bool "Async_tx: Offload support for the async_tx api"
 	depends on DMA_ENGINE
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index c6adb925f0b9..cb626c179911 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -6,7 +6,6 @@ obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
 obj-$(CONFIG_DMA_ACPI) += acpi-dma.o
 obj-$(CONFIG_DMA_OF) += of-dma.o
 
-obj-$(CONFIG_NET_DMA) += iovlock.o
 obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
 obj-$(CONFIG_DMATEST) += dmatest.o
 obj-$(CONFIG_INTEL_IOATDMA) += ioat/
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index d5d30ed863ce..24bfaf0b92ba 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1081,110 +1081,6 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
 }
 EXPORT_SYMBOL(dmaengine_get_unmap_data);
 
-/**
- * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
- * @chan: DMA channel to offload copy to
- * @dest_pg: destination page
- * @dest_off: offset in page to copy to
- * @src_pg: source page
- * @src_off: offset in page to copy from
- * @len: length
- *
- * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
- * address according to the DMA mapping API rules for streaming mappings.
- * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
- * (kernel memory or locked user space pages).
- */
-dma_cookie_t
-dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
-	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
-	size_t len)
-{
-	struct dma_device *dev = chan->device;
-	struct dma_async_tx_descriptor *tx;
-	struct dmaengine_unmap_data *unmap;
-	dma_cookie_t cookie;
-	unsigned long flags;
-
-	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
-	if (!unmap)
-		return -ENOMEM;
-
-	unmap->to_cnt = 1;
-	unmap->from_cnt = 1;
-	unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len,
-				      DMA_TO_DEVICE);
-	unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
-				      DMA_FROM_DEVICE);
-	unmap->len = len;
-	flags = DMA_CTRL_ACK;
-	tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
-					 len, flags);
-
-	if (!tx) {
-		dmaengine_unmap_put(unmap);
-		return -ENOMEM;
-	}
-
-	dma_set_unmap(tx, unmap);
-	cookie = tx->tx_submit(tx);
-	dmaengine_unmap_put(unmap);
-
-	preempt_disable();
-	__this_cpu_add(chan->local->bytes_transferred, len);
-	__this_cpu_inc(chan->local->memcpy_count);
-	preempt_enable();
-
-	return cookie;
-}
-EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
-
-/**
- * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
- * @chan: DMA channel to offload copy to
- * @dest: destination address (virtual)
- * @src: source address (virtual)
- * @len: length
- *
- * Both @dest and @src must be mappable to a bus address according to the
- * DMA mapping API rules for streaming mappings.
- * Both @dest and @src must stay memory resident (kernel memory or locked
- * user space pages).
- */
-dma_cookie_t
-dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
-			    void *src, size_t len)
-{
-	return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest),
-					 (unsigned long) dest & ~PAGE_MASK,
-					 virt_to_page(src),
-					 (unsigned long) src & ~PAGE_MASK, len);
-}
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
-
-/**
- * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
- * @chan: DMA channel to offload copy to
- * @page: destination page
- * @offset: offset in page to copy to
- * @kdata: source address (virtual)
- * @len: length
- *
- * Both @page/@offset and @kdata must be mappable to a bus address according
- * to the DMA mapping API rules for streaming mappings.
- * Both @page/@offset and @kdata must stay memory resident (kernel memory or
- * locked user space pages)
- */
-dma_cookie_t
-dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
-			   unsigned int offset, void *kdata, size_t len)
-{
-	return dma_async_memcpy_pg_to_pg(chan, page, offset,
-					 virt_to_page(kdata),
-					 (unsigned long) kdata & ~PAGE_MASK, len);
-}
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
-
 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 				  struct dma_chan *chan)
 {
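
For context, the removed wrappers were thin compositions of primitives that dmaengine clients still use directly. A minimal sketch of an equivalent memcpy offload done by hand, using only the calls visible in the deleted code (device_prep_dma_memcpy, tx_submit, dma_async_issue_pending); my_offload_copy is an illustrative name and the caller is assumed to have already DMA-mapped both buffers:

static dma_cookie_t my_offload_copy(struct dma_chan *chan,
				    dma_addr_t dest, dma_addr_t src,
				    size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* prepare a descriptor for the copy; NULL means no resources */
	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len,
						  DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	cookie = tx->tx_submit(tx);	/* queue the descriptor */
	dma_async_issue_pending(chan);	/* kick the hardware */
	return cookie;
}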
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index e27cec25c59e..a8d7809e2f4c 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -688,14 +688,14 @@ static int dmatest_func(void *data)
 	runtime = ktime_us_delta(ktime_get(), ktime);
 
 	ret = 0;
+err_dstbuf:
 	for (i = 0; thread->dsts[i]; i++)
 		kfree(thread->dsts[i]);
-err_dstbuf:
 	kfree(thread->dsts);
 err_dsts:
+err_srcbuf:
 	for (i = 0; thread->srcs[i]; i++)
 		kfree(thread->srcs[i]);
-err_srcbuf:
 	kfree(thread->srcs);
 err_srcs:
 	kfree(pq_coefs);
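
The leak fix works because jumping to a cleanup label must release everything allocated before the failure point; moving err_dstbuf/err_srcbuf above their per-element loops makes the individual kfree() calls run on the error path, not just on success. A self-contained sketch of the same goto-unwind pattern, using a NULL-terminated buffer array as dmatest does (alloc_bufs is an illustrative name):

static void *alloc_bufs(int n)
{
	void **bufs;
	int i;

	/* n + 1 zeroed slots: the trailing NULL terminates the array */
	bufs = kcalloc(n + 1, sizeof(*bufs), GFP_KERNEL);
	if (!bufs)
		return NULL;

	for (i = 0; i < n; i++) {
		bufs[i] = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!bufs[i])
			goto err_free;	/* frees bufs[0..i-1], then bufs */
	}
	return bufs;

err_free:
	for (i = 0; bufs[i]; i++)	/* label above the loop, as in the fix */
		kfree(bufs[i]);
	kfree(bufs);
	return NULL;
}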
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 9e84d5bc9307..3b55bb8d969a 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -35,6 +35,7 @@
 
 #include "dma.h"
 #include "registers.h"
+#include "dma_v2.h"
 
 /*
  * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
@@ -147,7 +148,7 @@ static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
 	u16 id;
 
 	/* This implementation only supports PCI-Express */
-	if (dev->bus != &pci_bus_type)
+	if (!dev_is_pci(dev))
 		return -ENODEV;
 	pdev = to_pci_dev(dev);
 	id = dcaid_from_pcidev(pdev);
@@ -179,7 +180,7 @@ static int ioat_dca_remove_requester(struct dca_provider *dca,
 	int i;
 
 	/* This implementation only supports PCI-Express */
-	if (dev->bus != &pci_bus_type)
+	if (!dev_is_pci(dev))
 		return -ENODEV;
 	pdev = to_pci_dev(dev);
 
@@ -320,7 +321,7 @@ static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev)
 	u16 global_req_table;
 
 	/* This implementation only supports PCI-Express */
-	if (dev->bus != &pci_bus_type)
+	if (!dev_is_pci(dev))
 		return -ENODEV;
 	pdev = to_pci_dev(dev);
 	id = dcaid_from_pcidev(pdev);
@@ -354,7 +355,7 @@ static int ioat2_dca_remove_requester(struct dca_provider *dca,
 	u16 global_req_table;
 
 	/* This implementation only supports PCI-Express */
-	if (dev->bus != &pci_bus_type)
+	if (!dev_is_pci(dev))
 		return -ENODEV;
 	pdev = to_pci_dev(dev);
 
@@ -496,7 +497,7 @@ static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
 	u16 global_req_table;
 
 	/* This implementation only supports PCI-Express */
-	if (dev->bus != &pci_bus_type)
+	if (!dev_is_pci(dev))
 		return -ENODEV;
 	pdev = to_pci_dev(dev);
 	id = dcaid_from_pcidev(pdev);
@@ -530,7 +531,7 @@ static int ioat3_dca_remove_requester(struct dca_provider *dca,
 	u16 global_req_table;
 
 	/* This implementation only supports PCI-Express */
-	if (dev->bus != &pci_bus_type)
+	if (!dev_is_pci(dev))
 		return -ENODEV;
 	pdev = to_pci_dev(dev);
 
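
dev_is_pci() from <linux/pci.h> expands to the same bus comparison the open-coded checks used, so this is a pure readability change with identical behavior. A sketch of the idiom (my_add_requester is an illustrative name):

static int my_add_requester(struct device *dev)
{
	struct pci_dev *pdev;

	if (!dev_is_pci(dev))	/* was: dev->bus != &pci_bus_type */
		return -ENODEV;
	pdev = to_pci_dev(dev);	/* safe only after the check above */

	dev_info(&pdev->dev, "requester added\n");
	return 0;
}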
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 4e3549a16132..940c1502a8b5 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -947,7 +947,7 @@ msix:
 	for (i = 0; i < msixcnt; i++)
 		device->msix_entries[i].entry = i;
 
-	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
+	err = pci_enable_msix_exact(pdev, device->msix_entries, msixcnt);
 	if (err)
 		goto msi;
 
@@ -1222,7 +1222,6 @@ int ioat1_dma_probe(struct ioatdma_device *device, int dca)
 	err = ioat_probe(device);
 	if (err)
 		return err;
-	ioat_set_tcp_copy_break(4096);
 	err = ioat_register(device);
 	if (err)
 		return err;
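
pci_enable_msix() could return a positive count meaning "only this many vectors are available", which callers were expected to handle or retry; pci_enable_msix_exact() either allocates exactly the requested number or fails with a negative errno, which matches ioat's all-or-nothing fallback to MSI. A sketch of the pattern, assuming entries[] is already populated (my_setup_irq is an illustrative name):

static int my_setup_irq(struct pci_dev *pdev,
			struct msix_entry *entries, int nvec)
{
	int err;

	/* all-or-nothing: 0 or a negative errno, never a positive
	 * "try again with fewer vectors" count */
	err = pci_enable_msix_exact(pdev, entries, nvec);
	if (!err)
		return 0;

	return pci_enable_msi(pdev);	/* fall back to a single MSI */
}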
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index e982f00a9843..d63f68b1aa35 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -214,13 +214,6 @@ __dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw,
 #define dump_desc_dbg(c, d) \
 	({ if (d) __dump_desc_dbg(&c->base, d->hw, &d->txd, desc_id(d)); 0; })
 
-static inline void ioat_set_tcp_copy_break(unsigned long copybreak)
-{
-	#ifdef CONFIG_NET_DMA
-	sysctl_tcp_dma_copybreak = copybreak;
-	#endif
-}
-
 static inline struct ioat_chan_common *
 ioat_chan_by_index(struct ioatdma_device *device, int index)
 {
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 8d1058085eeb..695483e6be32 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -735,7 +735,8 @@ int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs)
 	 * called under bh_disabled so we need to trigger the timer
 	 * event directly
 	 */
-	if (jiffies > chan->timer.expires && timer_pending(&chan->timer)) {
+	if (time_is_before_jiffies(chan->timer.expires)
+	    && timer_pending(&chan->timer)) {
 		struct ioatdma_device *device = chan->device;
 
 		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
@@ -899,7 +900,6 @@ int ioat2_dma_probe(struct ioatdma_device *device, int dca)
 	err = ioat_probe(device);
 	if (err)
 		return err;
-	ioat_set_tcp_copy_break(2048);
 
 	list_for_each_entry(c, &dma->channels, device_node) {
 		chan = to_chan_common(c);
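
The jiffies change is a correctness fix, not just a cleanup: a direct "jiffies > expires" comparison misbehaves when the jiffies counter wraps around, while the time_* macros from <linux/jiffies.h> compare via signed subtraction and stay wrap-safe (time_is_before_jiffies(a) is defined as time_after(jiffies, a)). A sketch:

#include <linux/jiffies.h>

static bool deadline_passed(unsigned long expires)
{
	/* wrap-safe: signed-difference comparison under the hood */
	return time_is_before_jiffies(expires);
	/* NOT: jiffies > expires -- wrong near the wraparound point */
}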
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index b9b38a1cf92f..895f869d6c2c 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -740,7 +740,7 @@ ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 	return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
 }
 
-struct dma_async_tx_descriptor *
+static struct dma_async_tx_descriptor *
 ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
 		   unsigned int src_cnt, size_t len,
 		   enum sum_check_flags *result, unsigned long flags)
@@ -1091,7 +1091,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
 	}
 }
 
-struct dma_async_tx_descriptor *
+static struct dma_async_tx_descriptor *
 ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
 		  unsigned int src_cnt, const unsigned char *scf, size_t len,
 		  enum sum_check_flags *pqres, unsigned long flags)
@@ -1133,7 +1133,7 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
 			 flags);
 }
 
-struct dma_async_tx_descriptor *
+static struct dma_async_tx_descriptor *
 ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
 		     unsigned int src_cnt, size_t len,
 		     enum sum_check_flags *result, unsigned long flags)
@@ -1655,7 +1655,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	err = ioat_probe(device);
 	if (err)
 		return err;
-	ioat_set_tcp_copy_break(262144);
 
 	list_for_each_entry(c, &dma->channels, device_node) {
 		chan = to_chan_common(c);
diff --git a/drivers/dma/iovlock.c b/drivers/dma/iovlock.c
deleted file mode 100644
index bb48a57c2fc1..000000000000
--- a/drivers/dma/iovlock.c
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
- * Portions based on net/core/datagram.c and copyrighted by their authors.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
- */
-
-/*
- * This code allows the net stack to make use of a DMA engine for
- * skb to iovec copies.
- */
-
-#include <linux/dmaengine.h>
-#include <linux/pagemap.h>
-#include <linux/slab.h>
-#include <net/tcp.h> /* for memcpy_toiovec */
-#include <asm/io.h>
-#include <asm/uaccess.h>
-
-static int num_pages_spanned(struct iovec *iov)
-{
-	return
-	((PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
-	((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT);
-}
-
-/*
- * Pin down all the iovec pages needed for len bytes.
- * Return a struct dma_pinned_list to keep track of pages pinned down.
- *
- * We are allocating a single chunk of memory, and then carving it up into
- * 3 sections, the latter 2 whose size depends on the number of iovecs and the
- * total number of pages, respectively.
- */
-struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
-{
-	struct dma_pinned_list *local_list;
-	struct page **pages;
-	int i;
-	int ret;
-	int nr_iovecs = 0;
-	int iovec_len_used = 0;
-	int iovec_pages_used = 0;
-
-	/* don't pin down non-user-based iovecs */
-	if (segment_eq(get_fs(), KERNEL_DS))
-		return NULL;
-
-	/* determine how many iovecs/pages there are, up front */
-	do {
-		iovec_len_used += iov[nr_iovecs].iov_len;
-		iovec_pages_used += num_pages_spanned(&iov[nr_iovecs]);
-		nr_iovecs++;
-	} while (iovec_len_used < len);
-
-	/* single kmalloc for pinned list, page_list[], and the page arrays */
-	local_list = kmalloc(sizeof(*local_list)
-		+ (nr_iovecs * sizeof (struct dma_page_list))
-		+ (iovec_pages_used * sizeof (struct page*)), GFP_KERNEL);
-	if (!local_list)
-		goto out;
-
-	/* list of pages starts right after the page list array */
-	pages = (struct page **) &local_list->page_list[nr_iovecs];
-
-	local_list->nr_iovecs = 0;
-
-	for (i = 0; i < nr_iovecs; i++) {
-		struct dma_page_list *page_list = &local_list->page_list[i];
-
-		len -= iov[i].iov_len;
-
-		if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len))
-			goto unpin;
-
-		page_list->nr_pages = num_pages_spanned(&iov[i]);
-		page_list->base_address = iov[i].iov_base;
-
-		page_list->pages = pages;
-		pages += page_list->nr_pages;
-
-		/* pin pages down */
-		down_read(&current->mm->mmap_sem);
-		ret = get_user_pages(
-			current,
-			current->mm,
-			(unsigned long) iov[i].iov_base,
-			page_list->nr_pages,
-			1,	/* write */
-			0,	/* force */
-			page_list->pages,
-			NULL);
-		up_read(&current->mm->mmap_sem);
-
-		if (ret != page_list->nr_pages)
-			goto unpin;
-
-		local_list->nr_iovecs = i + 1;
-	}
-
-	return local_list;
-
-unpin:
-	dma_unpin_iovec_pages(local_list);
-out:
-	return NULL;
-}
-
-void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list)
-{
-	int i, j;
-
-	if (!pinned_list)
-		return;
-
-	for (i = 0; i < pinned_list->nr_iovecs; i++) {
-		struct dma_page_list *page_list = &pinned_list->page_list[i];
-		for (j = 0; j < page_list->nr_pages; j++) {
-			set_page_dirty_lock(page_list->pages[j]);
-			page_cache_release(page_list->pages[j]);
-		}
-	}
-
-	kfree(pinned_list);
-}
-
-
-/*
- * We have already pinned down the pages we will be using in the iovecs.
- * Each entry in iov array has corresponding entry in pinned_list->page_list.
- * Using array indexing to keep iov[] and page_list[] in sync.
- * Initial elements in iov array's iov->iov_len will be 0 if already copied into
- * by another call.
- * iov array length remaining guaranteed to be bigger than len.
- */
-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
-	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len)
-{
-	int iov_byte_offset;
-	int copy;
-	dma_cookie_t dma_cookie = 0;
-	int iovec_idx;
-	int page_idx;
-
-	if (!chan)
-		return memcpy_toiovec(iov, kdata, len);
-
-	iovec_idx = 0;
-	while (iovec_idx < pinned_list->nr_iovecs) {
-		struct dma_page_list *page_list;
-
-		/* skip already used-up iovecs */
-		while (!iov[iovec_idx].iov_len)
-			iovec_idx++;
-
-		page_list = &pinned_list->page_list[iovec_idx];
-
-		iov_byte_offset = ((unsigned long)iov[iovec_idx].iov_base & ~PAGE_MASK);
-		page_idx = (((unsigned long)iov[iovec_idx].iov_base & PAGE_MASK)
-			- ((unsigned long)page_list->base_address & PAGE_MASK)) >> PAGE_SHIFT;
-
-		/* break up copies to not cross page boundary */
-		while (iov[iovec_idx].iov_len) {
-			copy = min_t(int, PAGE_SIZE - iov_byte_offset, len);
-			copy = min_t(int, copy, iov[iovec_idx].iov_len);
-
-			dma_cookie = dma_async_memcpy_buf_to_pg(chan,
-					page_list->pages[page_idx],
-					iov_byte_offset,
-					kdata,
-					copy);
-			/* poll for a descriptor slot */
-			if (unlikely(dma_cookie < 0)) {
-				dma_async_issue_pending(chan);
-				continue;
-			}
-
-			len -= copy;
-			iov[iovec_idx].iov_len -= copy;
-			iov[iovec_idx].iov_base += copy;
-
-			if (!len)
-				return dma_cookie;
-
-			kdata += copy;
-			iov_byte_offset = 0;
-			page_idx++;
-		}
-		iovec_idx++;
-	}
-
-	/* really bad if we ever run out of iovecs */
-	BUG();
-	return -EFAULT;
-}
-
-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
-	struct dma_pinned_list *pinned_list, struct page *page,
-	unsigned int offset, size_t len)
-{
-	int iov_byte_offset;
-	int copy;
-	dma_cookie_t dma_cookie = 0;
-	int iovec_idx;
-	int page_idx;
-	int err;
-
-	/* this needs as-yet-unimplemented buf-to-buff, so punt. */
-	/* TODO: use dma for this */
-	if (!chan || !pinned_list) {
-		u8 *vaddr = kmap(page);
-		err = memcpy_toiovec(iov, vaddr + offset, len);
-		kunmap(page);
-		return err;
-	}
-
-	iovec_idx = 0;
-	while (iovec_idx < pinned_list->nr_iovecs) {
-		struct dma_page_list *page_list;
-
-		/* skip already used-up iovecs */
-		while (!iov[iovec_idx].iov_len)
-			iovec_idx++;
-
-		page_list = &pinned_list->page_list[iovec_idx];
-
-		iov_byte_offset = ((unsigned long)iov[iovec_idx].iov_base & ~PAGE_MASK);
-		page_idx = (((unsigned long)iov[iovec_idx].iov_base & PAGE_MASK)
-			- ((unsigned long)page_list->base_address & PAGE_MASK)) >> PAGE_SHIFT;
-
-		/* break up copies to not cross page boundary */
-		while (iov[iovec_idx].iov_len) {
-			copy = min_t(int, PAGE_SIZE - iov_byte_offset, len);
-			copy = min_t(int, copy, iov[iovec_idx].iov_len);
-
-			dma_cookie = dma_async_memcpy_pg_to_pg(chan,
-					page_list->pages[page_idx],
-					iov_byte_offset,
-					page,
-					offset,
-					copy);
-			/* poll for a descriptor slot */
-			if (unlikely(dma_cookie < 0)) {
-				dma_async_issue_pending(chan);
-				continue;
-			}
-
-			len -= copy;
-			iov[iovec_idx].iov_len -= copy;
-			iov[iovec_idx].iov_base += copy;
-
-			if (!len)
-				return dma_cookie;
-
-			offset += copy;
-			iov_byte_offset = 0;
-			page_idx++;
-		}
-		iovec_idx++;
-	}
-
-	/* really bad if we ever run out of iovecs */
-	BUG();
-	return -EFAULT;
-}
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 394cbc5c93e3..7938272f2edf 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -310,7 +310,8 @@ mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
 		return 0;
 }
 
-static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
+/* This function must be called with the mv_xor_chan spinlock held */
+static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
 {
 	struct mv_xor_desc_slot *iter, *_iter;
 	dma_cookie_t cookie = 0;
@@ -366,18 +367,13 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
 	mv_chan->dmachan.completed_cookie = cookie;
 }
 
-static void
-mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
-{
-	spin_lock_bh(&mv_chan->lock);
-	__mv_xor_slot_cleanup(mv_chan);
-	spin_unlock_bh(&mv_chan->lock);
-}
-
 static void mv_xor_tasklet(unsigned long data)
 {
 	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
+
+	spin_lock_bh(&chan->lock);
 	mv_xor_slot_cleanup(chan);
+	spin_unlock_bh(&chan->lock);
 }
 
 static struct mv_xor_desc_slot *
@@ -656,9 +652,10 @@ static void mv_xor_free_chan_resources(struct dma_chan *chan)
 	struct mv_xor_desc_slot *iter, *_iter;
 	int in_use_descs = 0;
 
+	spin_lock_bh(&mv_chan->lock);
+
 	mv_xor_slot_cleanup(mv_chan);
 
-	spin_lock_bh(&mv_chan->lock);
 	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
 				 chain_node) {
 		in_use_descs++;
@@ -700,11 +697,12 @@ static enum dma_status mv_xor_status(struct dma_chan *chan,
 	enum dma_status ret;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret == DMA_COMPLETE) {
-		mv_xor_clean_completed_slots(mv_chan);
+	if (ret == DMA_COMPLETE)
 		return ret;
-	}
+
+	spin_lock_bh(&mv_chan->lock);
 	mv_xor_slot_cleanup(mv_chan);
+	spin_unlock_bh(&mv_chan->lock);
 
 	return dma_cookie_status(chan, cookie, txstate);
 }
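
The locking refactor above follows a common kernel pattern: drop the locked wrapper around a __prefixed helper and make every caller take the channel spinlock itself, so paths like mv_xor_free_chan_resources can hold it across both the cleanup and the subsequent list walk. A generic, self-contained sketch of the convention, with illustrative names (my_chan, my_cleanup, my_tasklet) and a lockdep assertion standing in for the comment-only contract used in this driver:

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/lockdep.h>

struct my_chan {
	spinlock_t lock;
	struct list_head chain;
};

/* must be called with chan->lock held */
static void my_cleanup(struct my_chan *chan)
{
	lockdep_assert_held(&chan->lock);
	/* ... walk and complete chan->chain ... */
}

static void my_tasklet(unsigned long data)
{
	struct my_chan *chan = (struct my_chan *)data;

	spin_lock_bh(&chan->lock);	/* caller takes the lock */
	my_cleanup(chan);
	spin_unlock_bh(&chan->lock);
}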
@@ -782,7 +780,7 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
 
 static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 {
-	int i;
+	int i, ret;
 	void *src, *dest;
 	dma_addr_t src_dma, dest_dma;
 	struct dma_chan *dma_chan;
@@ -819,19 +817,44 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 
 	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
 			       PAGE_SIZE, DMA_TO_DEVICE);
-	unmap->to_cnt = 1;
 	unmap->addr[0] = src_dma;
 
+	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
+	if (ret) {
+		err = -ENOMEM;
+		goto free_resources;
+	}
+	unmap->to_cnt = 1;
+
 	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
 				PAGE_SIZE, DMA_FROM_DEVICE);
-	unmap->from_cnt = 1;
 	unmap->addr[1] = dest_dma;
 
+	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
+	if (ret) {
+		err = -ENOMEM;
+		goto free_resources;
+	}
+	unmap->from_cnt = 1;
 	unmap->len = PAGE_SIZE;
 
 	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
 				    PAGE_SIZE, 0);
+	if (!tx) {
+		dev_err(dma_chan->device->dev,
+			"Self-test cannot prepare operation, disabling\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
+
 	cookie = mv_xor_tx_submit(tx);
+	if (dma_submit_error(cookie)) {
+		dev_err(dma_chan->device->dev,
+			"Self-test submit error, disabling\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
+
 	mv_xor_issue_pending(dma_chan);
 	async_tx_ack(tx);
 	msleep(1);
@@ -866,7 +889,7 @@ out:
 static int
 mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 {
-	int i, src_idx;
+	int i, src_idx, ret;
 	struct page *dest;
 	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
 	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
@@ -929,19 +952,42 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
 					      0, PAGE_SIZE, DMA_TO_DEVICE);
 		dma_srcs[i] = unmap->addr[i];
+		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
+		if (ret) {
+			err = -ENOMEM;
+			goto free_resources;
+		}
 		unmap->to_cnt++;
 	}
 
 	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
 					      DMA_FROM_DEVICE);
 	dest_dma = unmap->addr[src_count];
+	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
+	if (ret) {
+		err = -ENOMEM;
+		goto free_resources;
+	}
 	unmap->from_cnt = 1;
 	unmap->len = PAGE_SIZE;
 
 	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
 				 src_count, PAGE_SIZE, 0);
+	if (!tx) {
+		dev_err(dma_chan->device->dev,
+			"Self-test cannot prepare operation, disabling\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
 
 	cookie = mv_xor_tx_submit(tx);
+	if (dma_submit_error(cookie)) {
+		dev_err(dma_chan->device->dev,
+			"Self-test submit error, disabling\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
+
 	mv_xor_issue_pending(dma_chan);
 	async_tx_ack(tx);
 	msleep(8);
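
The checks added here enforce the DMA-API rule that every dma_map_page() result must be validated with dma_mapping_error() before the address is handed to hardware (CONFIG_DMA_API_DEBUG reports violations when this is skipped). A minimal sketch of the pattern in isolation (my_map_one is an illustrative name):

static int my_map_one(struct device *dev, struct page *page,
		      dma_addr_t *out)
{
	dma_addr_t addr;

	addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;	/* never hand a bad handle to hardware */

	*out = addr;
	return 0;
}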