author	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-07 20:39:25 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-07 20:39:25 -0400
commit	d0cd84817c745655428dbfdb1e3f754230b46bef (patch)
tree	a7b6f422f6ac50f506ffa7a66f8e83387f90f212 /drivers/dma/dmaengine.c
parent	bdf428feb225229b1d4715b45bbdad4a934cd89c (diff)
parent	3f334078567245429540e6461c81c749fce87f70 (diff)
Merge tag 'dmaengine-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine
Pull dmaengine updates from Dan Williams:
 "Even though this has fixes marked for -stable, given the size and the
  needed conflict resolutions this is 3.18-rc1/merge-window material.

  These patches have been languishing in my tree for a long while.  The
  fact that I do not have the time to do proper/prompt maintenance of
  this tree is a primary factor in the decision to step down as
  dmaengine maintainer.  That and the fact that the bulk of drivers/dma/
  activity is going through Vinod these days.

  The net_dma removal has not been in -next.  It has developed simple
  conflicts against mainline and net-next (for-3.18).

  Continuing thanks to Vinod for staying on top of drivers/dma/.

  Summary:

   1/ Step down as dmaengine maintainer see commit 08223d80df38
      "dmaengine maintainer update"

   2/ Removal of net_dma, as it has been marked 'broken' since 3.13
      (commit 77873803363c "net_dma: mark broken"), without reports of
      performance regression.

   3/ Miscellaneous fixes"

* tag 'dmaengine-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine:
  net: make tcp_cleanup_rbuf private
  net_dma: revert 'copied_early'
  net_dma: simple removal
  dmaengine maintainer update
  dmatest: prevent memory leakage on error path in thread
  ioat: Use time_before_jiffies()
  dmaengine: fix xor sources continuation
  dma: mv_xor: Rename __mv_xor_slot_cleanup() to mv_xor_slot_cleanup()
  dma: mv_xor: Remove all callers of mv_xor_slot_cleanup()
  dma: mv_xor: Remove unneeded mv_xor_clean_completed_slots() call
  ioat: Use pci_enable_msix_exact() instead of pci_enable_msix()
  drivers: dma: Include appropriate header file in dca.c
  drivers: dma: Mark functions as static in dma_v3.c
  dma: mv_xor: Add DMA API error checks
  ioat/dca: Use dev_is_pci() to check whether it is pci device
Diffstat (limited to 'drivers/dma/dmaengine.c')
-rw-r--r--	drivers/dma/dmaengine.c	104
1 file changed, 0 insertions(+), 104 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index d5d30ed863ce..24bfaf0b92ba 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1081,110 +1081,6 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
 }
 EXPORT_SYMBOL(dmaengine_get_unmap_data);
 
-/**
- * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
- * @chan: DMA channel to offload copy to
- * @dest_pg: destination page
- * @dest_off: offset in page to copy to
- * @src_pg: source page
- * @src_off: offset in page to copy from
- * @len: length
- *
- * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
- * address according to the DMA mapping API rules for streaming mappings.
- * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
- * (kernel memory or locked user space pages).
- */
-dma_cookie_t
-dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
-	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
-	size_t len)
-{
-	struct dma_device *dev = chan->device;
-	struct dma_async_tx_descriptor *tx;
-	struct dmaengine_unmap_data *unmap;
-	dma_cookie_t cookie;
-	unsigned long flags;
-
-	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
-	if (!unmap)
-		return -ENOMEM;
-
-	unmap->to_cnt = 1;
-	unmap->from_cnt = 1;
-	unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len,
-				      DMA_TO_DEVICE);
-	unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
-				      DMA_FROM_DEVICE);
-	unmap->len = len;
-	flags = DMA_CTRL_ACK;
-	tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
-					 len, flags);
-
-	if (!tx) {
-		dmaengine_unmap_put(unmap);
-		return -ENOMEM;
-	}
-
-	dma_set_unmap(tx, unmap);
-	cookie = tx->tx_submit(tx);
-	dmaengine_unmap_put(unmap);
-
-	preempt_disable();
-	__this_cpu_add(chan->local->bytes_transferred, len);
-	__this_cpu_inc(chan->local->memcpy_count);
-	preempt_enable();
-
-	return cookie;
-}
-EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
-
-/**
- * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
- * @chan: DMA channel to offload copy to
- * @dest: destination address (virtual)
- * @src: source address (virtual)
- * @len: length
- *
- * Both @dest and @src must be mappable to a bus address according to the
- * DMA mapping API rules for streaming mappings.
- * Both @dest and @src must stay memory resident (kernel memory or locked
- * user space pages).
- */
-dma_cookie_t
-dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
-			    void *src, size_t len)
-{
-	return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest),
-					 (unsigned long) dest & ~PAGE_MASK,
-					 virt_to_page(src),
-					 (unsigned long) src & ~PAGE_MASK, len);
-}
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
-
-/**
- * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
- * @chan: DMA channel to offload copy to
- * @page: destination page
- * @offset: offset in page to copy to
- * @kdata: source address (virtual)
- * @len: length
- *
- * Both @page/@offset and @kdata must be mappable to a bus address according
- * to the DMA mapping API rules for streaming mappings.
- * Both @page/@offset and @kdata must stay memory resident (kernel memory or
- * locked user space pages)
- */
-dma_cookie_t
-dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
-			   unsigned int offset, void *kdata, size_t len)
-{
-	return dma_async_memcpy_pg_to_pg(chan, page, offset,
-					 virt_to_page(kdata),
-					 (unsigned long) kdata & ~PAGE_MASK, len);
-}
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
-
 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 	struct dma_chan *chan)
 {
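
For context, below is a minimal sketch of how an in-kernel client might have driven the now-removed buf-to-buf helper. This code is not from the tree or from this commit: the function name example_offload_copy and the polling-based completion wait are illustrative assumptions. The dmaengine calls themselves (dmaengine_get()/dmaengine_put(), dma_find_channel(), dma_async_issue_pending(), dma_sync_wait()) are the public client API of this era.

	#include <linux/dmaengine.h>
	#include <linux/errno.h>

	/* Hypothetical caller: offload a kernel-memory copy to a DMA channel
	 * and poll until it completes.  Both buffers must stay resident and be
	 * DMA-mappable, per the kernel-doc on the removed functions above. */
	static int example_offload_copy(void *dest, void *src, size_t len)
	{
		struct dma_chan *chan;
		dma_cookie_t cookie;
		int ret = 0;

		/* Take a dmaengine client reference so channels stay available. */
		dmaengine_get();

		/* Find any channel advertising the DMA_MEMCPY capability. */
		chan = dma_find_channel(DMA_MEMCPY);
		if (!chan) {
			ret = -ENODEV;
			goto out;
		}

		/* The removed helper mapped both buffers, prepared a memcpy
		 * descriptor via device_prep_dma_memcpy(), and submitted it. */
		cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
		if (cookie < 0) {
			ret = cookie;
			goto out;
		}

		/* Flush the pending descriptor to hardware, then spin on
		 * completion status until the transfer finishes. */
		dma_async_issue_pending(chan);
		if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
			ret = -EIO;
	out:
		dmaengine_put();
		return ret;
	}

After this removal, no wrapper hides the mapping step: remaining memcpy-offload clients map their buffers and prepare descriptors through device_prep_dma_memcpy() directly, as dma_async_memcpy_pg_to_pg() itself did above.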