author		Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>	2013-10-18 13:35:32 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2013-11-14 14:04:38 -0500
commit		54f8d501e842879143e867e70996574a54d1e130 (patch)
tree		1fcd65a5152d330167f5eefba5cc5d514ec91da1 /drivers
parent		6f57fd0578dff23a4bd16118f0cb4201bcec91f1 (diff)
dmaengine: remove DMA unmap from drivers
Remove support for DMA unmapping from drivers as it is no longer
needed (DMA core code is now handling it).
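
The driver-side cleanup below reduces each completion path to a single
dma_descriptor_unmap() call. For reference, a minimal sketch of the
core-side helper added earlier in this series (abbreviated here; see
include/linux/dmaengine.h for the authoritative version):

	static inline void
	dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
	{
		/*
		 * Drop the reference on the unmap data attached at
		 * prep time; the final put performs the actual
		 * dma_unmap_page() calls for all recorded addresses.
		 */
		if (tx->unmap) {
			dmaengine_unmap_put(tx->unmap);
			tx->unmap = NULL;
		}
	}

With this in place, drivers no longer need to inspect the
DMA_COMPL_SKIP_{SRC,DEST}_UNMAP and DMA_COMPL_{SRC,DEST}_UNMAP_SINGLE
flags themselves.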
Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
[djbw: fix up chan2parent() unused warning in drivers/dma/dw/core.c]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/dma/amba-pl08x.c	|  31
-rw-r--r--	drivers/dma/at_hdmac.c		|  25
-rw-r--r--	drivers/dma/dw/core.c		|  24
-rw-r--r--	drivers/dma/ep93xx_dma.c	|  29
-rw-r--r--	drivers/dma/fsldma.c		|  16
-rw-r--r--	drivers/dma/ioat/dma.c		|  16
-rw-r--r--	drivers/dma/ioat/dma.h		|  12
-rw-r--r--	drivers/dma/ioat/dma_v2.c	|   1
-rw-r--r--	drivers/dma/ioat/dma_v3.c	| 166
-rw-r--r--	drivers/dma/iop-adma.c		|  96
-rw-r--r--	drivers/dma/mv_xor.c		|  44
-rw-r--r--	drivers/dma/ppc4xx/adma.c	| 269
-rw-r--r--	drivers/dma/timb_dma.c		|  36
-rw-r--r--	drivers/dma/txx9dmac.c		|  24
14 files changed, 3 insertions, 786 deletions
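
For illustration only (not part of this patch), the submission model
the conversion relies on: the client maps the buffers, records them in
a dmaengine_unmap_data attached to the descriptor, and the core unmaps
them on completion. A hypothetical memcpy prep path (variable names
assumed, not taken from any driver below) might look like:

	struct dmaengine_unmap_data *unmap;

	/* reserve two slots: one TO_DEVICE source, one FROM_DEVICE dest */
	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
	if (!unmap)
		return NULL;

	unmap->len = len;
	unmap->to_cnt = 1;
	unmap->addr[0] = dma_map_page(device->dev, src_page, src_off,
				      len, DMA_TO_DEVICE);
	unmap->from_cnt = 1;
	unmap->addr[1] = dma_map_page(device->dev, dst_page, dst_off,
				      len, DMA_FROM_DEVICE);

	tx = device->device_prep_dma_memcpy(chan, unmap->addr[1],
					    unmap->addr[0], len, flags);
	if (tx)
		dma_set_unmap(tx, unmap);	/* tx takes its own reference */

	dmaengine_unmap_put(unmap);		/* drop the prep-time reference */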
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 7f9846464b77..6a5f782ec7eb 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1164,43 +1164,12 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
 	kfree(txd);
 }
 
-static void pl08x_unmap_buffers(struct pl08x_txd *txd)
-{
-	struct device *dev = txd->vd.tx.chan->device->dev;
-	struct pl08x_sg *dsg;
-
-	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-		if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				dma_unmap_single(dev, dsg->src_addr, dsg->len,
-						DMA_TO_DEVICE);
-		else {
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				dma_unmap_page(dev, dsg->src_addr, dsg->len,
-						DMA_TO_DEVICE);
-		}
-	}
-	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-		if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				dma_unmap_single(dev, dsg->dst_addr, dsg->len,
-						DMA_FROM_DEVICE);
-		else
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				dma_unmap_page(dev, dsg->dst_addr, dsg->len,
-						DMA_FROM_DEVICE);
-	}
-}
-
 static void pl08x_desc_free(struct virt_dma_desc *vd)
 {
 	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
 
 	dma_descriptor_unmap(txd);
-	if (!plchan->slave)
-		pl08x_unmap_buffers(txd);
-
 	if (!txd->done)
 		pl08x_release_mux(plchan);
 
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index cc7098ddf9d4..6deaefbec0b0 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -344,32 +344,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 	/* move myself to free_list */
 	list_move(&desc->desc_node, &atchan->free_list);
 
-	/* unmap dma addresses (not on slave channels) */
 	dma_descriptor_unmap(txd);
-	if (!atchan->chan_common.private) {
-		struct device *parent = chan2parent(&atchan->chan_common);
-		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
-				dma_unmap_single(parent,
-						desc->lli.daddr,
-						desc->len, DMA_FROM_DEVICE);
-			else
-				dma_unmap_page(parent,
-						desc->lli.daddr,
-						desc->len, DMA_FROM_DEVICE);
-		}
-		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
-				dma_unmap_single(parent,
-						desc->lli.saddr,
-						desc->len, DMA_TO_DEVICE);
-			else
-				dma_unmap_page(parent,
-						desc->lli.saddr,
-						desc->len, DMA_TO_DEVICE);
-		}
-	}
-
 	/* for cyclic transfers,
 	 * no need to replay callback function while stopping */
 	if (!atc_chan_is_cyclic(atchan)) {
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index e3fe1b1a73b1..1f39ccce2727 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -85,10 +85,6 @@ static struct device *chan2dev(struct dma_chan *chan)
 {
 	return &chan->dev->device;
 }
-static struct device *chan2parent(struct dma_chan *chan)
-{
-	return chan->dev->device.parent;
-}
 
 static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
 {
@@ -312,26 +308,6 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
 	list_move(&desc->desc_node, &dwc->free_list);
 
 	dma_descriptor_unmap(txd);
-	if (!is_slave_direction(dwc->direction)) {
-		struct device *parent = chan2parent(&dwc->chan);
-		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
-				dma_unmap_single(parent, desc->lli.dar,
-					desc->total_len, DMA_FROM_DEVICE);
-			else
-				dma_unmap_page(parent, desc->lli.dar,
-					desc->total_len, DMA_FROM_DEVICE);
-		}
-		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
-				dma_unmap_single(parent, desc->lli.sar,
-					desc->total_len, DMA_TO_DEVICE);
-			else
-				dma_unmap_page(parent, desc->lli.sar,
-					desc->total_len, DMA_TO_DEVICE);
-		}
-	}
-
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	if (callback)
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index dcd6bf5d3091..cb4bf682a708 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -733,28 +733,6 @@ static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
 	spin_unlock_irqrestore(&edmac->lock, flags);
 }
 
-static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc)
-{
-	struct device *dev = desc->txd.chan->device->dev;
-
-	if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-		if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
-			dma_unmap_single(dev, desc->src_addr, desc->size,
-					 DMA_TO_DEVICE);
-		else
-			dma_unmap_page(dev, desc->src_addr, desc->size,
-				       DMA_TO_DEVICE);
-	}
-	if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-		if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
-			dma_unmap_single(dev, desc->dst_addr, desc->size,
-					 DMA_FROM_DEVICE);
-		else
-			dma_unmap_page(dev, desc->dst_addr, desc->size,
-				       DMA_FROM_DEVICE);
-	}
-}
-
 static void ep93xx_dma_tasklet(unsigned long data)
 {
 	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
@@ -787,14 +765,7 @@ static void ep93xx_dma_tasklet(unsigned long data)
 
 	/* Now we can release all the chained descriptors */
 	list_for_each_entry_safe(desc, d, &list, node) {
-		/*
-		 * For the memcpy channels the API requires us to unmap the
-		 * buffers unless requested otherwise.
-		 */
 		dma_descriptor_unmap(&desc->txd);
-		if (!edmac->chan.private)
-			ep93xx_dma_unmap_buffers(desc);
-
 		ep93xx_dma_desc_put(edmac, desc);
 	}
 
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 66c4052a1f34..d9e6381b2b16 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -869,22 +869,6 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
 	dma_run_dependencies(txd);
 
 	dma_descriptor_unmap(txd);
-	/* Unmap the dst buffer, if requested */
-	if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-		if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
-			dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE);
-		else
-			dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE);
-	}
-
-	/* Unmap the src buffer, if requested */
-	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-		if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
-			dma_unmap_single(dev, src, len, DMA_TO_DEVICE);
-		else
-			dma_unmap_page(dev, src, len, DMA_TO_DEVICE);
-	}
-
 #ifdef FSL_DMA_LD_DEBUG
 	chan_dbg(chan, "LD %p free\n", desc);
 #endif
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 26f8cfd6bc3f..c123e32dbbb0 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -531,21 +531,6 @@ static void ioat1_cleanup_event(unsigned long data)
 	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
 }
 
-void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
-		    size_t len, struct ioat_dma_descriptor *hw)
-{
-	struct pci_dev *pdev = chan->device->pdev;
-	size_t offset = len - hw->size;
-
-	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
-		ioat_unmap(pdev, hw->dst_addr - offset, len,
-			   PCI_DMA_FROMDEVICE, flags, 1);
-
-	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
-		ioat_unmap(pdev, hw->src_addr - offset, len,
-			   PCI_DMA_TODEVICE, flags, 0);
-}
-
 dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
 {
 	dma_addr_t phys_complete;
@@ -603,7 +588,6 @@ static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
 		if (tx->cookie) {
 			dma_cookie_complete(tx);
 			dma_descriptor_unmap(tx);
-			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
 			ioat->active -= desc->hw->tx_cnt;
 			if (tx->callback) {
 				tx->callback(tx->callback_param);
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 54fb7b9ff9aa..4300d5af188f 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -342,16 +342,6 @@ static inline bool is_ioat_bug(unsigned long err)
 	return !!err;
 }
 
-static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
-			      int direction, enum dma_ctrl_flags flags, bool dst)
-{
-	if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) ||
-	    (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE)))
-		pci_unmap_single(pdev, addr, len, direction);
-	else
-		pci_unmap_page(pdev, addr, len, direction);
-}
-
 int ioat_probe(struct ioatdma_device *device);
 int ioat_register(struct ioatdma_device *device);
 int ioat1_dma_probe(struct ioatdma_device *dev, int dca);
@@ -363,8 +353,6 @@ void ioat_init_channel(struct ioatdma_device *device,
 		       struct ioat_chan_common *chan, int idx);
 enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 				   struct dma_tx_state *txstate);
-void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
-		    size_t len, struct ioat_dma_descriptor *hw);
 bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
 			   dma_addr_t *phys_complete);
 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index fc7b50a813cc..5d3affe7e976 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -149,7 +149,6 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
 		dump_desc_dbg(ioat, desc);
 		if (tx->cookie) {
 			dma_descriptor_unmap(tx);
-			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
 			dma_cookie_complete(tx);
 			if (tx->callback) {
 				tx->callback(tx->callback_param);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 57a2901b917a..43386c171bba 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -96,13 +96,6 @@ static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0,
 
 static void ioat3_eh(struct ioat2_dma_chan *ioat);
 
-static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
-{
-	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];
-
-	return raw->field[xor_idx_to_field[idx]];
-}
-
 static void xor_set_src(struct ioat_raw_descriptor *descs[2],
 			dma_addr_t addr, u32 offset, int idx)
 {
@@ -296,164 +289,6 @@ static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed)
 	kmem_cache_free(device->sed_pool, sed);
 }
 
-static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
-			    struct ioat_ring_ent *desc, int idx)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	struct pci_dev *pdev = chan->device->pdev;
-	size_t len = desc->len;
-	size_t offset = len - desc->hw->size;
-	struct dma_async_tx_descriptor *tx = &desc->txd;
-	enum dma_ctrl_flags flags = tx->flags;
-
-	switch (desc->hw->ctl_f.op) {
-	case IOAT_OP_COPY:
-		if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */
-			ioat_dma_unmap(chan, flags, len, desc->hw);
-		break;
-	case IOAT_OP_XOR_VAL:
-	case IOAT_OP_XOR: {
-		struct ioat_xor_descriptor *xor = desc->xor;
-		struct ioat_ring_ent *ext;
-		struct ioat_xor_ext_descriptor *xor_ex = NULL;
-		int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt);
-		struct ioat_raw_descriptor *descs[2];
-		int i;
-
-		if (src_cnt > 5) {
-			ext = ioat2_get_ring_ent(ioat, idx + 1);
-			xor_ex = ext->xor_ex;
-		}
-
-		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-			descs[0] = (struct ioat_raw_descriptor *) xor;
-			descs[1] = (struct ioat_raw_descriptor *) xor_ex;
-			for (i = 0; i < src_cnt; i++) {
-				dma_addr_t src = xor_get_src(descs, i);
-
-				ioat_unmap(pdev, src - offset, len,
-					   PCI_DMA_TODEVICE, flags, 0);
-			}
-
-			/* dest is a source in xor validate operations */
-			if (xor->ctl_f.op == IOAT_OP_XOR_VAL) {
-				ioat_unmap(pdev, xor->dst_addr - offset, len,
-					   PCI_DMA_TODEVICE, flags, 1);
-				break;
-			}
-		}
-
-		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
-			ioat_unmap(pdev, xor->dst_addr - offset, len,
-				   PCI_DMA_FROMDEVICE, flags, 1);
-		break;
-	}
-	case IOAT_OP_PQ_VAL:
-	case IOAT_OP_PQ: {
-		struct ioat_pq_descriptor *pq = desc->pq;
-		struct ioat_ring_ent *ext;
-		struct ioat_pq_ext_descriptor *pq_ex = NULL;
-		int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
-		struct ioat_raw_descriptor *descs[2];
-		int i;
-
-		if (src_cnt > 3) {
-			ext = ioat2_get_ring_ent(ioat, idx + 1);
-			pq_ex = ext->pq_ex;
-		}
-
-		/* in the 'continue' case don't unmap the dests as sources */
-		if (dmaf_p_disabled_continue(flags))
-			src_cnt--;
-		else if (dmaf_continue(flags))
-			src_cnt -= 3;
-
-		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-			descs[0] = (struct ioat_raw_descriptor *) pq;
-			descs[1] = (struct ioat_raw_descriptor *) pq_ex;
-			for (i = 0; i < src_cnt; i++) {
-				dma_addr_t src = pq_get_src(descs, i);
-
-				ioat_unmap(pdev, src - offset, len,
-					   PCI_DMA_TODEVICE, flags, 0);
-			}
-
-			/* the dests are sources in pq validate operations */
-			if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
-				if (!(flags & DMA_PREP_PQ_DISABLE_P))
-					ioat_unmap(pdev, pq->p_addr - offset,
-						   len, PCI_DMA_TODEVICE, flags, 0);
-				if (!(flags & DMA_PREP_PQ_DISABLE_Q))
-					ioat_unmap(pdev, pq->q_addr - offset,
-						   len, PCI_DMA_TODEVICE, flags, 0);
-				break;
-			}
-		}
-
-		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-			if (!(flags & DMA_PREP_PQ_DISABLE_P))
-				ioat_unmap(pdev, pq->p_addr - offset, len,
-					   PCI_DMA_BIDIRECTIONAL, flags, 1);
-			if (!(flags & DMA_PREP_PQ_DISABLE_Q))
-				ioat_unmap(pdev, pq->q_addr - offset, len,
-					   PCI_DMA_BIDIRECTIONAL, flags, 1);
-		}
-		break;
-	}
-	case IOAT_OP_PQ_16S:
-	case IOAT_OP_PQ_VAL_16S: {
-		struct ioat_pq_descriptor *pq = desc->pq;
-		int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
-		struct ioat_raw_descriptor *descs[4];
-		int i;
-
-		/* in the 'continue' case don't unmap the dests as sources */
-		if (dmaf_p_disabled_continue(flags))
-			src_cnt--;
-		else if (dmaf_continue(flags))
-			src_cnt -= 3;
-
-		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-			descs[0] = (struct ioat_raw_descriptor *)pq;
-			descs[1] = (struct ioat_raw_descriptor *)(desc->sed->hw);
-			descs[2] = (struct ioat_raw_descriptor *)(&desc->sed->hw->b[0]);
-			for (i = 0; i < src_cnt; i++) {
-				dma_addr_t src = pq16_get_src(descs, i);
-
-				ioat_unmap(pdev, src - offset, len,
-					   PCI_DMA_TODEVICE, flags, 0);
-			}
-
-			/* the dests are sources in pq validate operations */
-			if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
-				if (!(flags & DMA_PREP_PQ_DISABLE_P))
-					ioat_unmap(pdev, pq->p_addr - offset,
-						   len, PCI_DMA_TODEVICE,
-						   flags, 0);
-				if (!(flags & DMA_PREP_PQ_DISABLE_Q))
-					ioat_unmap(pdev, pq->q_addr - offset,
-						   len, PCI_DMA_TODEVICE,
-						   flags, 0);
-				break;
-			}
-		}
-
-		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-			if (!(flags & DMA_PREP_PQ_DISABLE_P))
-				ioat_unmap(pdev, pq->p_addr - offset, len,
-					   PCI_DMA_BIDIRECTIONAL, flags, 1);
-			if (!(flags & DMA_PREP_PQ_DISABLE_Q))
-				ioat_unmap(pdev, pq->q_addr - offset, len,
-					   PCI_DMA_BIDIRECTIONAL, flags, 1);
-		}
-		break;
-	}
-	default:
-		dev_err(&pdev->dev, "%s: unknown op type: %#x\n",
-			__func__, desc->hw->ctl_f.op);
-	}
-}
-
 static bool desc_has_ext(struct ioat_ring_ent *desc)
 {
 	struct ioat_dma_descriptor *hw = desc->hw;
@@ -578,7 +413,6 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
 		if (tx->cookie) {
 			dma_cookie_complete(tx);
 			dma_descriptor_unmap(tx);
-			ioat3_dma_unmap(ioat, desc, idx + i);
 			if (tx->callback) {
 				tx->callback(tx->callback_param);
 				tx->callback = NULL;
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 8f6e426590eb..173e26ff18f8 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -61,80 +61,6 @@ static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
 	}
 }
 
-static void
-iop_desc_unmap(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
-{
-	struct dma_async_tx_descriptor *tx = &desc->async_tx;
-	struct iop_adma_desc_slot *unmap = desc->group_head;
-	struct device *dev = &iop_chan->device->pdev->dev;
-	u32 len = unmap->unmap_len;
-	enum dma_ctrl_flags flags = tx->flags;
-	u32 src_cnt;
-	dma_addr_t addr;
-	dma_addr_t dest;
-
-	src_cnt = unmap->unmap_src_cnt;
-	dest = iop_desc_get_dest_addr(unmap, iop_chan);
-	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-		enum dma_data_direction dir;
-
-		if (src_cnt > 1) /* is xor? */
-			dir = DMA_BIDIRECTIONAL;
-		else
-			dir = DMA_FROM_DEVICE;
-
-		dma_unmap_page(dev, dest, len, dir);
-	}
-
-	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-		while (src_cnt--) {
-			addr = iop_desc_get_src_addr(unmap, iop_chan, src_cnt);
-			if (addr == dest)
-				continue;
-			dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
-		}
-	}
-	desc->group_head = NULL;
-}
-
-static void
-iop_desc_unmap_pq(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
-{
-	struct dma_async_tx_descriptor *tx = &desc->async_tx;
-	struct iop_adma_desc_slot *unmap = desc->group_head;
-	struct device *dev = &iop_chan->device->pdev->dev;
-	u32 len = unmap->unmap_len;
-	enum dma_ctrl_flags flags = tx->flags;
-	u32 src_cnt = unmap->unmap_src_cnt;
-	dma_addr_t pdest = iop_desc_get_dest_addr(unmap, iop_chan);
-	dma_addr_t qdest = iop_desc_get_qdest_addr(unmap, iop_chan);
-	int i;
-
-	if (tx->flags & DMA_PREP_CONTINUE)
-		src_cnt -= 3;
-
-	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP) && !desc->pq_check_result) {
-		dma_unmap_page(dev, pdest, len, DMA_BIDIRECTIONAL);
-		dma_unmap_page(dev, qdest, len, DMA_BIDIRECTIONAL);
-	}
-
-	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-		dma_addr_t addr;
-
-		for (i = 0; i < src_cnt; i++) {
-			addr = iop_desc_get_src_addr(unmap, iop_chan, i);
-			dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
-		}
-		if (desc->pq_check_result) {
-			dma_unmap_page(dev, pdest, len, DMA_TO_DEVICE);
-			dma_unmap_page(dev, qdest, len, DMA_TO_DEVICE);
-		}
-	}
-
-	desc->group_head = NULL;
-}
-
-
 static dma_cookie_t
 iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
 	struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
@@ -153,15 +79,8 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
 			tx->callback(tx->callback_param);
 
 		dma_descriptor_unmap(tx);
-		/* unmap dma addresses
-		 * (unmap_single vs unmap_page?)
-		 */
-		if (desc->group_head && desc->unmap_len) {
-			if (iop_desc_is_pq(desc))
-				iop_desc_unmap_pq(iop_chan, desc);
-			else
-				iop_desc_unmap(iop_chan, desc);
-		}
+		if (desc->group_head)
+			desc->group_head = NULL;
 	}
 
 	/* run dependent operations */
@@ -592,7 +511,6 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
 	if (sw_desc) {
 		grp_start = sw_desc->group_head;
 		iop_desc_init_interrupt(grp_start, iop_chan);
-		grp_start->unmap_len = 0;
 		sw_desc->async_tx.flags = flags;
 	}
 	spin_unlock_bh(&iop_chan->lock);
@@ -624,8 +542,6 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 		iop_desc_set_byte_count(grp_start, iop_chan, len);
 		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 		iop_desc_set_memcpy_src_addr(grp_start, dma_src);
-		sw_desc->unmap_src_cnt = 1;
-		sw_desc->unmap_len = len;
 		sw_desc->async_tx.flags = flags;
 	}
 	spin_unlock_bh(&iop_chan->lock);
@@ -658,8 +574,6 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 		iop_desc_init_xor(grp_start, src_cnt, flags);
 		iop_desc_set_byte_count(grp_start, iop_chan, len);
 		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
-		sw_desc->unmap_src_cnt = src_cnt;
-		sw_desc->unmap_len = len;
 		sw_desc->async_tx.flags = flags;
 		while (src_cnt--)
 			iop_desc_set_xor_src_addr(grp_start, src_cnt,
@@ -695,8 +609,6 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
 		grp_start->xor_check_result = result;
 		pr_debug("\t%s: grp_start->xor_check_result: %p\n",
 			__func__, grp_start->xor_check_result);
-		sw_desc->unmap_src_cnt = src_cnt;
-		sw_desc->unmap_len = len;
 		sw_desc->async_tx.flags = flags;
 		while (src_cnt--)
 			iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
@@ -749,8 +661,6 @@ iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
 			dst[0] = dst[1] & 0x7;
 
 		iop_desc_set_pq_addr(g, dst);
-		sw_desc->unmap_src_cnt = src_cnt;
-		sw_desc->unmap_len = len;
 		sw_desc->async_tx.flags = flags;
 		for (i = 0; i < src_cnt; i++)
 			iop_desc_set_pq_src_addr(g, i, src[i], scf[i]);
@@ -805,8 +715,6 @@ iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
 		g->pq_check_result = pqres;
 		pr_debug("\t%s: g->pq_check_result: %p\n",
 			__func__, g->pq_check_result);
-		sw_desc->unmap_src_cnt = src_cnt+2;
-		sw_desc->unmap_len = len;
 		sw_desc->async_tx.flags = flags;
 		while (src_cnt--)
 			iop_desc_set_pq_zero_sum_src_addr(g, src_cnt,
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index ed1ab1d0875e..17326e780e23 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -60,14 +60,6 @@ static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
 	return hw_desc->phy_dest_addr;
 }
 
-static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
-				int src_idx)
-{
-	struct mv_xor_desc *hw_desc = desc->hw_desc;
-	return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)];
-}
-
-
 static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
 				   u32 byte_count)
 {
@@ -279,42 +271,8 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
 			desc->async_tx.callback_param);
 
 	dma_descriptor_unmap(&desc->async_tx);
-	/* unmap dma addresses
-	 * (unmap_single vs unmap_page?)
-	 */
-	if (desc->group_head && desc->unmap_len) {
-		struct mv_xor_desc_slot *unmap = desc->group_head;
-		struct device *dev = mv_chan_to_devp(mv_chan);
-		u32 len = unmap->unmap_len;
-		enum dma_ctrl_flags flags = desc->async_tx.flags;
-		u32 src_cnt;
-		dma_addr_t addr;
-		dma_addr_t dest;
-
-		src_cnt = unmap->unmap_src_cnt;
-		dest = mv_desc_get_dest_addr(unmap);
-		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-			enum dma_data_direction dir;
-
-			if (src_cnt > 1) /* is xor ? */
-				dir = DMA_BIDIRECTIONAL;
-			else
-				dir = DMA_FROM_DEVICE;
-			dma_unmap_page(dev, dest, len, dir);
-		}
-
-		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-			while (src_cnt--) {
-				addr = mv_desc_get_src_addr(unmap,
-							    src_cnt);
-				if (addr == dest)
-					continue;
-				dma_unmap_page(dev, addr, len,
-					       DMA_TO_DEVICE);
-			}
-		}
+	if (desc->group_head)
 		desc->group_head = NULL;
-	}
 }
 
 /* run dependent operations */
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 442492da7415..429be432ab7e 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -802,218 +802,6 @@ static void ppc440spe_desc_set_link(struct ppc440spe_adma_chan *chan,
 }
 
 /**
- * ppc440spe_desc_get_src_addr - extract the source address from the descriptor
- */
-static u32 ppc440spe_desc_get_src_addr(struct ppc440spe_adma_desc_slot *desc,
-				struct ppc440spe_adma_chan *chan, int src_idx)
-{
-	struct dma_cdb *dma_hw_desc;
-	struct xor_cb *xor_hw_desc;
-
-	switch (chan->device->id) {
-	case PPC440SPE_DMA0_ID:
-	case PPC440SPE_DMA1_ID:
-		dma_hw_desc = desc->hw_desc;
-		/* May have 0, 1, 2, or 3 sources */
-		switch (dma_hw_desc->opc) {
-		case DMA_CDB_OPC_NO_OP:
-		case DMA_CDB_OPC_DFILL128:
-			return 0;
-		case DMA_CDB_OPC_DCHECK128:
-			if (unlikely(src_idx)) {
-				printk(KERN_ERR "%s: try to get %d source for"
-					" DCHECK128\n", __func__, src_idx);
-				BUG();
-			}
-			return le32_to_cpu(dma_hw_desc->sg1l);
-		case DMA_CDB_OPC_MULTICAST:
-		case DMA_CDB_OPC_MV_SG1_SG2:
-			if (unlikely(src_idx > 2)) {
-				printk(KERN_ERR "%s: try to get %d source from"
-					" DMA descr\n", __func__, src_idx);
-				BUG();
-			}
-			if (src_idx) {
-				if (le32_to_cpu(dma_hw_desc->sg1u) &
-				    DMA_CUED_XOR_WIN_MSK) {
-					u8 region;
-
-					if (src_idx == 1)
-						return le32_to_cpu(
-						    dma_hw_desc->sg1l) +
-							desc->unmap_len;
-
-					region = (le32_to_cpu(
-					    dma_hw_desc->sg1u)) >>
-						DMA_CUED_REGION_OFF;
-
-					region &= DMA_CUED_REGION_MSK;
-					switch (region) {
-					case DMA_RXOR123:
-						return le32_to_cpu(
-						    dma_hw_desc->sg1l) +
-							(desc->unmap_len << 1);
-					case DMA_RXOR124:
-						return le32_to_cpu(
-						    dma_hw_desc->sg1l) +
-							(desc->unmap_len * 3);
-					case DMA_RXOR125:
-						return le32_to_cpu(
-						    dma_hw_desc->sg1l) +
-							(desc->unmap_len << 2);
-					default:
-						printk(KERN_ERR
-						    "%s: try to"
-						    " get src3 for region %02x"
-						    "PPC440SPE_DESC_RXOR12?\n",
-						    __func__, region);
-						BUG();
-					}
-				} else {
-					printk(KERN_ERR
-						"%s: try to get %d"
-						" source for non-cued descr\n",
-						__func__, src_idx);
-					BUG();
-				}
-			}
-			return le32_to_cpu(dma_hw_desc->sg1l);
-		default:
-			printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
-				__func__, dma_hw_desc->opc);
-			BUG();
-		}
-		return le32_to_cpu(dma_hw_desc->sg1l);
-	case PPC440SPE_XOR_ID:
-		/* May have up to 16 sources */
-		xor_hw_desc = desc->hw_desc;
-		return xor_hw_desc->ops[src_idx].l;
-	}
-	return 0;
-}
-
-/**
- * ppc440spe_desc_get_dest_addr - extract the destination address from the
- * descriptor
- */
-static u32 ppc440spe_desc_get_dest_addr(struct ppc440spe_adma_desc_slot *desc,
-				struct ppc440spe_adma_chan *chan, int idx)
-{
-	struct dma_cdb *dma_hw_desc;
-	struct xor_cb *xor_hw_desc;
-
-	switch (chan->device->id) {
-	case PPC440SPE_DMA0_ID:
-	case PPC440SPE_DMA1_ID:
-		dma_hw_desc = desc->hw_desc;
-
-		if (likely(!idx))
-			return le32_to_cpu(dma_hw_desc->sg2l);
-		return le32_to_cpu(dma_hw_desc->sg3l);
-	case PPC440SPE_XOR_ID:
-		xor_hw_desc = desc->hw_desc;
-		return xor_hw_desc->cbtal;
-	}
-	return 0;
-}
-
-/**
- * ppc440spe_desc_get_src_num - extract the number of source addresses from
- * the descriptor
- */
-static u32 ppc440spe_desc_get_src_num(struct ppc440spe_adma_desc_slot *desc,
-				struct ppc440spe_adma_chan *chan)
-{
-	struct dma_cdb *dma_hw_desc;
-	struct xor_cb *xor_hw_desc;
-
-	switch (chan->device->id) {
-	case PPC440SPE_DMA0_ID:
-	case PPC440SPE_DMA1_ID:
-		dma_hw_desc = desc->hw_desc;
-
-		switch (dma_hw_desc->opc) {
-		case DMA_CDB_OPC_NO_OP:
-		case DMA_CDB_OPC_DFILL128:
-			return 0;
-		case DMA_CDB_OPC_DCHECK128:
-			return 1;
-		case DMA_CDB_OPC_MV_SG1_SG2:
-		case DMA_CDB_OPC_MULTICAST:
-			/*
-			 * Only for RXOR operations we have more than
-			 * one source
-			 */
-			if (le32_to_cpu(dma_hw_desc->sg1u) &
-			    DMA_CUED_XOR_WIN_MSK) {
-				/* RXOR op, there are 2 or 3 sources */
-				if (((le32_to_cpu(dma_hw_desc->sg1u) >>
-				    DMA_CUED_REGION_OFF) &
-				      DMA_CUED_REGION_MSK) == DMA_RXOR12) {
-					/* RXOR 1-2 */
-					return 2;
-				} else {
-					/* RXOR 1-2-3/1-2-4/1-2-5 */
-					return 3;
-				}
-			}
-			return 1;
-		default:
-			printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
-				__func__, dma_hw_desc->opc);
-			BUG();
-		}
-	case PPC440SPE_XOR_ID:
-		/* up to 16 sources */
-		xor_hw_desc = desc->hw_desc;
-		return xor_hw_desc->cbc & XOR_CDCR_OAC_MSK;
-	default:
-		BUG();
-	}
-	return 0;
-}
-
-/**
- * ppc440spe_desc_get_dst_num - get the number of destination addresses in
- * this descriptor
- */
-static u32 ppc440spe_desc_get_dst_num(struct ppc440spe_adma_desc_slot *desc,
-				struct ppc440spe_adma_chan *chan)
-{
-	struct dma_cdb *dma_hw_desc;
-
-	switch (chan->device->id) {
-	case PPC440SPE_DMA0_ID:
-	case PPC440SPE_DMA1_ID:
-		/* May be 1 or 2 destinations */
-		dma_hw_desc = desc->hw_desc;
-		switch (dma_hw_desc->opc) {
-		case DMA_CDB_OPC_NO_OP:
-		case DMA_CDB_OPC_DCHECK128:
-			return 0;
-		case DMA_CDB_OPC_MV_SG1_SG2:
-		case DMA_CDB_OPC_DFILL128:
-			return 1;
-		case DMA_CDB_OPC_MULTICAST:
-			if (desc->dst_cnt == 2)
-				return 2;
-			else
-				return 1;
-		default:
-			printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
-				__func__, dma_hw_desc->opc);
-			BUG();
-		}
-	case PPC440SPE_XOR_ID:
-		/* Always only 1 destination */
-		return 1;
-	default:
-		BUG();
-	}
-	return 0;
-}
-
-/**
  * ppc440spe_desc_get_link - get the address of the descriptor that
  * follows this one
  */
@@ -1705,43 +1493,6 @@ static void ppc440spe_adma_free_slots(struct ppc440spe_adma_desc_slot *slot,
 	}
 }
 
-static void ppc440spe_adma_unmap(struct ppc440spe_adma_chan *chan,
-				 struct ppc440spe_adma_desc_slot *desc)
-{
-	u32 src_cnt, dst_cnt;
-	dma_addr_t addr;
-
-	/*
-	 * get the number of sources & destination
-	 * included in this descriptor and unmap
-	 * them all
-	 */
-	src_cnt = ppc440spe_desc_get_src_num(desc, chan);
-	dst_cnt = ppc440spe_desc_get_dst_num(desc, chan);
-
-	/* unmap destinations */
-	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-		while (dst_cnt--) {
-			addr = ppc440spe_desc_get_dest_addr(
-				desc, chan, dst_cnt);
-			dma_unmap_page(chan->device->dev,
-					addr, desc->unmap_len,
-					DMA_FROM_DEVICE);
-		}
-	}
-
-	/* unmap sources */
-	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-		while (src_cnt--) {
-			addr = ppc440spe_desc_get_src_addr(
-				desc, chan, src_cnt);
-			dma_unmap_page(chan->device->dev,
-					addr, desc->unmap_len,
-					DMA_TO_DEVICE);
-		}
-	}
-}
-
 /**
  * ppc440spe_adma_run_tx_complete_actions - call functions to be called
  * upon completion
@@ -1766,26 +1517,6 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
 			desc->async_tx.callback_param);
 
 		dma_descriptor_unmap(&desc->async_tx);
-		/* unmap dma addresses
-		 * (unmap_single vs unmap_page?)
-		 *
-		 * actually, ppc's dma_unmap_page() functions are empty, so
-		 * the following code is just for the sake of completeness
-		 */
-		if (chan && chan->needs_unmap && desc->group_head &&
-		    desc->unmap_len) {
-			struct ppc440spe_adma_desc_slot *unmap =
-				desc->group_head;
-			/* assume 1 slot per op always */
-			u32 slot_count = unmap->slot_cnt;
-
-			/* Run through the group list and unmap addresses */
-			for (i = 0; i < slot_count; i++) {
-				BUG_ON(!unmap);
-				ppc440spe_adma_unmap(chan, unmap);
-				unmap = unmap->hw_next;
-			}
-		}
 	}
 
 	/* run dependent operations */
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 1d0c98839087..4506a7b4f972 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -154,38 +154,6 @@ static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
 	return done;
 }
 
-static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
-	bool single)
-{
-	dma_addr_t addr;
-	int len;
-
-	addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) |
-		dma_desc[4];
-
-	len = (dma_desc[3] << 8) | dma_desc[2];
-
-	if (single)
-		dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
-			DMA_TO_DEVICE);
-	else
-		dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
-			DMA_TO_DEVICE);
-}
-
-static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
-{
-	struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan,
-		struct timb_dma_chan, chan);
-	u8 *descs;
-
-	for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) {
-		__td_unmap_desc(td_chan, descs, single);
-		if (descs[0] & 0x02)
-			break;
-	}
-}
-
 static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
 	struct scatterlist *sg, bool last)
 {
@@ -294,10 +262,6 @@ static void __td_finish(struct timb_dma_chan *td_chan)
 	list_move(&td_desc->desc_node, &td_chan->free_list);
 
 	dma_descriptor_unmap(txd);
-	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
-		__td_unmap_descs(td_desc,
-			txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE);
-
 	/*
 	 * The API requires that no submissions are done from a
 	 * callback, so we don't need to drop the lock here
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 22a0b6c78c77..6f2729874016 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -420,30 +420,6 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
 	list_move(&desc->desc_node, &dc->free_list);
 
 	dma_descriptor_unmap(txd);
-	if (!ds) {
-		dma_addr_t dmaaddr;
-		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-			dmaaddr = is_dmac64(dc) ?
-				desc->hwdesc.DAR : desc->hwdesc32.DAR;
-			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
-				dma_unmap_single(chan2parent(&dc->chan),
-					dmaaddr, desc->len, DMA_FROM_DEVICE);
-			else
-				dma_unmap_page(chan2parent(&dc->chan),
-					dmaaddr, desc->len, DMA_FROM_DEVICE);
-		}
-		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-			dmaaddr = is_dmac64(dc) ?
-				desc->hwdesc.SAR : desc->hwdesc32.SAR;
-			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
-				dma_unmap_single(chan2parent(&dc->chan),
-					dmaaddr, desc->len, DMA_TO_DEVICE);
-			else
-				dma_unmap_page(chan2parent(&dc->chan),
-					dmaaddr, desc->len, DMA_TO_DEVICE);
-		}
-	}
-
 	/*
 	 * The API requires that no submissions are done from a
 	 * callback, so we don't need to drop the lock here