diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-26 12:24:48 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-26 12:24:48 -0500 |
commit | 5115f3c19d17851aaff5a857f55b4a019c908775 (patch) | |
tree | 0d02cf01e12e86365f4f5e3b234f986daef181a7 /drivers/dma/tegra20-apb-dma.c | |
parent | c41b3810c09e60664433548c5218cc6ece6a8903 (diff) | |
parent | 17166a3b6e88b93189e6be5f7e1335a3cc4fa965 (diff) |
Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dmaengine updates from Vinod Koul:
"This is fairly big pull by my standards as I had missed last merge
window. So we have the support for device tree for slave-dmaengine,
large updates to dw_dmac driver from Andy for reusing on different
architectures. Along with this we have fixes on bunch of the drivers"
Fix up trivial conflicts, usually due to #include line movement next to
each other.
* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (111 commits)
Revert "ARM: SPEAr13xx: Pass DW DMAC platform data from DT"
ARM: dts: pl330: Add #dma-cells for generic dma binding support
DMA: PL330: Register the DMA controller with the generic DMA helpers
DMA: PL330: Add xlate function
DMA: PL330: Add new pl330 filter for DT case.
dma: tegra20-apb-dma: remove unnecessary assignment
edma: do not waste memory for dma_mask
dma: coh901318: set residue only if dma is in progress
dma: coh901318: avoid unbalanced locking
dmaengine.h: remove redundant else keyword
dma: of-dma: protect list write operation by spin_lock
dmaengine: ste_dma40: do not remove descriptors for cyclic transfers
dma: of-dma.c: fix memory leakage
dw_dmac: apply default dma_mask if needed
dmaengine: ioat - fix spare sparse complain
dmaengine: move drivers/of/dma.c -> drivers/dma/of-dma.c
ioatdma: fix race between updating ioat->head and IOAT_COMPLETION_PENDING
dw_dmac: add support for Lynxpoint DMA controllers
dw_dmac: return proper residue value
dw_dmac: fill individual length of descriptor
...
Diffstat (limited to 'drivers/dma/tegra20-apb-dma.c')
-rw-r--r-- | drivers/dma/tegra20-apb-dma.c | 55 |
1 file changed, 49 insertions(+), 6 deletions(-)
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index f6c018f1b453..fcee27eae1f6 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -63,6 +63,9 @@
 #define TEGRA_APBDMA_STATUS_COUNT_SHIFT		2
 #define TEGRA_APBDMA_STATUS_COUNT_MASK		0xFFFC
 
+#define TEGRA_APBDMA_CHAN_CSRE			0x00C
+#define TEGRA_APBDMA_CHAN_CSRE_PAUSE		(1 << 31)
+
 /* AHB memory address */
 #define TEGRA_APBDMA_CHAN_AHBPTR		0x010
 
@@ -113,10 +116,12 @@ struct tegra_dma;
  * tegra_dma_chip_data Tegra chip specific DMA data
  * @nr_channels: Number of channels available in the controller.
  * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
+ * @support_channel_pause: Support channel wise pause of dma.
  */
 struct tegra_dma_chip_data {
 	int nr_channels;
 	int max_dma_count;
+	bool support_channel_pause;
 };
 
 /* DMA channel registers */
@@ -355,6 +360,32 @@ static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
 	spin_unlock(&tdma->global_lock);
 }
 
+static void tegra_dma_pause(struct tegra_dma_channel *tdc,
+	bool wait_for_burst_complete)
+{
+	struct tegra_dma *tdma = tdc->tdma;
+
+	if (tdma->chip_data->support_channel_pause) {
+		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
+				TEGRA_APBDMA_CHAN_CSRE_PAUSE);
+		if (wait_for_burst_complete)
+			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
+	} else {
+		tegra_dma_global_pause(tdc, wait_for_burst_complete);
+	}
+}
+
+static void tegra_dma_resume(struct tegra_dma_channel *tdc)
+{
+	struct tegra_dma *tdma = tdc->tdma;
+
+	if (tdma->chip_data->support_channel_pause) {
+		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
+	} else {
+		tegra_dma_global_resume(tdc);
+	}
+}
+
 static void tegra_dma_stop(struct tegra_dma_channel *tdc)
 {
 	u32 csr;
@@ -410,7 +441,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
 	 * If there is already IEC status then interrupt handler need to
 	 * load new configuration.
 	 */
-	tegra_dma_global_pause(tdc, false);
+	tegra_dma_pause(tdc, false);
 	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
 
 	/*
@@ -420,7 +451,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
 	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
 		dev_err(tdc2dev(tdc),
 			"Skipping new configuration as interrupt is pending\n");
-		tegra_dma_global_resume(tdc);
+		tegra_dma_resume(tdc);
 		return;
 	}
 
@@ -431,7 +462,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
 		nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
 	nsg_req->configured = true;
 
-	tegra_dma_global_resume(tdc);
+	tegra_dma_resume(tdc);
 }
 
 static void tdc_start_head_req(struct tegra_dma_channel *tdc)
@@ -692,7 +723,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
 		goto skip_dma_stop;
 
 	/* Pause DMA before checking the queue status */
-	tegra_dma_global_pause(tdc, true);
+	tegra_dma_pause(tdc, true);
 
 	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
 	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
@@ -710,7 +741,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
 		sgreq->dma_desc->bytes_transferred +=
 				get_current_xferred_count(tdc, sgreq, status);
 	}
-	tegra_dma_global_resume(tdc);
+	tegra_dma_resume(tdc);
 
 skip_dma_stop:
 	tegra_dma_abort_all(tdc);
@@ -738,7 +769,6 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
 
 	ret = dma_cookie_status(dc, cookie, txstate);
 	if (ret == DMA_SUCCESS) {
-		dma_set_residue(txstate, 0);
 		spin_unlock_irqrestore(&tdc->lock, flags);
 		return ret;
 	}
@@ -1180,6 +1210,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
 static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
 	.nr_channels		= 16,
 	.max_dma_count		= 1024UL * 64,
+	.support_channel_pause	= false,
 };
 
 #if defined(CONFIG_OF)
@@ -1187,10 +1218,22 @@ static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
 static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
 	.nr_channels		= 32,
 	.max_dma_count		= 1024UL * 64,
+	.support_channel_pause	= false,
 };
 
+/* Tegra114 specific DMA controller information */
+static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
+	.nr_channels		= 32,
+	.max_dma_count		= 1024UL * 64,
+	.support_channel_pause	= true,
+};
+
+
 static const struct of_device_id tegra_dma_of_match[] = {
 	{
+		.compatible = "nvidia,tegra114-apbdma",
+		.data = &tegra114_dma_chip_data,
+	}, {
 		.compatible = "nvidia,tegra30-apbdma",
 		.data = &tegra30_dma_chip_data,
 	}, {