30 files changed, 2541 insertions, 1011 deletions
diff --git a/Documentation/dmaengine.txt b/Documentation/dmaengine.txt
index 5a0cb1ef6164..94b7e0f96b38 100644
--- a/Documentation/dmaengine.txt
+++ b/Documentation/dmaengine.txt
| @@ -10,87 +10,181 @@ NOTE: For DMA Engine usage in async_tx please see: | |||
| 10 | Below is a guide to device driver writers on how to use the Slave-DMA API of the | 10 | Below is a guide to device driver writers on how to use the Slave-DMA API of the |
| 11 | DMA Engine. This is applicable only for slave DMA usage. | 11 | DMA Engine. This is applicable only for slave DMA usage. |
| 12 | 12 | ||
| 13 | The slave DMA usage consists of following steps | 13 | The slave DMA usage consists of following steps: |
| 14 | 1. Allocate a DMA slave channel | 14 | 1. Allocate a DMA slave channel |
| 15 | 2. Set slave and controller specific parameters | 15 | 2. Set slave and controller specific parameters |
| 16 | 3. Get a descriptor for transaction | 16 | 3. Get a descriptor for transaction |
| 17 | 4. Submit the transaction and wait for callback notification | 17 | 4. Submit the transaction |
| 18 | 5. Issue pending requests and wait for callback notification | ||
| 18 | 19 | ||
| 19 | 1. Allocate a DMA slave channel | 20 | 1. Allocate a DMA slave channel |
| 20 | Channel allocation is slightly different in the slave DMA context, client | 21 | |
| 21 | drivers typically need a channel from a particular DMA controller only and even | 22 | Channel allocation is slightly different in the slave DMA context, |
| 22 | in some cases a specific channel is desired. To request a channel | 23 | client drivers typically need a channel from a particular DMA |
| 23 | dma_request_channel() API is used. | 24 | controller only and even in some cases a specific channel is desired. |
| 24 | 25 | To request a channel dma_request_channel() API is used. | |
| 25 | Interface: | 26 | |
| 26 | struct dma_chan *dma_request_channel(dma_cap_mask_t mask, | 27 | Interface: |
| 27 | dma_filter_fn filter_fn, | 28 | struct dma_chan *dma_request_channel(dma_cap_mask_t mask, |
| 28 | void *filter_param); | 29 | dma_filter_fn filter_fn, |
| 29 | where dma_filter_fn is defined as: | 30 | void *filter_param); |
| 30 | typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); | 31 | where dma_filter_fn is defined as: |
| 31 | 32 | typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); | |
| 32 | When the optional 'filter_fn' parameter is set to NULL dma_request_channel | 33 | |
| 33 | simply returns the first channel that satisfies the capability mask. Otherwise, | 34 | The 'filter_fn' parameter is optional, but highly recommended for |
| 34 | when the mask parameter is insufficient for specifying the necessary channel, | 35 | slave and cyclic channels as they typically need to obtain a specific |
| 35 | the filter_fn routine can be used to disposition the available channels in the | 36 | DMA channel. |
| 36 | system. The filter_fn routine is called once for each free channel in the | 37 | |
| 37 | system. Upon seeing a suitable channel filter_fn returns DMA_ACK which flags | 38 | When the optional 'filter_fn' parameter is NULL, dma_request_channel() |
| 38 | that channel to be the return value from dma_request_channel. A channel | 39 | simply returns the first channel that satisfies the capability mask. |
| 39 | allocated via this interface is exclusive to the caller, until | 40 | |
| 40 | dma_release_channel() is called. | 41 | Otherwise, the 'filter_fn' routine will be called once for each free |
| 42 | channel which has a capability in 'mask'. 'filter_fn' is expected to | ||
| 43 | return 'true' when the desired DMA channel is found. | ||
| 44 | |||
| 45 | A channel allocated via this interface is exclusive to the caller, | ||
| 46 | until dma_release_channel() is called. | ||
| 41 | 47 | ||
| 42 | 2. Set slave and controller specific parameters | 48 | 2. Set slave and controller specific parameters |
| 43 | Next step is always to pass some specific information to the DMA driver. Most of | 49 | |
| 44 | the generic information which a slave DMA can use is in struct dma_slave_config. | 50 | Next step is always to pass some specific information to the DMA |
| 45 | It allows the clients to specify DMA direction, DMA addresses, bus widths, DMA | 51 | driver. Most of the generic information which a slave DMA can use |
| 46 | burst lengths etc. If some DMA controllers have more parameters to be sent then | 52 | is in struct dma_slave_config. This allows the clients to specify |
| 47 | they should try to embed struct dma_slave_config in their controller specific | 53 | DMA direction, DMA addresses, bus widths, DMA burst lengths etc |
| 48 | structure. That gives flexibility to client to pass more parameters, if | 54 | for the peripheral. |
| 49 | required. | 55 | |
| 50 | 56 | If some DMA controllers have more parameters to be sent then they | |
| 51 | Interface: | 57 | should try to embed struct dma_slave_config in their controller |
| 52 | int dmaengine_slave_config(struct dma_chan *chan, | 58 | specific structure. That gives flexibility to client to pass more |
| 53 | struct dma_slave_config *config) | 59 | parameters, if required. |
| 60 | |||
| 61 | Interface: | ||
| 62 | int dmaengine_slave_config(struct dma_chan *chan, | ||
| 63 | struct dma_slave_config *config) | ||
| 64 | |||
| 65 | Please see the dma_slave_config structure definition in dmaengine.h | ||
| 66 | for a detailed explanation of the struct members. Please note | ||
| 67 | that the 'direction' member will be going away as it duplicates the | ||
| 68 | direction given in the prepare call. | ||
| 54 | 69 | ||
| 55 | 3. Get a descriptor for transaction | 70 | 3. Get a descriptor for transaction |
| 56 | For slave usage the various modes of slave transfers supported by the | 71 | |
| 57 | DMA-engine are: | 72 | For slave usage the various modes of slave transfers supported by the |
| 58 | slave_sg - DMA a list of scatter gather buffers from/to a peripheral | 73 | DMA-engine are: |
| 59 | dma_cyclic - Perform a cyclic DMA operation from/to a peripheral till the | 74 | |
| 75 | slave_sg - DMA a list of scatter gather buffers from/to a peripheral | ||
| 76 | dma_cyclic - Perform a cyclic DMA operation from/to a peripheral till the | ||
| 60 | operation is explicitly stopped. | 77 | operation is explicitly stopped. |
| 61 | The non NULL return of this transfer API represents a "descriptor" for the given | 78 | |
| 62 | transaction. | 79 | A non-NULL return of this transfer API represents a "descriptor" for |
| 63 | 80 | the given transaction. | |
| 64 | Interface: | 81 | |
| 65 | struct dma_async_tx_descriptor *(*chan->device->device_prep_dma_sg)( | 82 | Interface: |
| 66 | struct dma_chan *chan, | 83 | struct dma_async_tx_descriptor *(*chan->device->device_prep_slave_sg)( |
| 67 | struct scatterlist *dst_sg, unsigned int dst_nents, | 84 | struct dma_chan *chan, struct scatterlist *sgl, |
| 68 | struct scatterlist *src_sg, unsigned int src_nents, | 85 | unsigned int sg_len, enum dma_data_direction direction, |
| 69 | unsigned long flags); | 86 | unsigned long flags); |
| 70 | struct dma_async_tx_descriptor *(*chan->device->device_prep_dma_cyclic)( | 87 | |
| 88 | struct dma_async_tx_descriptor *(*chan->device->device_prep_dma_cyclic)( | ||
| 71 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | 89 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, |
| 72 | size_t period_len, enum dma_data_direction direction); | 90 | size_t period_len, enum dma_data_direction direction); |
| 73 | 91 | ||
| 74 | 4. Submit the transaction and wait for callback notification | 92 | The peripheral driver is expected to have mapped the scatterlist for |
| 75 | To schedule the transaction to be scheduled by dma device, the "descriptor" | 93 | the DMA operation prior to calling device_prep_slave_sg, and must |
| 76 | returned in above (3) needs to be submitted. | 94 | keep the scatterlist mapped until the DMA operation has completed. |
| 77 | To tell the dma driver that a transaction is ready to be serviced, the | 95 | The scatterlist must be mapped using the DMA struct device. So, |
| 78 | descriptor->submit() callback needs to be invoked. This chains the descriptor to | 96 | normal setup should look like this: |
| 79 | the pending queue. | 97 | |
| 80 | The transactions in the pending queue can be activated by calling the | 98 | nr_sg = dma_map_sg(chan->device->dev, sgl, sg_len, direction); |
| 81 | issue_pending API. If channel is idle then the first transaction in queue is | 99 | if (nr_sg == 0) |
| 82 | started and subsequent ones queued up. | 100 | /* error */ |
| 83 | On completion of the DMA operation the next in queue is submitted and a tasklet | 101 | |
| 84 | triggered. The tasklet would then call the client driver completion callback | 102 | desc = chan->device->device_prep_slave_sg(chan, sgl, nr_sg, |
| 85 | routine for notification, if set. | 103 | direction, flags); |
| 86 | Interface: | 104 | |
| 87 | void dma_async_issue_pending(struct dma_chan *chan); | 105 | Once a descriptor has been obtained, the callback information can be |
| 88 | 106 | added and the descriptor must then be submitted. Some DMA engine | |
| 89 | ============================================================================== | 107 | drivers may hold a spinlock between a successful preparation and |
| 90 | 108 | submission so it is important that these two operations are closely | |
| 91 | Additional usage notes for dma driver writers | 109 | paired. |
| 92 | 1/ Although DMA engine specifies that completion callback routines cannot submit | 110 | |
| 93 | any new operations, but typically for slave DMA subsequent transaction may not | 111 | Note: |
| 94 | be available for submit prior to callback routine being called. This requirement | 112 | Although the async_tx API specifies that completion callback |
| 95 | is not a requirement for DMA-slave devices. But they should take care to drop | 113 | routines cannot submit any new operations, this is not the |
| 96 | the spin-lock they might be holding before calling the callback routine | 114 | case for slave/cyclic DMA. |
| 115 | |||
| 116 | For slave DMA, the subsequent transaction may not be available | ||
| 117 | for submission prior to callback function being invoked, so | ||
| 118 | slave DMA callbacks are permitted to prepare and submit a new | ||
| 119 | transaction. | ||
| 120 | |||
| 121 | For cyclic DMA, a callback function may wish to terminate the | ||
| 122 | DMA via dmaengine_terminate_all(). | ||
| 123 | |||
| 124 | Therefore, it is important that DMA engine drivers drop any | ||
| 125 | locks before calling the callback function which may cause a | ||
| 126 | deadlock. | ||
| 127 | |||
| 128 | Note that callbacks will always be invoked from the DMA | ||
| 129 | engine's tasklet, never from interrupt context. | ||
| 130 | |||
| 131 | 4. Submit the transaction | ||
| 132 | |||
| 133 | Once the descriptor has been prepared and the callback information | ||
| 134 | added, it must be placed on the DMA engine driver's pending queue. | ||
| 135 | |||
| 136 | Interface: | ||
| 137 | dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc) | ||
| 138 | |||
| 139 | This returns a cookie that can be used to check the progress of DMA engine | ||
| 140 | activity via other DMA engine calls not covered in this document. | ||
| 141 | |||
| 142 | dmaengine_submit() will not start the DMA operation, it merely adds | ||
| 143 | it to the pending queue. For this, see step 5, dma_async_issue_pending. | ||
| 144 | |||
| 145 | 5. Issue pending DMA requests and wait for callback notification | ||
| 146 | |||
| 147 | The transactions in the pending queue can be activated by calling the | ||
| 148 | issue_pending API. If the channel is idle then the first transaction in | ||
| 149 | the queue is started and subsequent ones are queued up. | ||
| 150 | |||
| 151 | On completion of each DMA operation, the next in queue is started and | ||
| 152 | a tasklet triggered. The tasklet will then call the client driver | ||
| 153 | completion callback routine for notification, if set. | ||
| 154 | |||
| 155 | Interface: | ||
| 156 | void dma_async_issue_pending(struct dma_chan *chan); | ||
| 157 | |||
| 158 | Further APIs: | ||
| 159 | |||
| 160 | 1. int dmaengine_terminate_all(struct dma_chan *chan) | ||
| 161 | |||
| 162 | This causes all activity for the DMA channel to be stopped, and may | ||
| 163 | discard data in the DMA FIFO which hasn't been fully transferred. | ||
| 164 | No callback functions will be called for any incomplete transfers. | ||
| 165 | |||
| 166 | 2. int dmaengine_pause(struct dma_chan *chan) | ||
| 167 | |||
| 168 | This pauses activity on the DMA channel without data loss. | ||
| 169 | |||
| 170 | 3. int dmaengine_resume(struct dma_chan *chan) | ||
| 171 | |||
| 172 | Resume a previously paused DMA channel. It is invalid to resume a | ||
| 173 | channel which is not currently paused. | ||
| 174 | |||
| 175 | 4. enum dma_status dma_async_is_tx_complete(struct dma_chan *chan, | ||
| 176 | dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used) | ||
| 177 | |||
| 178 | This can be used to check the status of the channel. Please see | ||
| 179 | the documentation in include/linux/dmaengine.h for a more complete | ||
| 180 | description of this API. | ||
| 181 | |||
| 182 | This can be used in conjunction with dma_async_is_complete() and | ||
| 183 | the cookie returned from 'descriptor->submit()' to check for | ||
| 184 | completion of a specific DMA transaction. | ||
| 185 | |||
| 186 | Note: | ||
| 187 | Not all DMA engine drivers can return reliable information for | ||
| 188 | a running DMA channel. It is recommended that DMA engine users | ||
| 189 | pause or stop (via dmaengine_terminate_all) the channel before | ||
| 190 | using this API. | ||
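
To tie the five documented steps together, the sketch below shows one possible slave RX setup in a client driver. It is illustrative only and not part of this patch: struct my_dev, MY_DEV_RX_FIFO, my_dma_done() and the "my-dma" controller name are assumed placeholders, while the dmaengine calls themselves (dma_request_channel, dmaengine_slave_config, device_prep_slave_sg, dmaengine_submit, dma_async_issue_pending) are the API described above.

    #include <linux/dmaengine.h>
    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    #define MY_DEV_RX_FIFO  0x08            /* hypothetical RX FIFO offset */

    struct my_dev {                         /* hypothetical client device */
        resource_size_t phys_base;
        struct dma_chan *rx_chan;
        dma_cookie_t rx_cookie;
    };

    static void my_dma_done(void *param)
    {
        /* completion handling for the struct my_dev passed below;
           runs from the DMA engine's tasklet, not from IRQ context */
    }

    /* Step 1 helper: accept only channels of the controller we want. */
    static bool my_filter(struct dma_chan *chan, void *filter_param)
    {
        return !strcmp(dev_name(chan->device->dev), filter_param);
    }

    static int my_start_rx(struct my_dev *md, struct scatterlist *sgl,
                           unsigned int sg_len)
    {
        struct dma_slave_config config = { };
        struct dma_async_tx_descriptor *desc;
        struct dma_chan *chan;
        dma_cap_mask_t mask;
        int nr_sg;

        /* 1. Allocate a DMA slave channel */
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        chan = dma_request_channel(mask, my_filter, "my-dma");
        if (!chan)
            return -ENODEV;

        /* 2. Set slave and controller specific parameters */
        config.direction      = DMA_FROM_DEVICE;
        config.src_addr       = md->phys_base + MY_DEV_RX_FIFO;
        config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        config.src_maxburst   = 4;
        dmaengine_slave_config(chan, &config);

        /* 3. Get a descriptor: map the scatterlist with the DMA device */
        nr_sg = dma_map_sg(chan->device->dev, sgl, sg_len, DMA_FROM_DEVICE);
        if (!nr_sg)
            goto err_release;

        desc = chan->device->device_prep_slave_sg(chan, sgl, nr_sg,
                        DMA_FROM_DEVICE,
                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
            goto err_unmap;

        desc->callback       = my_dma_done;
        desc->callback_param = md;

        /* 4. Submit the transaction */
        md->rx_cookie = dmaengine_submit(desc);

        /* 5. Issue pending requests and wait for callback notification */
        dma_async_issue_pending(chan);

        md->rx_chan = chan;
        return 0;

    err_unmap:
        dma_unmap_sg(chan->device->dev, sgl, sg_len, DMA_FROM_DEVICE);
    err_release:
        dma_release_channel(chan);
        return -EIO;
    }

On teardown the client would call dmaengine_terminate_all() if a transfer is still in flight, unmap the scatterlist and finally call dma_release_channel().
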
diff --git a/Documentation/spi/ep93xx_spi b/Documentation/spi/ep93xx_spi
index 6325f5b48635..d8eb01c15db1 100644
--- a/Documentation/spi/ep93xx_spi
+++ b/Documentation/spi/ep93xx_spi
| @@ -88,6 +88,16 @@ static void __init ts72xx_init_machine(void) | |||
| 88 | ARRAY_SIZE(ts72xx_spi_devices)); | 88 | ARRAY_SIZE(ts72xx_spi_devices)); |
| 89 | } | 89 | } |
| 90 | 90 | ||
| 91 | The driver can also use DMA for the transfers. In this case ts72xx_spi_info | ||
| 92 | becomes: | ||
| 93 | |||
| 94 | static struct ep93xx_spi_info ts72xx_spi_info = { | ||
| 95 | .num_chipselect = ARRAY_SIZE(ts72xx_spi_devices), | ||
| 96 | .use_dma = true, | ||
| 97 | }; | ||
| 98 | |||
| 99 | Note that CONFIG_EP93XX_DMA should be enabled as well. | ||
| 100 | |||
| 91 | Thanks to | 101 | Thanks to |
| 92 | ========= | 102 | ========= |
| 93 | Martin Guy, H. Hartley Sweeten and others who helped me during development of | 103 | Martin Guy, H. Hartley Sweeten and others who helped me during development of |
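
For context (this fragment is a sketch, not part of the patch): the DMA-enabled ts72xx_spi_info shown in the hunk above is still handed over through ep93xx_register_spi() from the board's init_machine, exactly as in the ts72xx example earlier in this document, and the new DMA engine driver has to be enabled in the kernel configuration alongside the existing ep93xx SPI driver option.

    static void __init ts72xx_init_machine(void)
    {
        /* other board init omitted for brevity */
        ep93xx_register_spi(&ts72xx_spi_info, ts72xx_spi_devices,
                            ARRAY_SIZE(ts72xx_spi_devices));
    }

    CONFIG_EP93XX_DMA=y
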
diff --git a/arch/arm/mach-ep93xx/Makefile b/arch/arm/mach-ep93xx/Makefile
index 33ee2c863d18..21e721ab7378 100644
--- a/arch/arm/mach-ep93xx/Makefile
+++ b/arch/arm/mach-ep93xx/Makefile
| @@ -1,11 +1,13 @@ | |||
| 1 | # | 1 | # |
| 2 | # Makefile for the linux kernel. | 2 | # Makefile for the linux kernel. |
| 3 | # | 3 | # |
| 4 | obj-y := core.o clock.o dma-m2p.o gpio.o | 4 | obj-y := core.o clock.o gpio.o |
| 5 | obj-m := | 5 | obj-m := |
| 6 | obj-n := | 6 | obj-n := |
| 7 | obj- := | 7 | obj- := |
| 8 | 8 | ||
| 9 | obj-$(CONFIG_EP93XX_DMA) += dma.o | ||
| 10 | |||
| 9 | obj-$(CONFIG_MACH_ADSSPHERE) += adssphere.o | 11 | obj-$(CONFIG_MACH_ADSSPHERE) += adssphere.o |
| 10 | obj-$(CONFIG_MACH_EDB93XX) += edb93xx.o | 12 | obj-$(CONFIG_MACH_EDB93XX) += edb93xx.o |
| 11 | obj-$(CONFIG_MACH_GESBC9312) += gesbc9312.o | 13 | obj-$(CONFIG_MACH_GESBC9312) += gesbc9312.o |
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 6659a0d137a3..dd87a8272237 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
| @@ -492,11 +492,15 @@ static struct resource ep93xx_spi_resources[] = { | |||
| 492 | }, | 492 | }, |
| 493 | }; | 493 | }; |
| 494 | 494 | ||
| 495 | static u64 ep93xx_spi_dma_mask = DMA_BIT_MASK(32); | ||
| 496 | |||
| 495 | static struct platform_device ep93xx_spi_device = { | 497 | static struct platform_device ep93xx_spi_device = { |
| 496 | .name = "ep93xx-spi", | 498 | .name = "ep93xx-spi", |
| 497 | .id = 0, | 499 | .id = 0, |
| 498 | .dev = { | 500 | .dev = { |
| 499 | .platform_data = &ep93xx_spi_master_data, | 501 | .platform_data = &ep93xx_spi_master_data, |
| 502 | .coherent_dma_mask = DMA_BIT_MASK(32), | ||
| 503 | .dma_mask = &ep93xx_spi_dma_mask, | ||
| 500 | }, | 504 | }, |
| 501 | .num_resources = ARRAY_SIZE(ep93xx_spi_resources), | 505 | .num_resources = ARRAY_SIZE(ep93xx_spi_resources), |
| 502 | .resource = ep93xx_spi_resources, | 506 | .resource = ep93xx_spi_resources, |
diff --git a/arch/arm/mach-ep93xx/dma-m2p.c b/arch/arm/mach-ep93xx/dma-m2p.c
deleted file mode 100644
index a696d354b1f8..000000000000
--- a/arch/arm/mach-ep93xx/dma-m2p.c
+++ /dev/null
| @@ -1,411 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * arch/arm/mach-ep93xx/dma-m2p.c | ||
| 3 | * M2P DMA handling for Cirrus EP93xx chips. | ||
| 4 | * | ||
| 5 | * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> | ||
| 6 | * Copyright (C) 2006 Applied Data Systems | ||
| 7 | * | ||
| 8 | * Copyright (C) 2009 Ryan Mallon <ryan@bluewatersys.com> | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or modify | ||
| 11 | * it under the terms of the GNU General Public License as published by | ||
| 12 | * the Free Software Foundation; either version 2 of the License, or (at | ||
| 13 | * your option) any later version. | ||
| 14 | */ | ||
| 15 | |||
| 16 | /* | ||
| 17 | * On the EP93xx chip the following peripherals my be allocated to the 10 | ||
| 18 | * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive). | ||
| 19 | * | ||
| 20 | * I2S contains 3 Tx and 3 Rx DMA Channels | ||
| 21 | * AAC contains 3 Tx and 3 Rx DMA Channels | ||
| 22 | * UART1 contains 1 Tx and 1 Rx DMA Channels | ||
| 23 | * UART2 contains 1 Tx and 1 Rx DMA Channels | ||
| 24 | * UART3 contains 1 Tx and 1 Rx DMA Channels | ||
| 25 | * IrDA contains 1 Tx and 1 Rx DMA Channels | ||
| 26 | * | ||
| 27 | * SSP and IDE use the Memory to Memory (M2M) channels and are not covered | ||
| 28 | * with this implementation. | ||
| 29 | */ | ||
| 30 | |||
| 31 | #define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt | ||
| 32 | |||
| 33 | #include <linux/kernel.h> | ||
| 34 | #include <linux/clk.h> | ||
| 35 | #include <linux/err.h> | ||
| 36 | #include <linux/interrupt.h> | ||
| 37 | #include <linux/module.h> | ||
| 38 | #include <linux/io.h> | ||
| 39 | |||
| 40 | #include <mach/dma.h> | ||
| 41 | #include <mach/hardware.h> | ||
| 42 | |||
| 43 | #define M2P_CONTROL 0x00 | ||
| 44 | #define M2P_CONTROL_STALL_IRQ_EN (1 << 0) | ||
| 45 | #define M2P_CONTROL_NFB_IRQ_EN (1 << 1) | ||
| 46 | #define M2P_CONTROL_ERROR_IRQ_EN (1 << 3) | ||
| 47 | #define M2P_CONTROL_ENABLE (1 << 4) | ||
| 48 | #define M2P_INTERRUPT 0x04 | ||
| 49 | #define M2P_INTERRUPT_STALL (1 << 0) | ||
| 50 | #define M2P_INTERRUPT_NFB (1 << 1) | ||
| 51 | #define M2P_INTERRUPT_ERROR (1 << 3) | ||
| 52 | #define M2P_PPALLOC 0x08 | ||
| 53 | #define M2P_STATUS 0x0c | ||
| 54 | #define M2P_REMAIN 0x14 | ||
| 55 | #define M2P_MAXCNT0 0x20 | ||
| 56 | #define M2P_BASE0 0x24 | ||
| 57 | #define M2P_MAXCNT1 0x30 | ||
| 58 | #define M2P_BASE1 0x34 | ||
| 59 | |||
| 60 | #define STATE_IDLE 0 /* Channel is inactive. */ | ||
| 61 | #define STATE_STALL 1 /* Channel is active, no buffers pending. */ | ||
| 62 | #define STATE_ON 2 /* Channel is active, one buffer pending. */ | ||
| 63 | #define STATE_NEXT 3 /* Channel is active, two buffers pending. */ | ||
| 64 | |||
| 65 | struct m2p_channel { | ||
| 66 | char *name; | ||
| 67 | void __iomem *base; | ||
| 68 | int irq; | ||
| 69 | |||
| 70 | struct clk *clk; | ||
| 71 | spinlock_t lock; | ||
| 72 | |||
| 73 | void *client; | ||
| 74 | unsigned next_slot:1; | ||
| 75 | struct ep93xx_dma_buffer *buffer_xfer; | ||
| 76 | struct ep93xx_dma_buffer *buffer_next; | ||
| 77 | struct list_head buffers_pending; | ||
| 78 | }; | ||
| 79 | |||
| 80 | static struct m2p_channel m2p_rx[] = { | ||
| 81 | {"m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1}, | ||
| 82 | {"m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3}, | ||
| 83 | {"m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5}, | ||
| 84 | {"m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7}, | ||
| 85 | {"m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9}, | ||
| 86 | {NULL}, | ||
| 87 | }; | ||
| 88 | |||
| 89 | static struct m2p_channel m2p_tx[] = { | ||
| 90 | {"m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0}, | ||
| 91 | {"m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2}, | ||
| 92 | {"m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4}, | ||
| 93 | {"m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6}, | ||
| 94 | {"m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8}, | ||
| 95 | {NULL}, | ||
| 96 | }; | ||
| 97 | |||
| 98 | static void feed_buf(struct m2p_channel *ch, struct ep93xx_dma_buffer *buf) | ||
| 99 | { | ||
| 100 | if (ch->next_slot == 0) { | ||
| 101 | writel(buf->size, ch->base + M2P_MAXCNT0); | ||
| 102 | writel(buf->bus_addr, ch->base + M2P_BASE0); | ||
| 103 | } else { | ||
| 104 | writel(buf->size, ch->base + M2P_MAXCNT1); | ||
| 105 | writel(buf->bus_addr, ch->base + M2P_BASE1); | ||
| 106 | } | ||
| 107 | ch->next_slot ^= 1; | ||
| 108 | } | ||
| 109 | |||
| 110 | static void choose_buffer_xfer(struct m2p_channel *ch) | ||
| 111 | { | ||
| 112 | struct ep93xx_dma_buffer *buf; | ||
| 113 | |||
| 114 | ch->buffer_xfer = NULL; | ||
| 115 | if (!list_empty(&ch->buffers_pending)) { | ||
| 116 | buf = list_entry(ch->buffers_pending.next, | ||
| 117 | struct ep93xx_dma_buffer, list); | ||
| 118 | list_del(&buf->list); | ||
| 119 | feed_buf(ch, buf); | ||
| 120 | ch->buffer_xfer = buf; | ||
| 121 | } | ||
| 122 | } | ||
| 123 | |||
| 124 | static void choose_buffer_next(struct m2p_channel *ch) | ||
| 125 | { | ||
| 126 | struct ep93xx_dma_buffer *buf; | ||
| 127 | |||
| 128 | ch->buffer_next = NULL; | ||
| 129 | if (!list_empty(&ch->buffers_pending)) { | ||
| 130 | buf = list_entry(ch->buffers_pending.next, | ||
| 131 | struct ep93xx_dma_buffer, list); | ||
| 132 | list_del(&buf->list); | ||
| 133 | feed_buf(ch, buf); | ||
| 134 | ch->buffer_next = buf; | ||
| 135 | } | ||
| 136 | } | ||
| 137 | |||
| 138 | static inline void m2p_set_control(struct m2p_channel *ch, u32 v) | ||
| 139 | { | ||
| 140 | /* | ||
| 141 | * The control register must be read immediately after being written so | ||
| 142 | * that the internal state machine is correctly updated. See the ep93xx | ||
| 143 | * users' guide for details. | ||
| 144 | */ | ||
| 145 | writel(v, ch->base + M2P_CONTROL); | ||
| 146 | readl(ch->base + M2P_CONTROL); | ||
| 147 | } | ||
| 148 | |||
| 149 | static inline int m2p_channel_state(struct m2p_channel *ch) | ||
| 150 | { | ||
| 151 | return (readl(ch->base + M2P_STATUS) >> 4) & 0x3; | ||
| 152 | } | ||
| 153 | |||
| 154 | static irqreturn_t m2p_irq(int irq, void *dev_id) | ||
| 155 | { | ||
| 156 | struct m2p_channel *ch = dev_id; | ||
| 157 | struct ep93xx_dma_m2p_client *cl; | ||
| 158 | u32 irq_status, v; | ||
| 159 | int error = 0; | ||
| 160 | |||
| 161 | cl = ch->client; | ||
| 162 | |||
| 163 | spin_lock(&ch->lock); | ||
| 164 | irq_status = readl(ch->base + M2P_INTERRUPT); | ||
| 165 | |||
| 166 | if (irq_status & M2P_INTERRUPT_ERROR) { | ||
| 167 | writel(M2P_INTERRUPT_ERROR, ch->base + M2P_INTERRUPT); | ||
| 168 | error = 1; | ||
| 169 | } | ||
| 170 | |||
| 171 | if ((irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) == 0) { | ||
| 172 | spin_unlock(&ch->lock); | ||
| 173 | return IRQ_NONE; | ||
| 174 | } | ||
| 175 | |||
| 176 | switch (m2p_channel_state(ch)) { | ||
| 177 | case STATE_IDLE: | ||
| 178 | pr_crit("dma interrupt without a dma buffer\n"); | ||
| 179 | BUG(); | ||
| 180 | break; | ||
| 181 | |||
| 182 | case STATE_STALL: | ||
| 183 | cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error); | ||
| 184 | if (ch->buffer_next != NULL) { | ||
| 185 | cl->buffer_finished(cl->cookie, ch->buffer_next, | ||
| 186 | 0, error); | ||
| 187 | } | ||
| 188 | choose_buffer_xfer(ch); | ||
| 189 | choose_buffer_next(ch); | ||
| 190 | if (ch->buffer_xfer != NULL) | ||
| 191 | cl->buffer_started(cl->cookie, ch->buffer_xfer); | ||
| 192 | break; | ||
| 193 | |||
| 194 | case STATE_ON: | ||
| 195 | cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error); | ||
| 196 | ch->buffer_xfer = ch->buffer_next; | ||
| 197 | choose_buffer_next(ch); | ||
| 198 | cl->buffer_started(cl->cookie, ch->buffer_xfer); | ||
| 199 | break; | ||
| 200 | |||
| 201 | case STATE_NEXT: | ||
| 202 | pr_crit("dma interrupt while next\n"); | ||
| 203 | BUG(); | ||
| 204 | break; | ||
| 205 | } | ||
| 206 | |||
| 207 | v = readl(ch->base + M2P_CONTROL) & ~(M2P_CONTROL_STALL_IRQ_EN | | ||
| 208 | M2P_CONTROL_NFB_IRQ_EN); | ||
| 209 | if (ch->buffer_xfer != NULL) | ||
| 210 | v |= M2P_CONTROL_STALL_IRQ_EN; | ||
| 211 | if (ch->buffer_next != NULL) | ||
| 212 | v |= M2P_CONTROL_NFB_IRQ_EN; | ||
| 213 | m2p_set_control(ch, v); | ||
| 214 | |||
| 215 | spin_unlock(&ch->lock); | ||
| 216 | return IRQ_HANDLED; | ||
| 217 | } | ||
| 218 | |||
| 219 | static struct m2p_channel *find_free_channel(struct ep93xx_dma_m2p_client *cl) | ||
| 220 | { | ||
| 221 | struct m2p_channel *ch; | ||
| 222 | int i; | ||
| 223 | |||
| 224 | if (cl->flags & EP93XX_DMA_M2P_RX) | ||
| 225 | ch = m2p_rx; | ||
| 226 | else | ||
| 227 | ch = m2p_tx; | ||
| 228 | |||
| 229 | for (i = 0; ch[i].base; i++) { | ||
| 230 | struct ep93xx_dma_m2p_client *client; | ||
| 231 | |||
| 232 | client = ch[i].client; | ||
| 233 | if (client != NULL) { | ||
| 234 | int port; | ||
| 235 | |||
| 236 | port = cl->flags & EP93XX_DMA_M2P_PORT_MASK; | ||
| 237 | if (port == (client->flags & | ||
| 238 | EP93XX_DMA_M2P_PORT_MASK)) { | ||
| 239 | pr_warning("DMA channel already used by %s\n", | ||
| 240 | cl->name ? : "unknown client"); | ||
| 241 | return ERR_PTR(-EBUSY); | ||
| 242 | } | ||
| 243 | } | ||
| 244 | } | ||
| 245 | |||
| 246 | for (i = 0; ch[i].base; i++) { | ||
| 247 | if (ch[i].client == NULL) | ||
| 248 | return ch + i; | ||
| 249 | } | ||
| 250 | |||
| 251 | pr_warning("No free DMA channel for %s\n", | ||
| 252 | cl->name ? : "unknown client"); | ||
| 253 | return ERR_PTR(-ENODEV); | ||
| 254 | } | ||
| 255 | |||
| 256 | static void channel_enable(struct m2p_channel *ch) | ||
| 257 | { | ||
| 258 | struct ep93xx_dma_m2p_client *cl = ch->client; | ||
| 259 | u32 v; | ||
| 260 | |||
| 261 | clk_enable(ch->clk); | ||
| 262 | |||
| 263 | v = cl->flags & EP93XX_DMA_M2P_PORT_MASK; | ||
| 264 | writel(v, ch->base + M2P_PPALLOC); | ||
| 265 | |||
| 266 | v = cl->flags & EP93XX_DMA_M2P_ERROR_MASK; | ||
| 267 | v |= M2P_CONTROL_ENABLE | M2P_CONTROL_ERROR_IRQ_EN; | ||
| 268 | m2p_set_control(ch, v); | ||
| 269 | } | ||
| 270 | |||
| 271 | static void channel_disable(struct m2p_channel *ch) | ||
| 272 | { | ||
| 273 | u32 v; | ||
| 274 | |||
| 275 | v = readl(ch->base + M2P_CONTROL); | ||
| 276 | v &= ~(M2P_CONTROL_STALL_IRQ_EN | M2P_CONTROL_NFB_IRQ_EN); | ||
| 277 | m2p_set_control(ch, v); | ||
| 278 | |||
| 279 | while (m2p_channel_state(ch) >= STATE_ON) | ||
| 280 | cpu_relax(); | ||
| 281 | |||
| 282 | m2p_set_control(ch, 0x0); | ||
| 283 | |||
| 284 | while (m2p_channel_state(ch) == STATE_STALL) | ||
| 285 | cpu_relax(); | ||
| 286 | |||
| 287 | clk_disable(ch->clk); | ||
| 288 | } | ||
| 289 | |||
| 290 | int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *cl) | ||
| 291 | { | ||
| 292 | struct m2p_channel *ch; | ||
| 293 | int err; | ||
| 294 | |||
| 295 | ch = find_free_channel(cl); | ||
| 296 | if (IS_ERR(ch)) | ||
| 297 | return PTR_ERR(ch); | ||
| 298 | |||
| 299 | err = request_irq(ch->irq, m2p_irq, 0, cl->name ? : "dma-m2p", ch); | ||
| 300 | if (err) | ||
| 301 | return err; | ||
| 302 | |||
| 303 | ch->client = cl; | ||
| 304 | ch->next_slot = 0; | ||
| 305 | ch->buffer_xfer = NULL; | ||
| 306 | ch->buffer_next = NULL; | ||
| 307 | INIT_LIST_HEAD(&ch->buffers_pending); | ||
| 308 | |||
| 309 | cl->channel = ch; | ||
| 310 | |||
| 311 | channel_enable(ch); | ||
| 312 | |||
| 313 | return 0; | ||
| 314 | } | ||
| 315 | EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_register); | ||
| 316 | |||
| 317 | void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *cl) | ||
| 318 | { | ||
| 319 | struct m2p_channel *ch = cl->channel; | ||
| 320 | |||
| 321 | channel_disable(ch); | ||
| 322 | free_irq(ch->irq, ch); | ||
| 323 | ch->client = NULL; | ||
| 324 | } | ||
| 325 | EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_unregister); | ||
| 326 | |||
| 327 | void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *cl, | ||
| 328 | struct ep93xx_dma_buffer *buf) | ||
| 329 | { | ||
| 330 | struct m2p_channel *ch = cl->channel; | ||
| 331 | unsigned long flags; | ||
| 332 | u32 v; | ||
| 333 | |||
| 334 | spin_lock_irqsave(&ch->lock, flags); | ||
| 335 | v = readl(ch->base + M2P_CONTROL); | ||
| 336 | if (ch->buffer_xfer == NULL) { | ||
| 337 | ch->buffer_xfer = buf; | ||
| 338 | feed_buf(ch, buf); | ||
| 339 | cl->buffer_started(cl->cookie, buf); | ||
| 340 | |||
| 341 | v |= M2P_CONTROL_STALL_IRQ_EN; | ||
| 342 | m2p_set_control(ch, v); | ||
| 343 | |||
| 344 | } else if (ch->buffer_next == NULL) { | ||
| 345 | ch->buffer_next = buf; | ||
| 346 | feed_buf(ch, buf); | ||
| 347 | |||
| 348 | v |= M2P_CONTROL_NFB_IRQ_EN; | ||
| 349 | m2p_set_control(ch, v); | ||
| 350 | } else { | ||
| 351 | list_add_tail(&buf->list, &ch->buffers_pending); | ||
| 352 | } | ||
| 353 | spin_unlock_irqrestore(&ch->lock, flags); | ||
| 354 | } | ||
| 355 | EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit); | ||
| 356 | |||
| 357 | void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *cl, | ||
| 358 | struct ep93xx_dma_buffer *buf) | ||
| 359 | { | ||
| 360 | struct m2p_channel *ch = cl->channel; | ||
| 361 | |||
| 362 | list_add_tail(&buf->list, &ch->buffers_pending); | ||
| 363 | } | ||
| 364 | EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit_recursive); | ||
| 365 | |||
| 366 | void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *cl) | ||
| 367 | { | ||
| 368 | struct m2p_channel *ch = cl->channel; | ||
| 369 | |||
| 370 | channel_disable(ch); | ||
| 371 | ch->next_slot = 0; | ||
| 372 | ch->buffer_xfer = NULL; | ||
| 373 | ch->buffer_next = NULL; | ||
| 374 | INIT_LIST_HEAD(&ch->buffers_pending); | ||
| 375 | channel_enable(ch); | ||
| 376 | } | ||
| 377 | EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_flush); | ||
| 378 | |||
| 379 | static int init_channel(struct m2p_channel *ch) | ||
| 380 | { | ||
| 381 | ch->clk = clk_get(NULL, ch->name); | ||
| 382 | if (IS_ERR(ch->clk)) | ||
| 383 | return PTR_ERR(ch->clk); | ||
| 384 | |||
| 385 | spin_lock_init(&ch->lock); | ||
| 386 | ch->client = NULL; | ||
| 387 | |||
| 388 | return 0; | ||
| 389 | } | ||
| 390 | |||
| 391 | static int __init ep93xx_dma_m2p_init(void) | ||
| 392 | { | ||
| 393 | int i; | ||
| 394 | int ret; | ||
| 395 | |||
| 396 | for (i = 0; m2p_rx[i].base; i++) { | ||
| 397 | ret = init_channel(m2p_rx + i); | ||
| 398 | if (ret) | ||
| 399 | return ret; | ||
| 400 | } | ||
| 401 | |||
| 402 | for (i = 0; m2p_tx[i].base; i++) { | ||
| 403 | ret = init_channel(m2p_tx + i); | ||
| 404 | if (ret) | ||
| 405 | return ret; | ||
| 406 | } | ||
| 407 | |||
| 408 | pr_info("M2P DMA subsystem initialized\n"); | ||
| 409 | return 0; | ||
| 410 | } | ||
| 411 | arch_initcall(ep93xx_dma_m2p_init); | ||
diff --git a/arch/arm/mach-ep93xx/dma.c b/arch/arm/mach-ep93xx/dma.c
new file mode 100644
index 000000000000..5a2570881255
--- /dev/null
+++ b/arch/arm/mach-ep93xx/dma.c
| @@ -0,0 +1,108 @@ | |||
| 1 | /* | ||
| 2 | * arch/arm/mach-ep93xx/dma.c | ||
| 3 | * | ||
| 4 | * Platform support code for the EP93xx dmaengine driver. | ||
| 5 | * | ||
| 6 | * Copyright (C) 2011 Mika Westerberg | ||
| 7 | * | ||
| 8 | * This work is based on the original dma-m2p implementation with | ||
| 9 | * following copyrights: | ||
| 10 | * | ||
| 11 | * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> | ||
| 12 | * Copyright (C) 2006 Applied Data Systems | ||
| 13 | * Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com> | ||
| 14 | * | ||
| 15 | * This program is free software; you can redistribute it and/or modify | ||
| 16 | * it under the terms of the GNU General Public License as published by | ||
| 17 | * the Free Software Foundation; either version 2 of the License, or (at | ||
| 18 | * your option) any later version. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include <linux/dmaengine.h> | ||
| 22 | #include <linux/dma-mapping.h> | ||
| 23 | #include <linux/init.h> | ||
| 24 | #include <linux/interrupt.h> | ||
| 25 | #include <linux/kernel.h> | ||
| 26 | #include <linux/platform_device.h> | ||
| 27 | |||
| 28 | #include <mach/dma.h> | ||
| 29 | #include <mach/hardware.h> | ||
| 30 | |||
| 31 | #define DMA_CHANNEL(_name, _base, _irq) \ | ||
| 32 | { .name = (_name), .base = (_base), .irq = (_irq) } | ||
| 33 | |||
| 34 | /* | ||
| 35 | * DMA M2P channels. | ||
| 36 | * | ||
| 37 | * On the EP93xx chip the following peripherals may be allocated to the 10 | ||
| 38 | * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive). | ||
| 39 | * | ||
| 40 | * I2S contains 3 Tx and 3 Rx DMA Channels | ||
| 41 | * AAC contains 3 Tx and 3 Rx DMA Channels | ||
| 42 | * UART1 contains 1 Tx and 1 Rx DMA Channels | ||
| 43 | * UART2 contains 1 Tx and 1 Rx DMA Channels | ||
| 44 | * UART3 contains 1 Tx and 1 Rx DMA Channels | ||
| 45 | * IrDA contains 1 Tx and 1 Rx DMA Channels | ||
| 46 | * | ||
| 47 | * Registers are mapped statically in ep93xx_map_io(). | ||
| 48 | */ | ||
| 49 | static struct ep93xx_dma_chan_data ep93xx_dma_m2p_channels[] = { | ||
| 50 | DMA_CHANNEL("m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0), | ||
| 51 | DMA_CHANNEL("m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1), | ||
| 52 | DMA_CHANNEL("m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2), | ||
| 53 | DMA_CHANNEL("m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3), | ||
| 54 | DMA_CHANNEL("m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4), | ||
| 55 | DMA_CHANNEL("m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5), | ||
| 56 | DMA_CHANNEL("m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6), | ||
| 57 | DMA_CHANNEL("m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7), | ||
| 58 | DMA_CHANNEL("m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8), | ||
| 59 | DMA_CHANNEL("m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9), | ||
| 60 | }; | ||
| 61 | |||
| 62 | static struct ep93xx_dma_platform_data ep93xx_dma_m2p_data = { | ||
| 63 | .channels = ep93xx_dma_m2p_channels, | ||
| 64 | .num_channels = ARRAY_SIZE(ep93xx_dma_m2p_channels), | ||
| 65 | }; | ||
| 66 | |||
| 67 | static struct platform_device ep93xx_dma_m2p_device = { | ||
| 68 | .name = "ep93xx-dma-m2p", | ||
| 69 | .id = -1, | ||
| 70 | .dev = { | ||
| 71 | .platform_data = &ep93xx_dma_m2p_data, | ||
| 72 | }, | ||
| 73 | }; | ||
| 74 | |||
| 75 | /* | ||
| 76 | * DMA M2M channels. | ||
| 77 | * | ||
| 78 | * There are 2 M2M channels which support memcpy/memset and in addition simple | ||
| 79 | * hardware requests from/to SSP and IDE. We do not implement external | ||
| 80 | * hardware requests. | ||
| 81 | * | ||
| 82 | * Registers are mapped statically in ep93xx_map_io(). | ||
| 83 | */ | ||
| 84 | static struct ep93xx_dma_chan_data ep93xx_dma_m2m_channels[] = { | ||
| 85 | DMA_CHANNEL("m2m0", EP93XX_DMA_BASE + 0x0100, IRQ_EP93XX_DMAM2M0), | ||
| 86 | DMA_CHANNEL("m2m1", EP93XX_DMA_BASE + 0x0140, IRQ_EP93XX_DMAM2M1), | ||
| 87 | }; | ||
| 88 | |||
| 89 | static struct ep93xx_dma_platform_data ep93xx_dma_m2m_data = { | ||
| 90 | .channels = ep93xx_dma_m2m_channels, | ||
| 91 | .num_channels = ARRAY_SIZE(ep93xx_dma_m2m_channels), | ||
| 92 | }; | ||
| 93 | |||
| 94 | static struct platform_device ep93xx_dma_m2m_device = { | ||
| 95 | .name = "ep93xx-dma-m2m", | ||
| 96 | .id = -1, | ||
| 97 | .dev = { | ||
| 98 | .platform_data = &ep93xx_dma_m2m_data, | ||
| 99 | }, | ||
| 100 | }; | ||
| 101 | |||
| 102 | static int __init ep93xx_dma_init(void) | ||
| 103 | { | ||
| 104 | platform_device_register(&ep93xx_dma_m2p_device); | ||
| 105 | platform_device_register(&ep93xx_dma_m2m_device); | ||
| 106 | return 0; | ||
| 107 | } | ||
| 108 | arch_initcall(ep93xx_dma_init); | ||
diff --git a/arch/arm/mach-ep93xx/include/mach/dma.h b/arch/arm/mach-ep93xx/include/mach/dma.h
index 5e31b2b25da9..46d4d876e6fb 100644
--- a/arch/arm/mach-ep93xx/include/mach/dma.h
+++ b/arch/arm/mach-ep93xx/include/mach/dma.h
| @@ -1,149 +1,93 @@ | |||
| 1 | /** | ||
| 2 | * DOC: EP93xx DMA M2P memory to peripheral and peripheral to memory engine | ||
| 3 | * | ||
| 4 | * The EP93xx DMA M2P subsystem handles DMA transfers between memory and | ||
| 5 | * peripherals. DMA M2P channels are available for audio, UARTs and IrDA. | ||
| 6 | * See chapter 10 of the EP93xx users guide for full details on the DMA M2P | ||
| 7 | * engine. | ||
| 8 | * | ||
| 9 | * See sound/soc/ep93xx/ep93xx-pcm.c for an example use of the DMA M2P code. | ||
| 10 | * | ||
| 11 | */ | ||
| 12 | |||
| 13 | #ifndef __ASM_ARCH_DMA_H | 1 | #ifndef __ASM_ARCH_DMA_H |
| 14 | #define __ASM_ARCH_DMA_H | 2 | #define __ASM_ARCH_DMA_H |
| 15 | 3 | ||
| 16 | #include <linux/list.h> | ||
| 17 | #include <linux/types.h> | 4 | #include <linux/types.h> |
| 5 | #include <linux/dmaengine.h> | ||
| 6 | #include <linux/dma-mapping.h> | ||
| 18 | 7 | ||
| 19 | /** | 8 | /* |
| 20 | * struct ep93xx_dma_buffer - Information about a buffer to be transferred | 9 | * M2P channels. |
| 21 | * using the DMA M2P engine | ||
| 22 | * | 10 | * |
| 23 | * @list: Entry in DMA buffer list | 11 | * Note that these values are also directly used for setting the PPALLOC |
| 24 | * @bus_addr: Physical address of the buffer | 12 | * register. |
| 25 | * @size: Size of the buffer in bytes | ||
| 26 | */ | 13 | */ |
| 27 | struct ep93xx_dma_buffer { | 14 | #define EP93XX_DMA_I2S1 0 |
| 28 | struct list_head list; | 15 | #define EP93XX_DMA_I2S2 1 |
| 29 | u32 bus_addr; | 16 | #define EP93XX_DMA_AAC1 2 |
| 30 | u16 size; | 17 | #define EP93XX_DMA_AAC2 3 |
| 31 | }; | 18 | #define EP93XX_DMA_AAC3 4 |
| 19 | #define EP93XX_DMA_I2S3 5 | ||
| 20 | #define EP93XX_DMA_UART1 6 | ||
| 21 | #define EP93XX_DMA_UART2 7 | ||
| 22 | #define EP93XX_DMA_UART3 8 | ||
| 23 | #define EP93XX_DMA_IRDA 9 | ||
| 24 | /* M2M channels */ | ||
| 25 | #define EP93XX_DMA_SSP 10 | ||
| 26 | #define EP93XX_DMA_IDE 11 | ||
| 32 | 27 | ||
| 33 | /** | 28 | /** |
| 34 | * struct ep93xx_dma_m2p_client - Information about a DMA M2P client | 29 | * struct ep93xx_dma_data - configuration data for the EP93xx dmaengine |
| 35 | * | 30 | * @port: peripheral which is requesting the channel |
| 36 | * @name: Unique name for this client | 31 | * @direction: TX/RX channel |
| 37 | * @flags: Client flags | 32 | * @name: optional name for the channel, this is displayed in /proc/interrupts |
| 38 | * @cookie: User data to pass to callback functions | 33 | * |
| 39 | * @buffer_started: Non NULL function to call when a transfer is started. | 34 | * This information is passed as private channel parameter in a filter |
| 40 | * The arguments are the user data cookie and the DMA | 35 | * function. Note that this is only needed for slave/cyclic channels. For |
| 41 | * buffer which is starting. | 36 | * memcpy channels %NULL data should be passed. |
| 42 | * @buffer_finished: Non NULL function to call when a transfer is completed. | ||
| 43 | * The arguments are the user data cookie, the DMA buffer | ||
| 44 | * which has completed, and a boolean flag indicating if | ||
| 45 | * the transfer had an error. | ||
| 46 | */ | 37 | */ |
| 47 | struct ep93xx_dma_m2p_client { | 38 | struct ep93xx_dma_data { |
| 48 | char *name; | 39 | int port; |
| 49 | u8 flags; | 40 | enum dma_data_direction direction; |
| 50 | void *cookie; | 41 | const char *name; |
| 51 | void (*buffer_started)(void *cookie, | ||
| 52 | struct ep93xx_dma_buffer *buf); | ||
| 53 | void (*buffer_finished)(void *cookie, | ||
| 54 | struct ep93xx_dma_buffer *buf, | ||
| 55 | int bytes, int error); | ||
| 56 | |||
| 57 | /* private: Internal use only */ | ||
| 58 | void *channel; | ||
| 59 | }; | 42 | }; |
| 60 | 43 | ||
| 61 | /* DMA M2P ports */ | ||
| 62 | #define EP93XX_DMA_M2P_PORT_I2S1 0x00 | ||
| 63 | #define EP93XX_DMA_M2P_PORT_I2S2 0x01 | ||
| 64 | #define EP93XX_DMA_M2P_PORT_AAC1 0x02 | ||
| 65 | #define EP93XX_DMA_M2P_PORT_AAC2 0x03 | ||
| 66 | #define EP93XX_DMA_M2P_PORT_AAC3 0x04 | ||
| 67 | #define EP93XX_DMA_M2P_PORT_I2S3 0x05 | ||
| 68 | #define EP93XX_DMA_M2P_PORT_UART1 0x06 | ||
| 69 | #define EP93XX_DMA_M2P_PORT_UART2 0x07 | ||
| 70 | #define EP93XX_DMA_M2P_PORT_UART3 0x08 | ||
| 71 | #define EP93XX_DMA_M2P_PORT_IRDA 0x09 | ||
| 72 | #define EP93XX_DMA_M2P_PORT_MASK 0x0f | ||
| 73 | |||
| 74 | /* DMA M2P client flags */ | ||
| 75 | #define EP93XX_DMA_M2P_TX 0x00 /* Memory to peripheral */ | ||
| 76 | #define EP93XX_DMA_M2P_RX 0x10 /* Peripheral to memory */ | ||
| 77 | |||
| 78 | /* | ||
| 79 | * DMA M2P client error handling flags. See the EP93xx users guide | ||
| 80 | * documentation on the DMA M2P CONTROL register for more details | ||
| 81 | */ | ||
| 82 | #define EP93XX_DMA_M2P_ABORT_ON_ERROR 0x20 /* Abort on peripheral error */ | ||
| 83 | #define EP93XX_DMA_M2P_IGNORE_ERROR 0x40 /* Ignore peripheral errors */ | ||
| 84 | #define EP93XX_DMA_M2P_ERROR_MASK 0x60 /* Mask of error bits */ | ||
| 85 | |||
| 86 | /** | 44 | /** |
| 87 | * ep93xx_dma_m2p_client_register - Register a client with the DMA M2P | 45 | * struct ep93xx_dma_chan_data - platform specific data for a DMA channel |
| 88 | * subsystem | 46 | * @name: name of the channel, used for getting the right clock for the channel |
| 89 | * | 47 | * @base: mapped registers |
| 90 | * @m2p: Client information to register | 48 | * @irq: interrupt number used by this channel |
| 91 | * returns 0 on success | ||
| 92 | * | ||
| 93 | * The DMA M2P subsystem allocates a channel and an interrupt line for the DMA | ||
| 94 | * client | ||
| 95 | */ | 49 | */ |
| 96 | int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p); | 50 | struct ep93xx_dma_chan_data { |
| 51 | const char *name; | ||
| 52 | void __iomem *base; | ||
| 53 | int irq; | ||
| 54 | }; | ||
| 97 | 55 | ||
| 98 | /** | 56 | /** |
| 99 | * ep93xx_dma_m2p_client_unregister - Unregister a client from the DMA M2P | 57 | * struct ep93xx_dma_platform_data - platform data for the dmaengine driver |
| 100 | * subsystem | 58 | * @channels: array of channels which are passed to the driver |
| 101 | * | 59 | * @num_channels: number of channels in the array |
| 102 | * @m2p: Client to unregister | ||
| 103 | * | 60 | * |
| 104 | * Any transfers currently in progress will be completed in hardware, but | 61 | * This structure is passed to the DMA engine driver via platform data. For |
| 105 | * ignored in software. | 62 | M2P channels, the contract is that even channels are for TX and odd for RX. |
| 63 | There is no such requirement for the M2M channels. | ||
| 106 | */ | 64 | */ |
| 107 | void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *m2p); | 65 | struct ep93xx_dma_platform_data { |
| 66 | struct ep93xx_dma_chan_data *channels; | ||
| 67 | size_t num_channels; | ||
| 68 | }; | ||
| 108 | 69 | ||
| 109 | /** | 70 | static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan) |
| 110 | * ep93xx_dma_m2p_submit - Submit a DMA M2P transfer | 71 | { |
| 111 | * | 72 | return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p"); |
| 112 | * @m2p: DMA Client to submit the transfer on | 73 | } |
| 113 | * @buf: DMA Buffer to submit | ||
| 114 | * | ||
| 115 | * If the current or next transfer positions are free on the M2P client then | ||
| 116 | * the transfer is started immediately. If not, the transfer is added to the | ||
| 117 | * list of pending transfers. This function must not be called from the | ||
| 118 | * buffer_finished callback for an M2P channel. | ||
| 119 | * | ||
| 120 | */ | ||
| 121 | void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *m2p, | ||
| 122 | struct ep93xx_dma_buffer *buf); | ||
| 123 | 74 | ||
| 124 | /** | 75 | /** |
| 125 | * ep93xx_dma_m2p_submit_recursive - Put a DMA transfer on the pending list | 76 | * ep93xx_dma_chan_direction - returns direction the channel can be used |
| 126 | * for an M2P channel | 77 | * @chan: channel |
| 127 | * | 78 | * |
| 128 | * @m2p: DMA Client to submit the transfer on | 79 | * This function can be used in filter functions to find out whether the |
| 129 | * @buf: DMA Buffer to submit | 80 | * channel supports given DMA direction. Only M2P channels have such |
| 130 | * | 81 | * limitation, for M2M channels the direction is configurable. |
| 131 | * This function must only be called from the buffer_finished callback for an | ||
| 132 | * M2P channel. It is commonly used to add the next transfer in a chained list | ||
| 133 | * of DMA transfers. | ||
| 134 | */ | 82 | */ |
| 135 | void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p, | 83 | static inline enum dma_data_direction |
| 136 | struct ep93xx_dma_buffer *buf); | 84 | ep93xx_dma_chan_direction(struct dma_chan *chan) |
| 85 | { | ||
| 86 | if (!ep93xx_dma_chan_is_m2p(chan)) | ||
| 87 | return DMA_NONE; | ||
| 137 | 88 | ||
| 138 | /** | 89 | /* even channels are for TX, odd for RX */ |
| 139 | * ep93xx_dma_m2p_flush - Flush all pending transfers on a DMA M2P client | 90 | return (chan->chan_id % 2 == 0) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; |
| 140 | * | 91 | } |
| 141 | * @m2p: DMA client to flush transfers on | ||
| 142 | * | ||
| 143 | * Any transfers currently in progress will be completed in hardware, but | ||
| 144 | * ignored in software. | ||
| 145 | * | ||
| 146 | */ | ||
| 147 | void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p); | ||
| 148 | 92 | ||
| 149 | #endif /* __ASM_ARCH_DMA_H */ | 93 | #endif /* __ASM_ARCH_DMA_H */ |
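
The new header above only declares the platform data and two small helpers; the expected client-side pattern (per the ep93xx_dma_data kerneldoc) is to pass the structure as the filter parameter to dma_request_channel() and attach it to chan->private from the filter function. Below is a minimal sketch for a hypothetical UART1 RX user; the function names are illustrative and not taken from this patch.

    #include <mach/dma.h>

    static bool ep93xx_uart_dma_filter(struct dma_chan *chan, void *filter_param)
    {
        struct ep93xx_dma_data *data = filter_param;

        /* M2P channels are direction specific: even = TX, odd = RX */
        if (data->direction != ep93xx_dma_chan_direction(chan))
            return false;

        chan->private = data;
        return true;
    }

    static struct dma_chan *ep93xx_uart_request_rx_chan(void)
    {
        static struct ep93xx_dma_data rx_data = {
            .port      = EP93XX_DMA_UART1,
            .direction = DMA_FROM_DEVICE,
            .name      = "uart1 rx",
        };
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        return dma_request_channel(mask, ep93xx_uart_dma_filter, &rx_data);
    }

An M2M user (for example the SPI driver touched elsewhere in this series) would instead check that ep93xx_dma_chan_is_m2p() returns false and skip the direction test, since M2M channels are not direction specific.
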
diff --git a/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h b/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h
index 0a37961b3453..9bb63ac13f04 100644
--- a/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h
+++ b/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h
| @@ -7,9 +7,11 @@ struct spi_device; | |||
| 7 | * struct ep93xx_spi_info - EP93xx specific SPI descriptor | 7 | * struct ep93xx_spi_info - EP93xx specific SPI descriptor |
| 8 | * @num_chipselect: number of chip selects on this board, must be | 8 | * @num_chipselect: number of chip selects on this board, must be |
| 9 | * at least one | 9 | * at least one |
| 10 | * @use_dma: use DMA for the transfers | ||
| 10 | */ | 11 | */ |
| 11 | struct ep93xx_spi_info { | 12 | struct ep93xx_spi_info { |
| 12 | int num_chipselect; | 13 | int num_chipselect; |
| 14 | bool use_dma; | ||
| 13 | }; | 15 | }; |
| 14 | 16 | ||
| 15 | /** | 17 | /** |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 25cf327cd1cb..2e3b3d38c465 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
| @@ -237,6 +237,13 @@ config MXS_DMA | |||
| 237 | Support the MXS DMA engine. This engine including APBH-DMA | 237 | Support the MXS DMA engine. This engine including APBH-DMA |
| 238 | and APBX-DMA is integrated into Freescale i.MX23/28 chips. | 238 | and APBX-DMA is integrated into Freescale i.MX23/28 chips. |
| 239 | 239 | ||
| 240 | config EP93XX_DMA | ||
| 241 | bool "Cirrus Logic EP93xx DMA support" | ||
| 242 | depends on ARCH_EP93XX | ||
| 243 | select DMA_ENGINE | ||
| 244 | help | ||
| 245 | Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller. | ||
| 246 | |||
| 240 | config DMA_ENGINE | 247 | config DMA_ENGINE |
| 241 | bool | 248 | bool |
| 242 | 249 | ||
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 836095ab3c5c..30cf3b1f0c5c 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
| @@ -25,3 +25,4 @@ obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o | |||
| 25 | obj-$(CONFIG_PL330_DMA) += pl330.o | 25 | obj-$(CONFIG_PL330_DMA) += pl330.o |
| 26 | obj-$(CONFIG_PCH_DMA) += pch_dma.o | 26 | obj-$(CONFIG_PCH_DMA) += pch_dma.o |
| 27 | obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o | 27 | obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o |
| 28 | obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o | ||
diff --git a/drivers/dma/TODO b/drivers/dma/TODO
index a4af8589330c..734ed0206cd5 100644
--- a/drivers/dma/TODO
+++ b/drivers/dma/TODO
| @@ -9,6 +9,5 @@ TODO for slave dma | |||
| 9 | - mxs-dma.c | 9 | - mxs-dma.c |
| 10 | - dw_dmac | 10 | - dw_dmac |
| 11 | - intel_mid_dma | 11 | - intel_mid_dma |
| 12 | - ste_dma40 | ||
| 13 | 4. Check other subsystems for dma drivers and merge/move to dmaengine | 12 | 4. Check other subsystems for dma drivers and merge/move to dmaengine |
| 14 | 5. Remove dma_slave_config's dma direction. | 13 | 5. Remove dma_slave_config's dma direction. |
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index e6d7228b1479..196a7378d332 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
| @@ -156,14 +156,10 @@ struct pl08x_driver_data { | |||
| 156 | #define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */ | 156 | #define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */ |
| 157 | #define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT) | 157 | #define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT) |
| 158 | 158 | ||
| 159 | /* Minimum period between work queue runs */ | ||
| 160 | #define PL08X_WQ_PERIODMIN 20 | ||
| 161 | |||
| 162 | /* Size (bytes) of each LLI buffer allocated for one transfer */ | 159 | /* Size (bytes) of each LLI buffer allocated for one transfer */ |
| 163 | # define PL08X_LLI_TSFR_SIZE 0x2000 | 160 | # define PL08X_LLI_TSFR_SIZE 0x2000 |
| 164 | 161 | ||
| 165 | /* Maximum times we call dma_pool_alloc on this pool without freeing */ | 162 | /* Maximum times we call dma_pool_alloc on this pool without freeing */ |
| 166 | #define PL08X_MAX_ALLOCS 0x40 | ||
| 167 | #define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli)) | 163 | #define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli)) |
| 168 | #define PL08X_ALIGN 8 | 164 | #define PL08X_ALIGN 8 |
| 169 | 165 | ||
| @@ -495,10 +491,10 @@ static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth, | |||
| 495 | 491 | ||
| 496 | struct pl08x_lli_build_data { | 492 | struct pl08x_lli_build_data { |
| 497 | struct pl08x_txd *txd; | 493 | struct pl08x_txd *txd; |
| 498 | struct pl08x_driver_data *pl08x; | ||
| 499 | struct pl08x_bus_data srcbus; | 494 | struct pl08x_bus_data srcbus; |
| 500 | struct pl08x_bus_data dstbus; | 495 | struct pl08x_bus_data dstbus; |
| 501 | size_t remainder; | 496 | size_t remainder; |
| 497 | u32 lli_bus; | ||
| 502 | }; | 498 | }; |
| 503 | 499 | ||
| 504 | /* | 500 | /* |
| @@ -551,8 +547,7 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd, | |||
| 551 | llis_va[num_llis].src = bd->srcbus.addr; | 547 | llis_va[num_llis].src = bd->srcbus.addr; |
| 552 | llis_va[num_llis].dst = bd->dstbus.addr; | 548 | llis_va[num_llis].dst = bd->dstbus.addr; |
| 553 | llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli); | 549 | llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli); |
| 554 | if (bd->pl08x->lli_buses & PL08X_AHB2) | 550 | llis_va[num_llis].lli |= bd->lli_bus; |
| 555 | llis_va[num_llis].lli |= PL080_LLI_LM_AHB2; | ||
| 556 | 551 | ||
| 557 | if (cctl & PL080_CONTROL_SRC_INCR) | 552 | if (cctl & PL080_CONTROL_SRC_INCR) |
| 558 | bd->srcbus.addr += len; | 553 | bd->srcbus.addr += len; |
| @@ -605,9 +600,9 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, | |||
| 605 | cctl = txd->cctl; | 600 | cctl = txd->cctl; |
| 606 | 601 | ||
| 607 | bd.txd = txd; | 602 | bd.txd = txd; |
| 608 | bd.pl08x = pl08x; | ||
| 609 | bd.srcbus.addr = txd->src_addr; | 603 | bd.srcbus.addr = txd->src_addr; |
| 610 | bd.dstbus.addr = txd->dst_addr; | 604 | bd.dstbus.addr = txd->dst_addr; |
| 605 | bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0; | ||
| 611 | 606 | ||
| 612 | /* Find maximum width of the source bus */ | 607 | /* Find maximum width of the source bus */ |
| 613 | bd.srcbus.maxwidth = | 608 | bd.srcbus.maxwidth = |
| @@ -622,25 +617,15 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, | |||
| 622 | /* Set up the bus widths to the maximum */ | 617 | /* Set up the bus widths to the maximum */ |
| 623 | bd.srcbus.buswidth = bd.srcbus.maxwidth; | 618 | bd.srcbus.buswidth = bd.srcbus.maxwidth; |
| 624 | bd.dstbus.buswidth = bd.dstbus.maxwidth; | 619 | bd.dstbus.buswidth = bd.dstbus.maxwidth; |
| 625 | dev_vdbg(&pl08x->adev->dev, | ||
| 626 | "%s source bus is %d bytes wide, dest bus is %d bytes wide\n", | ||
| 627 | __func__, bd.srcbus.buswidth, bd.dstbus.buswidth); | ||
| 628 | |||
| 629 | 620 | ||
| 630 | /* | 621 | /* |
| 631 | * Bytes transferred == tsize * MIN(buswidths), not max(buswidths) | 622 | * Bytes transferred == tsize * MIN(buswidths), not max(buswidths) |
| 632 | */ | 623 | */ |
| 633 | max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) * | 624 | max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) * |
| 634 | PL080_CONTROL_TRANSFER_SIZE_MASK; | 625 | PL080_CONTROL_TRANSFER_SIZE_MASK; |
| 635 | dev_vdbg(&pl08x->adev->dev, | ||
| 636 | "%s max bytes per lli = %zu\n", | ||
| 637 | __func__, max_bytes_per_lli); | ||
| 638 | 626 | ||
| 639 | /* We need to count this down to zero */ | 627 | /* We need to count this down to zero */ |
| 640 | bd.remainder = txd->len; | 628 | bd.remainder = txd->len; |
| 641 | dev_vdbg(&pl08x->adev->dev, | ||
| 642 | "%s remainder = %zu\n", | ||
| 643 | __func__, bd.remainder); | ||
| 644 | 629 | ||
| 645 | /* | 630 | /* |
| 646 | * Choose bus to align to | 631 | * Choose bus to align to |
| @@ -649,6 +634,16 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, | |||
| 649 | */ | 634 | */ |
| 650 | pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); | 635 | pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); |
| 651 | 636 | ||
| 637 | dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu llimax=%zu\n", | ||
| 638 | bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "", | ||
| 639 | bd.srcbus.buswidth, | ||
| 640 | bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "", | ||
| 641 | bd.dstbus.buswidth, | ||
| 642 | bd.remainder, max_bytes_per_lli); | ||
| 643 | dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n", | ||
| 644 | mbus == &bd.srcbus ? "src" : "dst", | ||
| 645 | sbus == &bd.srcbus ? "src" : "dst"); | ||
| 646 | |||
| 652 | if (txd->len < mbus->buswidth) { | 647 | if (txd->len < mbus->buswidth) { |
| 653 | /* Less than a bus width available - send as single bytes */ | 648 | /* Less than a bus width available - send as single bytes */ |
| 654 | while (bd.remainder) { | 649 | while (bd.remainder) { |
| @@ -840,15 +835,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, | |||
| 840 | { | 835 | { |
| 841 | int i; | 836 | int i; |
| 842 | 837 | ||
| 838 | dev_vdbg(&pl08x->adev->dev, | ||
| 839 | "%-3s %-9s %-10s %-10s %-10s %s\n", | ||
| 840 | "lli", "", "csrc", "cdst", "clli", "cctl"); | ||
| 843 | for (i = 0; i < num_llis; i++) { | 841 | for (i = 0; i < num_llis; i++) { |
| 844 | dev_vdbg(&pl08x->adev->dev, | 842 | dev_vdbg(&pl08x->adev->dev, |
| 845 | "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n", | 843 | "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n", |
| 846 | i, | 844 | i, &llis_va[i], llis_va[i].src, |
| 847 | &llis_va[i], | 845 | llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl |
| 848 | llis_va[i].src, | ||
| 849 | llis_va[i].dst, | ||
| 850 | llis_va[i].cctl, | ||
| 851 | llis_va[i].lli | ||
| 852 | ); | 846 | ); |
| 853 | } | 847 | } |
| 854 | } | 848 | } |
| @@ -1054,64 +1048,105 @@ pl08x_dma_tx_status(struct dma_chan *chan, | |||
| 1054 | 1048 | ||
| 1055 | /* PrimeCell DMA extension */ | 1049 | /* PrimeCell DMA extension */ |
| 1056 | struct burst_table { | 1050 | struct burst_table { |
| 1057 | int burstwords; | 1051 | u32 burstwords; |
| 1058 | u32 reg; | 1052 | u32 reg; |
| 1059 | }; | 1053 | }; |
| 1060 | 1054 | ||
| 1061 | static const struct burst_table burst_sizes[] = { | 1055 | static const struct burst_table burst_sizes[] = { |
| 1062 | { | 1056 | { |
| 1063 | .burstwords = 256, | 1057 | .burstwords = 256, |
| 1064 | .reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1058 | .reg = PL080_BSIZE_256, |
| 1065 | (PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
| 1066 | }, | 1059 | }, |
| 1067 | { | 1060 | { |
| 1068 | .burstwords = 128, | 1061 | .burstwords = 128, |
| 1069 | .reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1062 | .reg = PL080_BSIZE_128, |
| 1070 | (PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
| 1071 | }, | 1063 | }, |
| 1072 | { | 1064 | { |
| 1073 | .burstwords = 64, | 1065 | .burstwords = 64, |
| 1074 | .reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1066 | .reg = PL080_BSIZE_64, |
| 1075 | (PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
| 1076 | }, | 1067 | }, |
| 1077 | { | 1068 | { |
| 1078 | .burstwords = 32, | 1069 | .burstwords = 32, |
| 1079 | .reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1070 | .reg = PL080_BSIZE_32, |
| 1080 | (PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
| 1081 | }, | 1071 | }, |
| 1082 | { | 1072 | { |
| 1083 | .burstwords = 16, | 1073 | .burstwords = 16, |
| 1084 | .reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1074 | .reg = PL080_BSIZE_16, |
| 1085 | (PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
| 1086 | }, | 1075 | }, |
| 1087 | { | 1076 | { |
| 1088 | .burstwords = 8, | 1077 | .burstwords = 8, |
| 1089 | .reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1078 | .reg = PL080_BSIZE_8, |
| 1090 | (PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
| 1091 | }, | 1079 | }, |
| 1092 | { | 1080 | { |
| 1093 | .burstwords = 4, | 1081 | .burstwords = 4, |
| 1094 | .reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1082 | .reg = PL080_BSIZE_4, |
| 1095 | (PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
| 1096 | }, | 1083 | }, |
| 1097 | { | 1084 | { |
| 1098 | .burstwords = 1, | 1085 | .burstwords = 0, |
| 1099 | .reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1086 | .reg = PL080_BSIZE_1, |
| 1100 | (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
| 1101 | }, | 1087 | }, |
| 1102 | }; | 1088 | }; |
| 1103 | 1089 | ||
| 1090 | /* | ||
| 1091 | * Given the source and destination available bus masks, select which | ||
| 1092 | * will be routed to each port. We try to have source and destination | ||
| 1093 | * on separate ports, but always respect the allowable settings. | ||
| 1094 | */ | ||
| 1095 | static u32 pl08x_select_bus(u8 src, u8 dst) | ||
| 1096 | { | ||
| 1097 | u32 cctl = 0; | ||
| 1098 | |||
| 1099 | if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1))) | ||
| 1100 | cctl |= PL080_CONTROL_DST_AHB2; | ||
| 1101 | if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2))) | ||
| 1102 | cctl |= PL080_CONTROL_SRC_AHB2; | ||
| 1103 | |||
| 1104 | return cctl; | ||
| 1105 | } | ||
| 1106 | |||
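As an illustration of the routing rule above (a sketch, not part of the patch itself): with memory visible on both AHB masters and a peripheral reachable only on AHB2, the helper keeps the two ends of the transfer on separate ports:

	u32 cctl = 0;

	/* memory on AHB1|AHB2, peripheral only on AHB2 */
	cctl |= pl08x_select_bus(PL08X_AHB1 | PL08X_AHB2, PL08X_AHB2);
	/*
	 * only PL080_CONTROL_DST_AHB2 ends up set: the source is fetched
	 * over AHB1 while the destination is written over AHB2
	 */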
| 1107 | static u32 pl08x_cctl(u32 cctl) | ||
| 1108 | { | ||
| 1109 | cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 | | ||
| 1110 | PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR | | ||
| 1111 | PL080_CONTROL_PROT_MASK); | ||
| 1112 | |||
| 1113 | /* Access the cell in privileged mode, non-bufferable, non-cacheable */ | ||
| 1114 | return cctl | PL080_CONTROL_PROT_SYS; | ||
| 1115 | } | ||
| 1116 | |||
| 1117 | static u32 pl08x_width(enum dma_slave_buswidth width) | ||
| 1118 | { | ||
| 1119 | switch (width) { | ||
| 1120 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
| 1121 | return PL080_WIDTH_8BIT; | ||
| 1122 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
| 1123 | return PL080_WIDTH_16BIT; | ||
| 1124 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
| 1125 | return PL080_WIDTH_32BIT; | ||
| 1126 | default: | ||
| 1127 | return ~0; | ||
| 1128 | } | ||
| 1129 | } | ||
| 1130 | |||
| 1131 | static u32 pl08x_burst(u32 maxburst) | ||
| 1132 | { | ||
| 1133 | int i; | ||
| 1134 | |||
| 1135 | for (i = 0; i < ARRAY_SIZE(burst_sizes); i++) | ||
| 1136 | if (burst_sizes[i].burstwords <= maxburst) | ||
| 1137 | break; | ||
| 1138 | |||
| 1139 | return burst_sizes[i].reg; | ||
| 1140 | } | ||
| 1141 | |||
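The two helpers are combined roughly as sketched below (an illustration of what dma_set_runtime_config() does with them, not additional patch code). pl08x_burst() walks burst_sizes[] from largest to smallest and returns the first encoding that does not exceed the requested maxburst; a maxburst of 0 falls through to the terminating single-transfer entry:

	u32 cctl = 0, width, burst;

	width = pl08x_width(DMA_SLAVE_BUSWIDTH_4_BYTES);	/* PL080_WIDTH_32BIT */
	burst = pl08x_burst(20);				/* 16-word encoding */
	cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
	cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;
	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;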
| 1104 | static int dma_set_runtime_config(struct dma_chan *chan, | 1142 | static int dma_set_runtime_config(struct dma_chan *chan, |
| 1105 | struct dma_slave_config *config) | 1143 | struct dma_slave_config *config) |
| 1106 | { | 1144 | { |
| 1107 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | 1145 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); |
| 1108 | struct pl08x_driver_data *pl08x = plchan->host; | 1146 | struct pl08x_driver_data *pl08x = plchan->host; |
| 1109 | struct pl08x_channel_data *cd = plchan->cd; | ||
| 1110 | enum dma_slave_buswidth addr_width; | 1147 | enum dma_slave_buswidth addr_width; |
| 1111 | dma_addr_t addr; | 1148 | u32 width, burst, maxburst; |
| 1112 | u32 maxburst; | ||
| 1113 | u32 cctl = 0; | 1149 | u32 cctl = 0; |
| 1114 | int i; | ||
| 1115 | 1150 | ||
| 1116 | if (!plchan->slave) | 1151 | if (!plchan->slave) |
| 1117 | return -EINVAL; | 1152 | return -EINVAL; |
| @@ -1119,11 +1154,9 @@ static int dma_set_runtime_config(struct dma_chan *chan, | |||
| 1119 | /* Transfer direction */ | 1154 | /* Transfer direction */ |
| 1120 | plchan->runtime_direction = config->direction; | 1155 | plchan->runtime_direction = config->direction; |
| 1121 | if (config->direction == DMA_TO_DEVICE) { | 1156 | if (config->direction == DMA_TO_DEVICE) { |
| 1122 | addr = config->dst_addr; | ||
| 1123 | addr_width = config->dst_addr_width; | 1157 | addr_width = config->dst_addr_width; |
| 1124 | maxburst = config->dst_maxburst; | 1158 | maxburst = config->dst_maxburst; |
| 1125 | } else if (config->direction == DMA_FROM_DEVICE) { | 1159 | } else if (config->direction == DMA_FROM_DEVICE) { |
| 1126 | addr = config->src_addr; | ||
| 1127 | addr_width = config->src_addr_width; | 1160 | addr_width = config->src_addr_width; |
| 1128 | maxburst = config->src_maxburst; | 1161 | maxburst = config->src_maxburst; |
| 1129 | } else { | 1162 | } else { |
| @@ -1132,46 +1165,40 @@ static int dma_set_runtime_config(struct dma_chan *chan, | |||
| 1132 | return -EINVAL; | 1165 | return -EINVAL; |
| 1133 | } | 1166 | } |
| 1134 | 1167 | ||
| 1135 | switch (addr_width) { | 1168 | width = pl08x_width(addr_width); |
| 1136 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | 1169 | if (width == ~0) { |
| 1137 | cctl |= (PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) | | ||
| 1138 | (PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT); | ||
| 1139 | break; | ||
| 1140 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
| 1141 | cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) | | ||
| 1142 | (PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT); | ||
| 1143 | break; | ||
| 1144 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
| 1145 | cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) | | ||
| 1146 | (PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT); | ||
| 1147 | break; | ||
| 1148 | default: | ||
| 1149 | dev_err(&pl08x->adev->dev, | 1170 | dev_err(&pl08x->adev->dev, |
| 1150 | "bad runtime_config: alien address width\n"); | 1171 | "bad runtime_config: alien address width\n"); |
| 1151 | return -EINVAL; | 1172 | return -EINVAL; |
| 1152 | } | 1173 | } |
| 1153 | 1174 | ||
| 1175 | cctl |= width << PL080_CONTROL_SWIDTH_SHIFT; | ||
| 1176 | cctl |= width << PL080_CONTROL_DWIDTH_SHIFT; | ||
| 1177 | |||
| 1154 | /* | 1178 | /* |
| 1155 | * Now decide on a maxburst: | ||
| 1156 | * If this channel will only request single transfers, set this | 1179 | * If this channel will only request single transfers, set this |
| 1157 | * down to ONE element. Also select one element if no maxburst | 1180 | * down to ONE element. Also select one element if no maxburst |
| 1158 | * is specified. | 1181 | * is specified. |
| 1159 | */ | 1182 | */ |
| 1160 | if (plchan->cd->single || maxburst == 0) { | 1183 | if (plchan->cd->single) |
| 1161 | cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1184 | maxburst = 1; |
| 1162 | (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT); | 1185 | |
| 1186 | burst = pl08x_burst(maxburst); | ||
| 1187 | cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT; | ||
| 1188 | cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT; | ||
| 1189 | |||
| 1190 | if (plchan->runtime_direction == DMA_FROM_DEVICE) { | ||
| 1191 | plchan->src_addr = config->src_addr; | ||
| 1192 | plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR | | ||
| 1193 | pl08x_select_bus(plchan->cd->periph_buses, | ||
| 1194 | pl08x->mem_buses); | ||
| 1163 | } else { | 1195 | } else { |
| 1164 | for (i = 0; i < ARRAY_SIZE(burst_sizes); i++) | 1196 | plchan->dst_addr = config->dst_addr; |
| 1165 | if (burst_sizes[i].burstwords <= maxburst) | 1197 | plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR | |
| 1166 | break; | 1198 | pl08x_select_bus(pl08x->mem_buses, |
| 1167 | cctl |= burst_sizes[i].reg; | 1199 | plchan->cd->periph_buses); |
| 1168 | } | 1200 | } |
| 1169 | 1201 | ||
| 1170 | plchan->runtime_addr = addr; | ||
| 1171 | |||
| 1172 | /* Modify the default channel data to fit PrimeCell request */ | ||
| 1173 | cd->cctl = cctl; | ||
| 1174 | |||
| 1175 | dev_dbg(&pl08x->adev->dev, | 1202 | dev_dbg(&pl08x->adev->dev, |
| 1176 | "configured channel %s (%s) for %s, data width %d, " | 1203 | "configured channel %s (%s) for %s, data width %d, " |
| 1177 | "maxburst %d words, LE, CCTL=0x%08x\n", | 1204 | "maxburst %d words, LE, CCTL=0x%08x\n", |
| @@ -1270,23 +1297,6 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, | |||
| 1270 | return 0; | 1297 | return 0; |
| 1271 | } | 1298 | } |
| 1272 | 1299 | ||
| 1273 | /* | ||
| 1274 | * Given the source and destination available bus masks, select which | ||
| 1275 | * will be routed to each port. We try to have source and destination | ||
| 1276 | * on separate ports, but always respect the allowable settings. | ||
| 1277 | */ | ||
| 1278 | static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst) | ||
| 1279 | { | ||
| 1280 | u32 cctl = 0; | ||
| 1281 | |||
| 1282 | if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1))) | ||
| 1283 | cctl |= PL080_CONTROL_DST_AHB2; | ||
| 1284 | if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2))) | ||
| 1285 | cctl |= PL080_CONTROL_SRC_AHB2; | ||
| 1286 | |||
| 1287 | return cctl; | ||
| 1288 | } | ||
| 1289 | |||
| 1290 | static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, | 1300 | static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, |
| 1291 | unsigned long flags) | 1301 | unsigned long flags) |
| 1292 | { | 1302 | { |
| @@ -1338,8 +1348,8 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( | |||
| 1338 | txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; | 1348 | txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; |
| 1339 | 1349 | ||
| 1340 | if (pl08x->vd->dualmaster) | 1350 | if (pl08x->vd->dualmaster) |
| 1341 | txd->cctl |= pl08x_select_bus(pl08x, | 1351 | txd->cctl |= pl08x_select_bus(pl08x->mem_buses, |
| 1342 | pl08x->mem_buses, pl08x->mem_buses); | 1352 | pl08x->mem_buses); |
| 1343 | 1353 | ||
| 1344 | ret = pl08x_prep_channel_resources(plchan, txd); | 1354 | ret = pl08x_prep_channel_resources(plchan, txd); |
| 1345 | if (ret) | 1355 | if (ret) |
| @@ -1356,7 +1366,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( | |||
| 1356 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | 1366 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); |
| 1357 | struct pl08x_driver_data *pl08x = plchan->host; | 1367 | struct pl08x_driver_data *pl08x = plchan->host; |
| 1358 | struct pl08x_txd *txd; | 1368 | struct pl08x_txd *txd; |
| 1359 | u8 src_buses, dst_buses; | ||
| 1360 | int ret; | 1369 | int ret; |
| 1361 | 1370 | ||
| 1362 | /* | 1371 | /* |
| @@ -1390,42 +1399,22 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( | |||
| 1390 | txd->direction = direction; | 1399 | txd->direction = direction; |
| 1391 | txd->len = sgl->length; | 1400 | txd->len = sgl->length; |
| 1392 | 1401 | ||
| 1393 | txd->cctl = plchan->cd->cctl & | ||
| 1394 | ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 | | ||
| 1395 | PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR | | ||
| 1396 | PL080_CONTROL_PROT_MASK); | ||
| 1397 | |||
| 1398 | /* Access the cell in privileged mode, non-bufferable, non-cacheable */ | ||
| 1399 | txd->cctl |= PL080_CONTROL_PROT_SYS; | ||
| 1400 | |||
| 1401 | if (direction == DMA_TO_DEVICE) { | 1402 | if (direction == DMA_TO_DEVICE) { |
| 1402 | txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT; | 1403 | txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT; |
| 1403 | txd->cctl |= PL080_CONTROL_SRC_INCR; | 1404 | txd->cctl = plchan->dst_cctl; |
| 1404 | txd->src_addr = sgl->dma_address; | 1405 | txd->src_addr = sgl->dma_address; |
| 1405 | if (plchan->runtime_addr) | 1406 | txd->dst_addr = plchan->dst_addr; |
| 1406 | txd->dst_addr = plchan->runtime_addr; | ||
| 1407 | else | ||
| 1408 | txd->dst_addr = plchan->cd->addr; | ||
| 1409 | src_buses = pl08x->mem_buses; | ||
| 1410 | dst_buses = plchan->cd->periph_buses; | ||
| 1411 | } else if (direction == DMA_FROM_DEVICE) { | 1407 | } else if (direction == DMA_FROM_DEVICE) { |
| 1412 | txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; | 1408 | txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; |
| 1413 | txd->cctl |= PL080_CONTROL_DST_INCR; | 1409 | txd->cctl = plchan->src_cctl; |
| 1414 | if (plchan->runtime_addr) | 1410 | txd->src_addr = plchan->src_addr; |
| 1415 | txd->src_addr = plchan->runtime_addr; | ||
| 1416 | else | ||
| 1417 | txd->src_addr = plchan->cd->addr; | ||
| 1418 | txd->dst_addr = sgl->dma_address; | 1411 | txd->dst_addr = sgl->dma_address; |
| 1419 | src_buses = plchan->cd->periph_buses; | ||
| 1420 | dst_buses = pl08x->mem_buses; | ||
| 1421 | } else { | 1412 | } else { |
| 1422 | dev_err(&pl08x->adev->dev, | 1413 | dev_err(&pl08x->adev->dev, |
| 1423 | "%s direction unsupported\n", __func__); | 1414 | "%s direction unsupported\n", __func__); |
| 1424 | return NULL; | 1415 | return NULL; |
| 1425 | } | 1416 | } |
| 1426 | 1417 | ||
| 1427 | txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses); | ||
| 1428 | |||
| 1429 | ret = pl08x_prep_channel_resources(plchan, txd); | 1418 | ret = pl08x_prep_channel_resources(plchan, txd); |
| 1430 | if (ret) | 1419 | if (ret) |
| 1431 | return NULL; | 1420 | return NULL; |
| @@ -1676,6 +1665,20 @@ static irqreturn_t pl08x_irq(int irq, void *dev) | |||
| 1676 | return mask ? IRQ_HANDLED : IRQ_NONE; | 1665 | return mask ? IRQ_HANDLED : IRQ_NONE; |
| 1677 | } | 1666 | } |
| 1678 | 1667 | ||
| 1668 | static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan) | ||
| 1669 | { | ||
| 1670 | u32 cctl = pl08x_cctl(chan->cd->cctl); | ||
| 1671 | |||
| 1672 | chan->slave = true; | ||
| 1673 | chan->name = chan->cd->bus_id; | ||
| 1674 | chan->src_addr = chan->cd->addr; | ||
| 1675 | chan->dst_addr = chan->cd->addr; | ||
| 1676 | chan->src_cctl = cctl | PL080_CONTROL_DST_INCR | | ||
| 1677 | pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses); | ||
| 1678 | chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR | | ||
| 1679 | pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses); | ||
| 1680 | } | ||
| 1681 | |||
| 1679 | /* | 1682 | /* |
| 1680 | * Initialise the DMAC memcpy/slave channels. | 1683 | * Initialise the DMAC memcpy/slave channels. |
| 1681 | * Make a local wrapper to hold required data | 1684 | * Make a local wrapper to hold required data |
| @@ -1707,9 +1710,8 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, | |||
| 1707 | chan->state = PL08X_CHAN_IDLE; | 1710 | chan->state = PL08X_CHAN_IDLE; |
| 1708 | 1711 | ||
| 1709 | if (slave) { | 1712 | if (slave) { |
| 1710 | chan->slave = true; | ||
| 1711 | chan->name = pl08x->pd->slave_channels[i].bus_id; | ||
| 1712 | chan->cd = &pl08x->pd->slave_channels[i]; | 1713 | chan->cd = &pl08x->pd->slave_channels[i]; |
| 1714 | pl08x_dma_slave_init(chan); | ||
| 1713 | } else { | 1715 | } else { |
| 1714 | chan->cd = &pl08x->pd->memcpy_channel; | 1716 | chan->cd = &pl08x->pd->memcpy_channel; |
| 1715 | chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); | 1717 | chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); |
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 36144f88d718..6a483eac7b3f 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c | |||
| @@ -1216,7 +1216,7 @@ static int __init at_dma_probe(struct platform_device *pdev) | |||
| 1216 | atdma->dma_common.cap_mask = pdata->cap_mask; | 1216 | atdma->dma_common.cap_mask = pdata->cap_mask; |
| 1217 | atdma->all_chan_mask = (1 << pdata->nr_channels) - 1; | 1217 | atdma->all_chan_mask = (1 << pdata->nr_channels) - 1; |
| 1218 | 1218 | ||
| 1219 | size = io->end - io->start + 1; | 1219 | size = resource_size(io); |
| 1220 | if (!request_mem_region(io->start, size, pdev->dev.driver->name)) { | 1220 | if (!request_mem_region(io->start, size, pdev->dev.driver->name)) { |
| 1221 | err = -EBUSY; | 1221 | err = -EBUSY; |
| 1222 | goto err_kfree; | 1222 | goto err_kfree; |
| @@ -1362,7 +1362,7 @@ static int __exit at_dma_remove(struct platform_device *pdev) | |||
| 1362 | atdma->regs = NULL; | 1362 | atdma->regs = NULL; |
| 1363 | 1363 | ||
| 1364 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1364 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 1365 | release_mem_region(io->start, io->end - io->start + 1); | 1365 | release_mem_region(io->start, resource_size(io)); |
| 1366 | 1366 | ||
| 1367 | kfree(atdma); | 1367 | kfree(atdma); |
| 1368 | 1368 | ||
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index af8c0b5ed70f..a7fca1653933 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c | |||
| @@ -40,6 +40,8 @@ struct coh901318_desc { | |||
| 40 | struct coh901318_lli *lli; | 40 | struct coh901318_lli *lli; |
| 41 | enum dma_data_direction dir; | 41 | enum dma_data_direction dir; |
| 42 | unsigned long flags; | 42 | unsigned long flags; |
| 43 | u32 head_config; | ||
| 44 | u32 head_ctrl; | ||
| 43 | }; | 45 | }; |
| 44 | 46 | ||
| 45 | struct coh901318_base { | 47 | struct coh901318_base { |
| @@ -660,6 +662,9 @@ static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc) | |||
| 660 | 662 | ||
| 661 | coh901318_desc_submit(cohc, cohd); | 663 | coh901318_desc_submit(cohc, cohd); |
| 662 | 664 | ||
| 665 | /* Program the transaction head */ | ||
| 666 | coh901318_set_conf(cohc, cohd->head_config); | ||
| 667 | coh901318_set_ctrl(cohc, cohd->head_ctrl); | ||
| 663 | coh901318_prep_linked_list(cohc, cohd->lli); | 668 | coh901318_prep_linked_list(cohc, cohd->lli); |
| 664 | 669 | ||
| 665 | /* start dma job on this channel */ | 670 | /* start dma job on this channel */ |
| @@ -1090,8 +1095,6 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
| 1090 | } else | 1095 | } else |
| 1091 | goto err_direction; | 1096 | goto err_direction; |
| 1092 | 1097 | ||
| 1093 | coh901318_set_conf(cohc, config); | ||
| 1094 | |||
| 1095 | /* The dma only supports transmitting packages up to | 1098 | /* The dma only supports transmitting packages up to |
| 1096 | * MAX_DMA_PACKET_SIZE. Calculate the total number of | 1099 |
| 1097 | * dma elements required to send the entire sg list | 1100 |

| @@ -1128,16 +1131,18 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
| 1128 | if (ret) | 1131 | if (ret) |
| 1129 | goto err_lli_fill; | 1132 | goto err_lli_fill; |
| 1130 | 1133 | ||
| 1131 | /* | ||
| 1132 | * Set the default ctrl for the channel to the one from the lli, | ||
| 1133 | * things may have changed due to odd buffer alignment etc. | ||
| 1134 | */ | ||
| 1135 | coh901318_set_ctrl(cohc, lli->control); | ||
| 1136 | 1134 | ||
| 1137 | COH_DBG(coh901318_list_print(cohc, lli)); | 1135 | COH_DBG(coh901318_list_print(cohc, lli)); |
| 1138 | 1136 | ||
| 1139 | /* Pick a descriptor to handle this transfer */ | 1137 | /* Pick a descriptor to handle this transfer */ |
| 1140 | cohd = coh901318_desc_get(cohc); | 1138 | cohd = coh901318_desc_get(cohc); |
| 1139 | cohd->head_config = config; | ||
| 1140 | /* | ||
| 1141 | * Set the default head ctrl for the channel to the one from the | ||
| 1142 | * lli, things may have changed due to odd buffer alignment | ||
| 1143 | * etc. | ||
| 1144 | */ | ||
| 1145 | cohd->head_ctrl = lli->control; | ||
| 1141 | cohd->dir = direction; | 1146 | cohd->dir = direction; |
| 1142 | cohd->flags = flags; | 1147 | cohd->flags = flags; |
| 1143 | cohd->desc.tx_submit = coh901318_tx_submit; | 1148 | cohd->desc.tx_submit = coh901318_tx_submit; |
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 8bcb15fb959d..f7f21a5de3e1 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
| @@ -509,8 +509,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v | |||
| 509 | dma_chan_name(chan)); | 509 | dma_chan_name(chan)); |
| 510 | list_del_rcu(&device->global_node); | 510 | list_del_rcu(&device->global_node); |
| 511 | } else if (err) | 511 | } else if (err) |
| 512 | pr_err("dmaengine: failed to get %s: (%d)\n", | 512 | pr_debug("dmaengine: failed to get %s: (%d)\n", |
| 513 | dma_chan_name(chan), err); | 513 | dma_chan_name(chan), err); |
| 514 | else | 514 | else |
| 515 | break; | 515 | break; |
| 516 | if (--device->privatecnt == 0) | 516 | if (--device->privatecnt == 0) |
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c new file mode 100644 index 000000000000..5d7a49bd7c26 --- /dev/null +++ b/drivers/dma/ep93xx_dma.c | |||
| @@ -0,0 +1,1355 @@ | |||
| 1 | /* | ||
| 2 | * Driver for the Cirrus Logic EP93xx DMA Controller | ||
| 3 | * | ||
| 4 | * Copyright (C) 2011 Mika Westerberg | ||
| 5 | * | ||
| 6 | * DMA M2P implementation is based on the original | ||
| 7 | * arch/arm/mach-ep93xx/dma-m2p.c which has following copyrights: | ||
| 8 | * | ||
| 9 | * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> | ||
| 10 | * Copyright (C) 2006 Applied Data Systems | ||
| 11 | * Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com> | ||
| 12 | * | ||
| 13 | * This driver is based on dw_dmac and amba-pl08x drivers. | ||
| 14 | * | ||
| 15 | * This program is free software; you can redistribute it and/or modify | ||
| 16 | * it under the terms of the GNU General Public License as published by | ||
| 17 | * the Free Software Foundation; either version 2 of the License, or | ||
| 18 | * (at your option) any later version. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include <linux/clk.h> | ||
| 22 | #include <linux/init.h> | ||
| 23 | #include <linux/interrupt.h> | ||
| 24 | #include <linux/dmaengine.h> | ||
| 25 | #include <linux/platform_device.h> | ||
| 26 | #include <linux/slab.h> | ||
| 27 | |||
| 28 | #include <mach/dma.h> | ||
| 29 | |||
| 30 | /* M2P registers */ | ||
| 31 | #define M2P_CONTROL 0x0000 | ||
| 32 | #define M2P_CONTROL_STALLINT BIT(0) | ||
| 33 | #define M2P_CONTROL_NFBINT BIT(1) | ||
| 34 | #define M2P_CONTROL_CH_ERROR_INT BIT(3) | ||
| 35 | #define M2P_CONTROL_ENABLE BIT(4) | ||
| 36 | #define M2P_CONTROL_ICE BIT(6) | ||
| 37 | |||
| 38 | #define M2P_INTERRUPT 0x0004 | ||
| 39 | #define M2P_INTERRUPT_STALL BIT(0) | ||
| 40 | #define M2P_INTERRUPT_NFB BIT(1) | ||
| 41 | #define M2P_INTERRUPT_ERROR BIT(3) | ||
| 42 | |||
| 43 | #define M2P_PPALLOC 0x0008 | ||
| 44 | #define M2P_STATUS 0x000c | ||
| 45 | |||
| 46 | #define M2P_MAXCNT0 0x0020 | ||
| 47 | #define M2P_BASE0 0x0024 | ||
| 48 | #define M2P_MAXCNT1 0x0030 | ||
| 49 | #define M2P_BASE1 0x0034 | ||
| 50 | |||
| 51 | #define M2P_STATE_IDLE 0 | ||
| 52 | #define M2P_STATE_STALL 1 | ||
| 53 | #define M2P_STATE_ON 2 | ||
| 54 | #define M2P_STATE_NEXT 3 | ||
| 55 | |||
| 56 | /* M2M registers */ | ||
| 57 | #define M2M_CONTROL 0x0000 | ||
| 58 | #define M2M_CONTROL_DONEINT BIT(2) | ||
| 59 | #define M2M_CONTROL_ENABLE BIT(3) | ||
| 60 | #define M2M_CONTROL_START BIT(4) | ||
| 61 | #define M2M_CONTROL_DAH BIT(11) | ||
| 62 | #define M2M_CONTROL_SAH BIT(12) | ||
| 63 | #define M2M_CONTROL_PW_SHIFT 9 | ||
| 64 | #define M2M_CONTROL_PW_8 (0 << M2M_CONTROL_PW_SHIFT) | ||
| 65 | #define M2M_CONTROL_PW_16 (1 << M2M_CONTROL_PW_SHIFT) | ||
| 66 | #define M2M_CONTROL_PW_32 (2 << M2M_CONTROL_PW_SHIFT) | ||
| 67 | #define M2M_CONTROL_PW_MASK (3 << M2M_CONTROL_PW_SHIFT) | ||
| 68 | #define M2M_CONTROL_TM_SHIFT 13 | ||
| 69 | #define M2M_CONTROL_TM_TX (1 << M2M_CONTROL_TM_SHIFT) | ||
| 70 | #define M2M_CONTROL_TM_RX (2 << M2M_CONTROL_TM_SHIFT) | ||
| 71 | #define M2M_CONTROL_RSS_SHIFT 22 | ||
| 72 | #define M2M_CONTROL_RSS_SSPRX (1 << M2M_CONTROL_RSS_SHIFT) | ||
| 73 | #define M2M_CONTROL_RSS_SSPTX (2 << M2M_CONTROL_RSS_SHIFT) | ||
| 74 | #define M2M_CONTROL_RSS_IDE (3 << M2M_CONTROL_RSS_SHIFT) | ||
| 75 | #define M2M_CONTROL_NO_HDSK BIT(24) | ||
| 76 | #define M2M_CONTROL_PWSC_SHIFT 25 | ||
| 77 | |||
| 78 | #define M2M_INTERRUPT 0x0004 | ||
| 79 | #define M2M_INTERRUPT_DONEINT BIT(1) | ||
| 80 | |||
| 81 | #define M2M_BCR0 0x0010 | ||
| 82 | #define M2M_BCR1 0x0014 | ||
| 83 | #define M2M_SAR_BASE0 0x0018 | ||
| 84 | #define M2M_SAR_BASE1 0x001c | ||
| 85 | #define M2M_DAR_BASE0 0x002c | ||
| 86 | #define M2M_DAR_BASE1 0x0030 | ||
| 87 | |||
| 88 | #define DMA_MAX_CHAN_BYTES 0xffff | ||
| 89 | #define DMA_MAX_CHAN_DESCRIPTORS 32 | ||
| 90 | |||
| 91 | struct ep93xx_dma_engine; | ||
| 92 | |||
| 93 | /** | ||
| 94 | * struct ep93xx_dma_desc - EP93xx specific transaction descriptor | ||
| 95 | * @src_addr: source address of the transaction | ||
| 96 | * @dst_addr: destination address of the transaction | ||
| 97 | * @size: size of the transaction (in bytes) | ||
| 98 | * @complete: this descriptor is completed | ||
| 99 | * @txd: dmaengine API descriptor | ||
| 100 | * @tx_list: list of linked descriptors | ||
| 101 | * @node: link used for putting this into a channel queue | ||
| 102 | */ | ||
| 103 | struct ep93xx_dma_desc { | ||
| 104 | u32 src_addr; | ||
| 105 | u32 dst_addr; | ||
| 106 | size_t size; | ||
| 107 | bool complete; | ||
| 108 | struct dma_async_tx_descriptor txd; | ||
| 109 | struct list_head tx_list; | ||
| 110 | struct list_head node; | ||
| 111 | }; | ||
| 112 | |||
| 113 | /** | ||
| 114 | * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel | ||
| 115 | * @chan: dmaengine API channel | ||
| 116 | * @edma: pointer to the engine device | ||
| 117 | * @regs: memory mapped registers | ||
| 118 | * @irq: interrupt number of the channel | ||
| 119 | * @clk: clock used by this channel | ||
| 120 | * @tasklet: channel specific tasklet used for callbacks | ||
| 121 | * @lock: lock protecting the fields following | ||
| 122 | * @flags: flags for the channel | ||
| 123 | * @buffer: which buffer to use next (0/1) | ||
| 124 | * @last_completed: last completed cookie value | ||
| 125 | * @active: flattened chain of descriptors currently being processed | ||
| 126 | * @queue: pending descriptors which are handled next | ||
| 127 | * @free_list: list of free descriptors which can be used | ||
| 128 | * @runtime_addr: physical address currently used as dest/src (M2M only). This | ||
| 129 | * is set via %DMA_SLAVE_CONFIG before slave operation is | ||
| 130 | * prepared | ||
| 131 | * @runtime_ctrl: M2M runtime values for the control register. | ||
| 132 | * | ||
| 133 | * As EP93xx DMA controller doesn't support real chained DMA descriptors we | ||
| 134 | * will have slightly different scheme here: @active points to a head of | ||
| 135 | * flattened DMA descriptor chain. | ||
| 136 | * | ||
| 137 | * @queue holds pending transactions. These are linked through the first | ||
| 138 | * descriptor in the chain. When a descriptor is moved to the @active queue, | ||
| 139 | * the first and chained descriptors are flattened into a single list. | ||
| 140 | * | ||
| 141 | * @chan.private holds pointer to &struct ep93xx_dma_data which contains | ||
| 142 | * necessary channel configuration information. For memcpy channels this must | ||
| 143 | * be %NULL. | ||
| 144 | */ | ||
| 145 | struct ep93xx_dma_chan { | ||
| 146 | struct dma_chan chan; | ||
| 147 | const struct ep93xx_dma_engine *edma; | ||
| 148 | void __iomem *regs; | ||
| 149 | int irq; | ||
| 150 | struct clk *clk; | ||
| 151 | struct tasklet_struct tasklet; | ||
| 152 | /* protects the fields following */ | ||
| 153 | spinlock_t lock; | ||
| 154 | unsigned long flags; | ||
| 155 | /* Channel is configured for cyclic transfers */ | ||
| 156 | #define EP93XX_DMA_IS_CYCLIC 0 | ||
| 157 | |||
| 158 | int buffer; | ||
| 159 | dma_cookie_t last_completed; | ||
| 160 | struct list_head active; | ||
| 161 | struct list_head queue; | ||
| 162 | struct list_head free_list; | ||
| 163 | u32 runtime_addr; | ||
| 164 | u32 runtime_ctrl; | ||
| 165 | }; | ||
| 166 | |||
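As the kernel-doc above notes, slave and cyclic channels carry their configuration in chan->private as a struct ep93xx_dma_data. A client-side sketch of binding that data through a filter function might look as follows; the filter itself is a simplified assumption for illustration, and a real one would also check that the candidate channel belongs to the right controller:

	static struct ep93xx_dma_data ssp_tx_data = {
		.port		= EP93XX_DMA_SSP,
		.direction	= DMA_TO_DEVICE,
		.name		= "ssp-tx",
	};

	static bool ssp_dma_filter(struct dma_chan *chan, void *filter_param)
	{
		/* attach our channel configuration to the candidate channel */
		chan->private = filter_param;
		return true;
	}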
| 167 | /** | ||
| 168 | * struct ep93xx_dma_engine - the EP93xx DMA engine instance | ||
| 169 | * @dma_dev: holds the dmaengine device | ||
| 170 | * @m2m: is this an M2M or M2P device | ||
| 171 | * @hw_setup: method which sets the channel up for operation | ||
| 172 | * @hw_shutdown: shuts the channel down and flushes whatever is left | ||
| 173 | * @hw_submit: pushes active descriptor(s) to the hardware | ||
| 174 | * @hw_interrupt: handle the interrupt | ||
| 175 | * @num_channels: number of channels for this instance | ||
| 176 | * @channels: array of channels | ||
| 177 | * | ||
| 178 | * There is one instance of this struct for the M2P channels and one for the | ||
| 179 | * M2M channels. hw_xxx() methods are used to perform operations which are | ||
| 180 | * different on M2M and M2P channels. These methods are called with channel | ||
| 181 | * lock held and interrupts disabled so they cannot sleep. | ||
| 182 | */ | ||
| 183 | struct ep93xx_dma_engine { | ||
| 184 | struct dma_device dma_dev; | ||
| 185 | bool m2m; | ||
| 186 | int (*hw_setup)(struct ep93xx_dma_chan *); | ||
| 187 | void (*hw_shutdown)(struct ep93xx_dma_chan *); | ||
| 188 | void (*hw_submit)(struct ep93xx_dma_chan *); | ||
| 189 | int (*hw_interrupt)(struct ep93xx_dma_chan *); | ||
| 190 | #define INTERRUPT_UNKNOWN 0 | ||
| 191 | #define INTERRUPT_DONE 1 | ||
| 192 | #define INTERRUPT_NEXT_BUFFER 2 | ||
| 193 | |||
| 194 | size_t num_channels; | ||
| 195 | struct ep93xx_dma_chan channels[]; | ||
| 196 | }; | ||
| 197 | |||
| 198 | static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac) | ||
| 199 | { | ||
| 200 | return &edmac->chan.dev->device; | ||
| 201 | } | ||
| 202 | |||
| 203 | static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan) | ||
| 204 | { | ||
| 205 | return container_of(chan, struct ep93xx_dma_chan, chan); | ||
| 206 | } | ||
| 207 | |||
| 208 | /** | ||
| 209 | * ep93xx_dma_set_active - set new active descriptor chain | ||
| 210 | * @edmac: channel | ||
| 211 | * @desc: head of the new active descriptor chain | ||
| 212 | * | ||
| 213 | * Sets @desc to be the head of the new active descriptor chain. This is the | ||
| 214 | * chain which is processed next. The active list must be empty before calling | ||
| 215 | * this function. | ||
| 216 | * | ||
| 217 | * Called with @edmac->lock held and interrupts disabled. | ||
| 218 | */ | ||
| 219 | static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac, | ||
| 220 | struct ep93xx_dma_desc *desc) | ||
| 221 | { | ||
| 222 | BUG_ON(!list_empty(&edmac->active)); | ||
| 223 | |||
| 224 | list_add_tail(&desc->node, &edmac->active); | ||
| 225 | |||
| 226 | /* Flatten the @desc->tx_list chain into @edmac->active list */ | ||
| 227 | while (!list_empty(&desc->tx_list)) { | ||
| 228 | struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list, | ||
| 229 | struct ep93xx_dma_desc, node); | ||
| 230 | |||
| 231 | /* | ||
| 232 | * We copy the callback parameters from the first descriptor | ||
| 233 | * to all the chained descriptors. This way we can call the | ||
| 234 | * callback without having to find out the first descriptor in | ||
| 235 | * the chain. Useful for cyclic transfers. | ||
| 236 | */ | ||
| 237 | d->txd.callback = desc->txd.callback; | ||
| 238 | d->txd.callback_param = desc->txd.callback_param; | ||
| 239 | |||
| 240 | list_move_tail(&d->node, &edmac->active); | ||
| 241 | } | ||
| 242 | } | ||
| 243 | |||
| 244 | /* Called with @edmac->lock held and interrupts disabled */ | ||
| 245 | static struct ep93xx_dma_desc * | ||
| 246 | ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac) | ||
| 247 | { | ||
| 248 | return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node); | ||
| 249 | } | ||
| 250 | |||
| 251 | /** | ||
| 252 | * ep93xx_dma_advance_active - advances to the next active descriptor | ||
| 253 | * @edmac: channel | ||
| 254 | * | ||
| 255 | * Function advances active descriptor to the next in the @edmac->active and | ||
| 256 | * returns %true if we still have descriptors in the chain to process. | ||
| 257 | * Otherwise returns %false. | ||
| 258 | * | ||
| 259 | * When the channel is in cyclic mode always returns %true. | ||
| 260 | * | ||
| 261 | * Called with @edmac->lock held and interrupts disabled. | ||
| 262 | */ | ||
| 263 | static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac) | ||
| 264 | { | ||
| 265 | list_rotate_left(&edmac->active); | ||
| 266 | |||
| 267 | if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) | ||
| 268 | return true; | ||
| 269 | |||
| 270 | /* | ||
| 271 | * If txd.cookie is set it means that we are back in the first | ||
| 272 | * descriptor in the chain and hence done with it. | ||
| 273 | */ | ||
| 274 | return !ep93xx_dma_get_active(edmac)->txd.cookie; | ||
| 275 | } | ||
| 276 | |||
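The rotate-and-test scheme is compact enough that a short worked trace helps (illustrative only):

	/*
	 * active = [A, B, C]; A was submitted (its cookie is set), B and C
	 * are chained descriptors whose cookies were cleared in
	 * ep93xx_dma_desc_get():
	 *
	 *   rotate -> [B, C, A], head cookie == 0 -> true  (program B)
	 *   rotate -> [C, A, B], head cookie == 0 -> true  (program C)
	 *   rotate -> [A, B, C], head cookie != 0 -> false (chain done)
	 *
	 * In cyclic mode the rotation simply never terminates.
	 */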
| 277 | /* | ||
| 278 | * M2P DMA implementation | ||
| 279 | */ | ||
| 280 | |||
| 281 | static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control) | ||
| 282 | { | ||
| 283 | writel(control, edmac->regs + M2P_CONTROL); | ||
| 284 | /* | ||
| 285 | * EP93xx User's Guide states that we must perform a dummy read after | ||
| 286 | * write to the control register. | ||
| 287 | */ | ||
| 288 | readl(edmac->regs + M2P_CONTROL); | ||
| 289 | } | ||
| 290 | |||
| 291 | static int m2p_hw_setup(struct ep93xx_dma_chan *edmac) | ||
| 292 | { | ||
| 293 | struct ep93xx_dma_data *data = edmac->chan.private; | ||
| 294 | u32 control; | ||
| 295 | |||
| 296 | writel(data->port & 0xf, edmac->regs + M2P_PPALLOC); | ||
| 297 | |||
| 298 | control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE | ||
| 299 | | M2P_CONTROL_ENABLE; | ||
| 300 | m2p_set_control(edmac, control); | ||
| 301 | |||
| 302 | return 0; | ||
| 303 | } | ||
| 304 | |||
| 305 | static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac) | ||
| 306 | { | ||
| 307 | return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3; | ||
| 308 | } | ||
| 309 | |||
| 310 | static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac) | ||
| 311 | { | ||
| 312 | u32 control; | ||
| 313 | |||
| 314 | control = readl(edmac->regs + M2P_CONTROL); | ||
| 315 | control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT); | ||
| 316 | m2p_set_control(edmac, control); | ||
| 317 | |||
| 318 | while (m2p_channel_state(edmac) >= M2P_STATE_ON) | ||
| 319 | cpu_relax(); | ||
| 320 | |||
| 321 | m2p_set_control(edmac, 0); | ||
| 322 | |||
| 323 | while (m2p_channel_state(edmac) == M2P_STATE_STALL) | ||
| 324 | cpu_relax(); | ||
| 325 | } | ||
| 326 | |||
| 327 | static void m2p_fill_desc(struct ep93xx_dma_chan *edmac) | ||
| 328 | { | ||
| 329 | struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac); | ||
| 330 | u32 bus_addr; | ||
| 331 | |||
| 332 | if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_TO_DEVICE) | ||
| 333 | bus_addr = desc->src_addr; | ||
| 334 | else | ||
| 335 | bus_addr = desc->dst_addr; | ||
| 336 | |||
| 337 | if (edmac->buffer == 0) { | ||
| 338 | writel(desc->size, edmac->regs + M2P_MAXCNT0); | ||
| 339 | writel(bus_addr, edmac->regs + M2P_BASE0); | ||
| 340 | } else { | ||
| 341 | writel(desc->size, edmac->regs + M2P_MAXCNT1); | ||
| 342 | writel(bus_addr, edmac->regs + M2P_BASE1); | ||
| 343 | } | ||
| 344 | |||
| 345 | edmac->buffer ^= 1; | ||
| 346 | } | ||
| 347 | |||
| 348 | static void m2p_hw_submit(struct ep93xx_dma_chan *edmac) | ||
| 349 | { | ||
| 350 | u32 control = readl(edmac->regs + M2P_CONTROL); | ||
| 351 | |||
| 352 | m2p_fill_desc(edmac); | ||
| 353 | control |= M2P_CONTROL_STALLINT; | ||
| 354 | |||
| 355 | if (ep93xx_dma_advance_active(edmac)) { | ||
| 356 | m2p_fill_desc(edmac); | ||
| 357 | control |= M2P_CONTROL_NFBINT; | ||
| 358 | } | ||
| 359 | |||
| 360 | m2p_set_control(edmac, control); | ||
| 361 | } | ||
| 362 | |||
| 363 | static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac) | ||
| 364 | { | ||
| 365 | u32 irq_status = readl(edmac->regs + M2P_INTERRUPT); | ||
| 366 | u32 control; | ||
| 367 | |||
| 368 | if (irq_status & M2P_INTERRUPT_ERROR) { | ||
| 369 | struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac); | ||
| 370 | |||
| 371 | /* Clear the error interrupt */ | ||
| 372 | writel(1, edmac->regs + M2P_INTERRUPT); | ||
| 373 | |||
| 374 | /* | ||
| 375 | * It seems that there is no easy way of reporting errors back | ||
| 376 | * to the client so we just report the error here and continue as | ||
| 377 | * usual. | ||
| 378 | * | ||
| 379 | * Revisit this when there is a mechanism to report back the | ||
| 380 | * errors. | ||
| 381 | */ | ||
| 382 | dev_err(chan2dev(edmac), | ||
| 383 | "DMA transfer failed! Details:\n" | ||
| 384 | "\tcookie : %d\n" | ||
| 385 | "\tsrc_addr : 0x%08x\n" | ||
| 386 | "\tdst_addr : 0x%08x\n" | ||
| 387 | "\tsize : %zu\n", | ||
| 388 | desc->txd.cookie, desc->src_addr, desc->dst_addr, | ||
| 389 | desc->size); | ||
| 390 | } | ||
| 391 | |||
| 392 | switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) { | ||
| 393 | case M2P_INTERRUPT_STALL: | ||
| 394 | /* Disable interrupts */ | ||
| 395 | control = readl(edmac->regs + M2P_CONTROL); | ||
| 396 | control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT); | ||
| 397 | m2p_set_control(edmac, control); | ||
| 398 | |||
| 399 | return INTERRUPT_DONE; | ||
| 400 | |||
| 401 | case M2P_INTERRUPT_NFB: | ||
| 402 | if (ep93xx_dma_advance_active(edmac)) | ||
| 403 | m2p_fill_desc(edmac); | ||
| 404 | |||
| 405 | return INTERRUPT_NEXT_BUFFER; | ||
| 406 | } | ||
| 407 | |||
| 408 | return INTERRUPT_UNKNOWN; | ||
| 409 | } | ||
| 410 | |||
| 411 | /* | ||
| 412 | * M2M DMA implementation | ||
| 413 | * | ||
| 414 | * For the M2M transfers we don't use NFB at all. This is because it simply | ||
| 415 | * doesn't work well with memcpy transfers. When you submit both buffers it is | ||
| 416 | * extremely unlikely that you get an NFB interrupt, but it instead reports | ||
| 417 | * DONE interrupt and both buffers are already transferred which means that we | ||
| 418 | * weren't able to update the next buffer. | ||
| 419 | * | ||
| 420 | * So for now we "simulate" NFB by just submitting buffer after buffer | ||
| 421 | * without double buffering. | ||
| 422 | */ | ||
| 423 | |||
| 424 | static int m2m_hw_setup(struct ep93xx_dma_chan *edmac) | ||
| 425 | { | ||
| 426 | const struct ep93xx_dma_data *data = edmac->chan.private; | ||
| 427 | u32 control = 0; | ||
| 428 | |||
| 429 | if (!data) { | ||
| 430 | /* This is memcpy channel, nothing to configure */ | ||
| 431 | writel(control, edmac->regs + M2M_CONTROL); | ||
| 432 | return 0; | ||
| 433 | } | ||
| 434 | |||
| 435 | switch (data->port) { | ||
| 436 | case EP93XX_DMA_SSP: | ||
| 437 | /* | ||
| 438 | * This was found via experimenting - anything less than 5 | ||
| 439 | * causes the channel to perform only a partial transfer which | ||
| 440 | * leads to problems since we don't get DONE interrupt then. | ||
| 441 | */ | ||
| 442 | control = (5 << M2M_CONTROL_PWSC_SHIFT); | ||
| 443 | control |= M2M_CONTROL_NO_HDSK; | ||
| 444 | |||
| 445 | if (data->direction == DMA_TO_DEVICE) { | ||
| 446 | control |= M2M_CONTROL_DAH; | ||
| 447 | control |= M2M_CONTROL_TM_TX; | ||
| 448 | control |= M2M_CONTROL_RSS_SSPTX; | ||
| 449 | } else { | ||
| 450 | control |= M2M_CONTROL_SAH; | ||
| 451 | control |= M2M_CONTROL_TM_RX; | ||
| 452 | control |= M2M_CONTROL_RSS_SSPRX; | ||
| 453 | } | ||
| 454 | break; | ||
| 455 | |||
| 456 | case EP93XX_DMA_IDE: | ||
| 457 | /* | ||
| 458 | * This IDE part is totally untested. Values below are taken | ||
| 459 | * from the EP93xx User's Guide and might not be correct. | ||
| 460 | */ | ||
| 461 | control |= M2M_CONTROL_NO_HDSK; | ||
| 462 | control |= M2M_CONTROL_RSS_IDE; | ||
| 463 | control |= M2M_CONTROL_PW_16; | ||
| 464 | |||
| 465 | if (data->direction == DMA_TO_DEVICE) { | ||
| 466 | /* Worst case from the UG */ | ||
| 467 | control = (3 << M2M_CONTROL_PWSC_SHIFT); | ||
| 468 | control |= M2M_CONTROL_DAH; | ||
| 469 | control |= M2M_CONTROL_TM_TX; | ||
| 470 | } else { | ||
| 471 | control = (2 << M2M_CONTROL_PWSC_SHIFT); | ||
| 472 | control |= M2M_CONTROL_SAH; | ||
| 473 | control |= M2M_CONTROL_TM_RX; | ||
| 474 | } | ||
| 475 | break; | ||
| 476 | |||
| 477 | default: | ||
| 478 | return -EINVAL; | ||
| 479 | } | ||
| 480 | |||
| 481 | writel(control, edmac->regs + M2M_CONTROL); | ||
| 482 | return 0; | ||
| 483 | } | ||
| 484 | |||
| 485 | static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac) | ||
| 486 | { | ||
| 487 | /* Just disable the channel */ | ||
| 488 | writel(0, edmac->regs + M2M_CONTROL); | ||
| 489 | } | ||
| 490 | |||
| 491 | static void m2m_fill_desc(struct ep93xx_dma_chan *edmac) | ||
| 492 | { | ||
| 493 | struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac); | ||
| 494 | |||
| 495 | if (edmac->buffer == 0) { | ||
| 496 | writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0); | ||
| 497 | writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0); | ||
| 498 | writel(desc->size, edmac->regs + M2M_BCR0); | ||
| 499 | } else { | ||
| 500 | writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1); | ||
| 501 | writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1); | ||
| 502 | writel(desc->size, edmac->regs + M2M_BCR1); | ||
| 503 | } | ||
| 504 | |||
| 505 | edmac->buffer ^= 1; | ||
| 506 | } | ||
| 507 | |||
| 508 | static void m2m_hw_submit(struct ep93xx_dma_chan *edmac) | ||
| 509 | { | ||
| 510 | struct ep93xx_dma_data *data = edmac->chan.private; | ||
| 511 | u32 control = readl(edmac->regs + M2M_CONTROL); | ||
| 512 | |||
| 513 | /* | ||
| 514 | * Since we allow clients to configure PW (peripheral width) we always | ||
| 515 | * clear PW bits here and then set them according what is given in | ||
| 516 | * the runtime configuration. | ||
| 517 | */ | ||
| 518 | control &= ~M2M_CONTROL_PW_MASK; | ||
| 519 | control |= edmac->runtime_ctrl; | ||
| 520 | |||
| 521 | m2m_fill_desc(edmac); | ||
| 522 | control |= M2M_CONTROL_DONEINT; | ||
| 523 | |||
| 524 | /* | ||
| 525 | * Now we can finally enable the channel. For M2M channel this must be | ||
| 526 | * done _after_ the BCRx registers are programmed. | ||
| 527 | */ | ||
| 528 | control |= M2M_CONTROL_ENABLE; | ||
| 529 | writel(control, edmac->regs + M2M_CONTROL); | ||
| 530 | |||
| 531 | if (!data) { | ||
| 532 | /* | ||
| 533 | * For memcpy channels the software trigger must be asserted | ||
| 534 | * in order to start the memcpy operation. | ||
| 535 | */ | ||
| 536 | control |= M2M_CONTROL_START; | ||
| 537 | writel(control, edmac->regs + M2M_CONTROL); | ||
| 538 | } | ||
| 539 | } | ||
| 540 | |||
| 541 | static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac) | ||
| 542 | { | ||
| 543 | u32 control; | ||
| 544 | |||
| 545 | if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT)) | ||
| 546 | return INTERRUPT_UNKNOWN; | ||
| 547 | |||
| 548 | /* Clear the DONE bit */ | ||
| 549 | writel(0, edmac->regs + M2M_INTERRUPT); | ||
| 550 | |||
| 551 | /* Disable interrupts and the channel */ | ||
| 552 | control = readl(edmac->regs + M2M_CONTROL); | ||
| 553 | control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE); | ||
| 554 | writel(control, edmac->regs + M2M_CONTROL); | ||
| 555 | |||
| 556 | /* | ||
| 557 | * Since we only get DONE interrupt we have to find out ourselves | ||
| 558 | * whether there still is something to process. So we try to advance | ||
| 559 | * the chain and see whether it succeeds. | ||
| 560 | */ | ||
| 561 | if (ep93xx_dma_advance_active(edmac)) { | ||
| 562 | edmac->edma->hw_submit(edmac); | ||
| 563 | return INTERRUPT_NEXT_BUFFER; | ||
| 564 | } | ||
| 565 | |||
| 566 | return INTERRUPT_DONE; | ||
| 567 | } | ||
| 568 | |||
| 569 | /* | ||
| 570 | * DMA engine API implementation | ||
| 571 | */ | ||
| 572 | |||
| 573 | static struct ep93xx_dma_desc * | ||
| 574 | ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac) | ||
| 575 | { | ||
| 576 | struct ep93xx_dma_desc *desc, *_desc; | ||
| 577 | struct ep93xx_dma_desc *ret = NULL; | ||
| 578 | unsigned long flags; | ||
| 579 | |||
| 580 | spin_lock_irqsave(&edmac->lock, flags); | ||
| 581 | list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) { | ||
| 582 | if (async_tx_test_ack(&desc->txd)) { | ||
| 583 | list_del_init(&desc->node); | ||
| 584 | |||
| 585 | /* Re-initialize the descriptor */ | ||
| 586 | desc->src_addr = 0; | ||
| 587 | desc->dst_addr = 0; | ||
| 588 | desc->size = 0; | ||
| 589 | desc->complete = false; | ||
| 590 | desc->txd.cookie = 0; | ||
| 591 | desc->txd.callback = NULL; | ||
| 592 | desc->txd.callback_param = NULL; | ||
| 593 | |||
| 594 | ret = desc; | ||
| 595 | break; | ||
| 596 | } | ||
| 597 | } | ||
| 598 | spin_unlock_irqrestore(&edmac->lock, flags); | ||
| 599 | return ret; | ||
| 600 | } | ||
| 601 | |||
| 602 | static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac, | ||
| 603 | struct ep93xx_dma_desc *desc) | ||
| 604 | { | ||
| 605 | if (desc) { | ||
| 606 | unsigned long flags; | ||
| 607 | |||
| 608 | spin_lock_irqsave(&edmac->lock, flags); | ||
| 609 | list_splice_init(&desc->tx_list, &edmac->free_list); | ||
| 610 | list_add(&desc->node, &edmac->free_list); | ||
| 611 | spin_unlock_irqrestore(&edmac->lock, flags); | ||
| 612 | } | ||
| 613 | } | ||
| 614 | |||
| 615 | /** | ||
| 616 | * ep93xx_dma_advance_work - start processing the next pending transaction | ||
| 617 | * @edmac: channel | ||
| 618 | * | ||
| 619 | * If we have pending transactions queued and we are currently idling, this | ||
| 620 | * function takes the next queued transaction from the @edmac->queue and | ||
| 621 | * pushes it to the hardware for execution. | ||
| 622 | */ | ||
| 623 | static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac) | ||
| 624 | { | ||
| 625 | struct ep93xx_dma_desc *new; | ||
| 626 | unsigned long flags; | ||
| 627 | |||
| 628 | spin_lock_irqsave(&edmac->lock, flags); | ||
| 629 | if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) { | ||
| 630 | spin_unlock_irqrestore(&edmac->lock, flags); | ||
| 631 | return; | ||
| 632 | } | ||
| 633 | |||
| 634 | /* Take the next descriptor from the pending queue */ | ||
| 635 | new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node); | ||
| 636 | list_del_init(&new->node); | ||
| 637 | |||
| 638 | ep93xx_dma_set_active(edmac, new); | ||
| 639 | |||
| 640 | /* Push it to the hardware */ | ||
| 641 | edmac->edma->hw_submit(edmac); | ||
| 642 | spin_unlock_irqrestore(&edmac->lock, flags); | ||
| 643 | } | ||
| 644 | |||
| 645 | static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc) | ||
| 646 | { | ||
| 647 | struct device *dev = desc->txd.chan->device->dev; | ||
| 648 | |||
| 649 | if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
| 650 | if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE) | ||
| 651 | dma_unmap_single(dev, desc->src_addr, desc->size, | ||
| 652 | DMA_TO_DEVICE); | ||
| 653 | else | ||
| 654 | dma_unmap_page(dev, desc->src_addr, desc->size, | ||
| 655 | DMA_TO_DEVICE); | ||
| 656 | } | ||
| 657 | if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
| 658 | if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE) | ||
| 659 | dma_unmap_single(dev, desc->dst_addr, desc->size, | ||
| 660 | DMA_FROM_DEVICE); | ||
| 661 | else | ||
| 662 | dma_unmap_page(dev, desc->dst_addr, desc->size, | ||
| 663 | DMA_FROM_DEVICE); | ||
| 664 | } | ||
| 665 | } | ||
| 666 | |||
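The unmap calls above honour the standard descriptor control flags, so a memcpy client that manages its own mappings can opt out when preparing the transfer; a minimal sketch, assuming the client keeps its dma_map_*() mappings alive itself:

	unsigned long flags;

	/* skip the automatic unmap on completion */
	flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;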
| 667 | static void ep93xx_dma_tasklet(unsigned long data) | ||
| 668 | { | ||
| 669 | struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data; | ||
| 670 | struct ep93xx_dma_desc *desc, *d; | ||
| 671 | dma_async_tx_callback callback; | ||
| 672 | void *callback_param; | ||
| 673 | LIST_HEAD(list); | ||
| 674 | |||
| 675 | spin_lock_irq(&edmac->lock); | ||
| 676 | desc = ep93xx_dma_get_active(edmac); | ||
| 677 | if (desc->complete) { | ||
| 678 | edmac->last_completed = desc->txd.cookie; | ||
| 679 | list_splice_init(&edmac->active, &list); | ||
| 680 | } | ||
| 681 | spin_unlock_irq(&edmac->lock); | ||
| 682 | |||
| 683 | /* Pick up the next descriptor from the queue */ | ||
| 684 | ep93xx_dma_advance_work(edmac); | ||
| 685 | |||
| 686 | callback = desc->txd.callback; | ||
| 687 | callback_param = desc->txd.callback_param; | ||
| 688 | |||
| 689 | /* Now we can release all the chained descriptors */ | ||
| 690 | list_for_each_entry_safe(desc, d, &list, node) { | ||
| 691 | /* | ||
| 692 | * For the memcpy channels the API requires us to unmap the | ||
| 693 | * buffers unless requested otherwise. | ||
| 694 | */ | ||
| 695 | if (!edmac->chan.private) | ||
| 696 | ep93xx_dma_unmap_buffers(desc); | ||
| 697 | |||
| 698 | ep93xx_dma_desc_put(edmac, desc); | ||
| 699 | } | ||
| 700 | |||
| 701 | if (callback) | ||
| 702 | callback(callback_param); | ||
| 703 | } | ||
| 704 | |||
| 705 | static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id) | ||
| 706 | { | ||
| 707 | struct ep93xx_dma_chan *edmac = dev_id; | ||
| 708 | irqreturn_t ret = IRQ_HANDLED; | ||
| 709 | |||
| 710 | spin_lock(&edmac->lock); | ||
| 711 | |||
| 712 | switch (edmac->edma->hw_interrupt(edmac)) { | ||
| 713 | case INTERRUPT_DONE: | ||
| 714 | ep93xx_dma_get_active(edmac)->complete = true; | ||
| 715 | tasklet_schedule(&edmac->tasklet); | ||
| 716 | break; | ||
| 717 | |||
| 718 | case INTERRUPT_NEXT_BUFFER: | ||
| 719 | if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) | ||
| 720 | tasklet_schedule(&edmac->tasklet); | ||
| 721 | break; | ||
| 722 | |||
| 723 | default: | ||
| 724 | dev_warn(chan2dev(edmac), "unknown interrupt!\n"); | ||
| 725 | ret = IRQ_NONE; | ||
| 726 | break; | ||
| 727 | } | ||
| 728 | |||
| 729 | spin_unlock(&edmac->lock); | ||
| 730 | return ret; | ||
| 731 | } | ||
| 732 | |||
| 733 | /** | ||
| 734 | * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed | ||
| 735 | * @tx: descriptor to be executed | ||
| 736 | * | ||
| 737 | * Function will execute given descriptor on the hardware or if the hardware | ||
| 738 | * is busy, queue the descriptor to be executed later on. Returns cookie which | ||
| 739 | * can be used to poll the status of the descriptor. | ||
| 740 | */ | ||
| 741 | static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx) | ||
| 742 | { | ||
| 743 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan); | ||
| 744 | struct ep93xx_dma_desc *desc; | ||
| 745 | dma_cookie_t cookie; | ||
| 746 | unsigned long flags; | ||
| 747 | |||
| 748 | spin_lock_irqsave(&edmac->lock, flags); | ||
| 749 | |||
| 750 | cookie = edmac->chan.cookie; | ||
| 751 | |||
| 752 | if (++cookie < 0) | ||
| 753 | cookie = 1; | ||
| 754 | |||
| 755 | desc = container_of(tx, struct ep93xx_dma_desc, txd); | ||
| 756 | |||
| 757 | edmac->chan.cookie = cookie; | ||
| 758 | desc->txd.cookie = cookie; | ||
| 759 | |||
| 760 | /* | ||
| 761 | * If nothing is currently being processed, we push this descriptor | ||
| 762 | * directly to the hardware. Otherwise we put the descriptor | ||
| 763 | * on the pending queue. | ||
| 764 | */ | ||
| 765 | if (list_empty(&edmac->active)) { | ||
| 766 | ep93xx_dma_set_active(edmac, desc); | ||
| 767 | edmac->edma->hw_submit(edmac); | ||
| 768 | } else { | ||
| 769 | list_add_tail(&desc->node, &edmac->queue); | ||
| 770 | } | ||
| 771 | |||
| 772 | spin_unlock_irqrestore(&edmac->lock, flags); | ||
| 773 | return cookie; | ||
| 774 | } | ||
| 775 | |||
| 776 | /** | ||
| 777 | * ep93xx_dma_alloc_chan_resources - allocate resources for the channel | ||
| 778 | * @chan: channel to allocate resources | ||
| 779 | * | ||
| 780 | * Function allocates necessary resources for the given DMA channel and | ||
| 781 | * returns number of allocated descriptors for the channel. Negative errno | ||
| 782 | * is returned in case of failure. | ||
| 783 | */ | ||
| 784 | static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan) | ||
| 785 | { | ||
| 786 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | ||
| 787 | struct ep93xx_dma_data *data = chan->private; | ||
| 788 | const char *name = dma_chan_name(chan); | ||
| 789 | int ret, i; | ||
| 790 | |||
| 791 | /* Sanity check the channel parameters */ | ||
| 792 | if (!edmac->edma->m2m) { | ||
| 793 | if (!data) | ||
| 794 | return -EINVAL; | ||
| 795 | if (data->port < EP93XX_DMA_I2S1 || | ||
| 796 | data->port > EP93XX_DMA_IRDA) | ||
| 797 | return -EINVAL; | ||
| 798 | if (data->direction != ep93xx_dma_chan_direction(chan)) | ||
| 799 | return -EINVAL; | ||
| 800 | } else { | ||
| 801 | if (data) { | ||
| 802 | switch (data->port) { | ||
| 803 | case EP93XX_DMA_SSP: | ||
| 804 | case EP93XX_DMA_IDE: | ||
| 805 | if (data->direction != DMA_TO_DEVICE && | ||
| 806 | data->direction != DMA_FROM_DEVICE) | ||
| 807 | return -EINVAL; | ||
| 808 | break; | ||
| 809 | default: | ||
| 810 | return -EINVAL; | ||
| 811 | } | ||
| 812 | } | ||
| 813 | } | ||
| 814 | |||
| 815 | if (data && data->name) | ||
| 816 | name = data->name; | ||
| 817 | |||
| 818 | ret = clk_enable(edmac->clk); | ||
| 819 | if (ret) | ||
| 820 | return ret; | ||
| 821 | |||
| 822 | ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac); | ||
| 823 | if (ret) | ||
| 824 | goto fail_clk_disable; | ||
| 825 | |||
| 826 | spin_lock_irq(&edmac->lock); | ||
| 827 | edmac->last_completed = 1; | ||
| 828 | edmac->chan.cookie = 1; | ||
| 829 | ret = edmac->edma->hw_setup(edmac); | ||
| 830 | spin_unlock_irq(&edmac->lock); | ||
| 831 | |||
| 832 | if (ret) | ||
| 833 | goto fail_free_irq; | ||
| 834 | |||
| 835 | for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) { | ||
| 836 | struct ep93xx_dma_desc *desc; | ||
| 837 | |||
| 838 | desc = kzalloc(sizeof(*desc), GFP_KERNEL); | ||
| 839 | if (!desc) { | ||
| 840 | dev_warn(chan2dev(edmac), "not enough descriptors\n"); | ||
| 841 | break; | ||
| 842 | } | ||
| 843 | |||
| 844 | INIT_LIST_HEAD(&desc->tx_list); | ||
| 845 | |||
| 846 | dma_async_tx_descriptor_init(&desc->txd, chan); | ||
| 847 | desc->txd.flags = DMA_CTRL_ACK; | ||
| 848 | desc->txd.tx_submit = ep93xx_dma_tx_submit; | ||
| 849 | |||
| 850 | ep93xx_dma_desc_put(edmac, desc); | ||
| 851 | } | ||
| 852 | |||
| 853 | return i; | ||
| 854 | |||
| 855 | fail_free_irq: | ||
| 856 | free_irq(edmac->irq, edmac); | ||
| 857 | fail_clk_disable: | ||
| 858 | clk_disable(edmac->clk); | ||
| 859 | |||
| 860 | return ret; | ||
| 861 | } | ||
| 862 | |||
| 863 | /** | ||
| 864 | * ep93xx_dma_free_chan_resources - release resources for the channel | ||
| 865 | * @chan: channel | ||
| 866 | * | ||
| 867 | * Function releases all the resources allocated for the given channel. | ||
| 868 | * The channel must be idle when this is called. | ||
| 869 | */ | ||
| 870 | static void ep93xx_dma_free_chan_resources(struct dma_chan *chan) | ||
| 871 | { | ||
| 872 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | ||
| 873 | struct ep93xx_dma_desc *desc, *d; | ||
| 874 | unsigned long flags; | ||
| 875 | LIST_HEAD(list); | ||
| 876 | |||
| 877 | BUG_ON(!list_empty(&edmac->active)); | ||
| 878 | BUG_ON(!list_empty(&edmac->queue)); | ||
| 879 | |||
| 880 | spin_lock_irqsave(&edmac->lock, flags); | ||
| 881 | edmac->edma->hw_shutdown(edmac); | ||
| 882 | edmac->runtime_addr = 0; | ||
| 883 | edmac->runtime_ctrl = 0; | ||
| 884 | edmac->buffer = 0; | ||
| 885 | list_splice_init(&edmac->free_list, &list); | ||
| 886 | spin_unlock_irqrestore(&edmac->lock, flags); | ||
| 887 | |||
| 888 | list_for_each_entry_safe(desc, d, &list, node) | ||
| 889 | kfree(desc); | ||
| 890 | |||
| 891 | clk_disable(edmac->clk); | ||
| 892 | free_irq(edmac->irq, edmac); | ||
| 893 | } | ||
| 894 | |||
| 895 | /** | ||
| 896 | * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation | ||
| 897 | * @chan: channel | ||
| 898 | * @dest: destination bus address | ||
| 899 | * @src: source bus address | ||
| 900 | * @len: size of the transaction | ||
| 901 | * @flags: flags for the descriptor | ||
| 902 | * | ||
| 903 | * Returns a valid DMA descriptor or %NULL in case of failure. | ||
| 904 | */ | ||
| 905 | static struct dma_async_tx_descriptor * | ||
| 906 | ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, | ||
| 907 | dma_addr_t src, size_t len, unsigned long flags) | ||
| 908 | { | ||
| 909 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | ||
| 910 | struct ep93xx_dma_desc *desc, *first; | ||
| 911 | size_t bytes, offset; | ||
| 912 | |||
| 913 | first = NULL; | ||
| 914 | for (offset = 0; offset < len; offset += bytes) { | ||
| 915 | desc = ep93xx_dma_desc_get(edmac); | ||
| 916 | if (!desc) { | ||
| 917 | dev_warn(chan2dev(edmac), "couldn't get descriptor\n"); | ||
| 918 | goto fail; | ||
| 919 | } | ||
| 920 | |||
| 921 | bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES); | ||
| 922 | |||
| 923 | desc->src_addr = src + offset; | ||
| 924 | desc->dst_addr = dest + offset; | ||
| 925 | desc->size = bytes; | ||
| 926 | |||
| 927 | if (!first) | ||
| 928 | first = desc; | ||
| 929 | else | ||
| 930 | list_add_tail(&desc->node, &first->tx_list); | ||
| 931 | } | ||
| 932 | |||
| 933 | first->txd.cookie = -EBUSY; | ||
| 934 | first->txd.flags = flags; | ||
| 935 | |||
| 936 | return &first->txd; | ||
| 937 | fail: | ||
| 938 | ep93xx_dma_desc_put(edmac, first); | ||
| 939 | return NULL; | ||
| 940 | } | ||
| 941 | |||
| 942 | /** | ||
| 943 | * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation | ||
| 944 | * @chan: channel | ||
| 945 | * @sgl: list of buffers to transfer | ||
| 946 | * @sg_len: number of entries in @sgl | ||
| 947 | * @dir: direction of the DMA transfer | ||
| 948 | * @flags: flags for the descriptor | ||
| 949 | * | ||
| 950 | * Returns a valid DMA descriptor or %NULL in case of failure. | ||
| 951 | */ | ||
| 952 | static struct dma_async_tx_descriptor * | ||
| 953 | ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | ||
| 954 | unsigned int sg_len, enum dma_data_direction dir, | ||
| 955 | unsigned long flags) | ||
| 956 | { | ||
| 957 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | ||
| 958 | struct ep93xx_dma_desc *desc, *first; | ||
| 959 | struct scatterlist *sg; | ||
| 960 | int i; | ||
| 961 | |||
| 962 | if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) { | ||
| 963 | dev_warn(chan2dev(edmac), | ||
| 964 | "channel was configured with different direction\n"); | ||
| 965 | return NULL; | ||
| 966 | } | ||
| 967 | |||
| 968 | if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) { | ||
| 969 | dev_warn(chan2dev(edmac), | ||
| 970 | "channel is already used for cyclic transfers\n"); | ||
| 971 | return NULL; | ||
| 972 | } | ||
| 973 | |||
| 974 | first = NULL; | ||
| 975 | for_each_sg(sgl, sg, sg_len, i) { | ||
| 976 | size_t sg_len = sg_dma_len(sg); | ||
| 977 | |||
| 978 | if (sg_len > DMA_MAX_CHAN_BYTES) { | ||
| 979 | dev_warn(chan2dev(edmac), "too big transfer size %zu\n", | ||
| 980 | sg_len); | ||
| 981 | goto fail; | ||
| 982 | } | ||
| 983 | |||
| 984 | desc = ep93xx_dma_desc_get(edmac); | ||
| 985 | if (!desc) { | ||
| 986 | dev_warn(chan2dev(edmac), "couldn't get descriptor\n"); | ||
| 987 | goto fail; | ||
| 988 | } | ||
| 989 | |||
| 990 | if (dir == DMA_TO_DEVICE) { | ||
| 991 | desc->src_addr = sg_dma_address(sg); | ||
| 992 | desc->dst_addr = edmac->runtime_addr; | ||
| 993 | } else { | ||
| 994 | desc->src_addr = edmac->runtime_addr; | ||
| 995 | desc->dst_addr = sg_dma_address(sg); | ||
| 996 | } | ||
| 997 | desc->size = sg_len; | ||
| 998 | |||
| 999 | if (!first) | ||
| 1000 | first = desc; | ||
| 1001 | else | ||
| 1002 | list_add_tail(&desc->node, &first->tx_list); | ||
| 1003 | } | ||
| 1004 | |||
| 1005 | first->txd.cookie = -EBUSY; | ||
| 1006 | first->txd.flags = flags; | ||
| 1007 | |||
| 1008 | return &first->txd; | ||
| 1009 | |||
| 1010 | fail: | ||
| 1011 | ep93xx_dma_desc_put(edmac, first); | ||
| 1012 | return NULL; | ||
| 1013 | } | ||
| 1014 | |||
| 1015 | /** | ||
| 1016 | * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation | ||
| 1017 | * @chan: channel | ||
| 1018 | * @dma_addr: DMA mapped address of the buffer | ||
| 1019 | * @buf_len: length of the buffer (in bytes) | ||
| 1020 | * @period_len: length of a single period | ||
| 1021 | * @dir: direction of the operation | ||
| 1022 | * | ||
| 1023 | * Prepares a descriptor for cyclic DMA operation. This means that once the | ||
| 1024 | * descriptor is submitted, we keep submitting @period_len sized buffers and | ||
| 1025 | * calling the callback once each period has elapsed. The transfer | ||
| 1026 | * terminates only when the client calls dmaengine_terminate_all() for this | ||
| 1027 | * channel. | ||
| 1028 | * | ||
| 1029 | * Returns a valid DMA descriptor or %NULL in case of failure. | ||
| 1030 | */ | ||
| 1031 | static struct dma_async_tx_descriptor * | ||
| 1032 | ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, | ||
| 1033 | size_t buf_len, size_t period_len, | ||
| 1034 | enum dma_data_direction dir) | ||
| 1035 | { | ||
| 1036 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | ||
| 1037 | struct ep93xx_dma_desc *desc, *first; | ||
| 1038 | size_t offset = 0; | ||
| 1039 | |||
| 1040 | if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) { | ||
| 1041 | dev_warn(chan2dev(edmac), | ||
| 1042 | "channel was configured with different direction\n"); | ||
| 1043 | return NULL; | ||
| 1044 | } | ||
| 1045 | |||
| 1046 | if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) { | ||
| 1047 | dev_warn(chan2dev(edmac), | ||
| 1048 | "channel is already used for cyclic transfers\n"); | ||
| 1049 | return NULL; | ||
| 1050 | } | ||
| 1051 | |||
| 1052 | if (period_len > DMA_MAX_CHAN_BYTES) { | ||
| 1053 | dev_warn(chan2dev(edmac), "too big period length %zu\n", | ||
| 1054 | period_len); | ||
| 1055 | return NULL; | ||
| 1056 | } | ||
| 1057 | |||
| 1058 | /* Split the buffer into period size chunks */ | ||
| 1059 | first = NULL; | ||
| 1060 | for (offset = 0; offset < buf_len; offset += period_len) { | ||
| 1061 | desc = ep93xx_dma_desc_get(edmac); | ||
| 1062 | if (!desc) { | ||
| 1063 | dev_warn(chan2dev(edmac), "couldn't get descriptor\n"); | ||
| 1064 | goto fail; | ||
| 1065 | } | ||
| 1066 | |||
| 1067 | if (dir == DMA_TO_DEVICE) { | ||
| 1068 | desc->src_addr = dma_addr + offset; | ||
| 1069 | desc->dst_addr = edmac->runtime_addr; | ||
| 1070 | } else { | ||
| 1071 | desc->src_addr = edmac->runtime_addr; | ||
| 1072 | desc->dst_addr = dma_addr + offset; | ||
| 1073 | } | ||
| 1074 | |||
| 1075 | desc->size = period_len; | ||
| 1076 | |||
| 1077 | if (!first) | ||
| 1078 | first = desc; | ||
| 1079 | else | ||
| 1080 | list_add_tail(&desc->node, &first->tx_list); | ||
| 1081 | } | ||
| 1082 | |||
| 1083 | first->txd.cookie = -EBUSY; | ||
| 1084 | |||
| 1085 | return &first->txd; | ||
| 1086 | |||
| 1087 | fail: | ||
| 1088 | ep93xx_dma_desc_put(edmac, first); | ||
| 1089 | return NULL; | ||
| 1090 | } | ||
| 1091 | |||
| 1092 | /** | ||
| 1093 | * ep93xx_dma_terminate_all - terminate all transactions | ||
| 1094 | * @edmac: channel | ||
| 1095 | * | ||
| 1096 | * Stops all DMA transactions. All descriptors are put back to the | ||
| 1097 | * @edmac->free_list and callbacks are _not_ called. | ||
| 1098 | */ | ||
| 1099 | static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac) | ||
| 1100 | { | ||
| 1101 | struct ep93xx_dma_desc *desc, *_d; | ||
| 1102 | unsigned long flags; | ||
| 1103 | LIST_HEAD(list); | ||
| 1104 | |||
| 1105 | spin_lock_irqsave(&edmac->lock, flags); | ||
| 1106 | /* First we disable and flush the DMA channel */ | ||
| 1107 | edmac->edma->hw_shutdown(edmac); | ||
| 1108 | clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags); | ||
| 1109 | list_splice_init(&edmac->active, &list); | ||
| 1110 | list_splice_init(&edmac->queue, &list); | ||
| 1111 | /* | ||
| 1112 | * We then re-enable the channel. This way we can continue submitting | ||
| 1113 | * the descriptors by just calling ->hw_submit() again. | ||
| 1114 | */ | ||
| 1115 | edmac->edma->hw_setup(edmac); | ||
| 1116 | spin_unlock_irqrestore(&edmac->lock, flags); | ||
| 1117 | |||
| 1118 | list_for_each_entry_safe(desc, _d, &list, node) | ||
| 1119 | ep93xx_dma_desc_put(edmac, desc); | ||
| 1120 | |||
| 1121 | return 0; | ||
| 1122 | } | ||
| 1123 | |||
| 1124 | static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac, | ||
| 1125 | struct dma_slave_config *config) | ||
| 1126 | { | ||
| 1127 | enum dma_slave_buswidth width; | ||
| 1128 | unsigned long flags; | ||
| 1129 | u32 addr, ctrl; | ||
| 1130 | |||
| 1131 | if (!edmac->edma->m2m) | ||
| 1132 | return -EINVAL; | ||
| 1133 | |||
| 1134 | switch (config->direction) { | ||
| 1135 | case DMA_FROM_DEVICE: | ||
| 1136 | width = config->src_addr_width; | ||
| 1137 | addr = config->src_addr; | ||
| 1138 | break; | ||
| 1139 | |||
| 1140 | case DMA_TO_DEVICE: | ||
| 1141 | width = config->dst_addr_width; | ||
| 1142 | addr = config->dst_addr; | ||
| 1143 | break; | ||
| 1144 | |||
| 1145 | default: | ||
| 1146 | return -EINVAL; | ||
| 1147 | } | ||
| 1148 | |||
| 1149 | switch (width) { | ||
| 1150 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
| 1151 | ctrl = 0; | ||
| 1152 | break; | ||
| 1153 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
| 1154 | ctrl = M2M_CONTROL_PW_16; | ||
| 1155 | break; | ||
| 1156 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
| 1157 | ctrl = M2M_CONTROL_PW_32; | ||
| 1158 | break; | ||
| 1159 | default: | ||
| 1160 | return -EINVAL; | ||
| 1161 | } | ||
| 1162 | |||
| 1163 | spin_lock_irqsave(&edmac->lock, flags); | ||
| 1164 | edmac->runtime_addr = addr; | ||
| 1165 | edmac->runtime_ctrl = ctrl; | ||
| 1166 | spin_unlock_irqrestore(&edmac->lock, flags); | ||
| 1167 | |||
| 1168 | return 0; | ||
| 1169 | } | ||
| 1170 | |||
| 1171 | /** | ||
| 1172 | * ep93xx_dma_control - manipulate all pending operations on a channel | ||
| 1173 | * @chan: channel | ||
| 1174 | * @cmd: control command to perform | ||
| 1175 | * @arg: optional argument | ||
| 1176 | * | ||
| 1177 | * Controls the channel. Function returns %0 in case of success or negative | ||
| 1178 | * error in case of failure. | ||
| 1179 | */ | ||
| 1180 | static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
| 1181 | unsigned long arg) | ||
| 1182 | { | ||
| 1183 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | ||
| 1184 | struct dma_slave_config *config; | ||
| 1185 | |||
| 1186 | switch (cmd) { | ||
| 1187 | case DMA_TERMINATE_ALL: | ||
| 1188 | return ep93xx_dma_terminate_all(edmac); | ||
| 1189 | |||
| 1190 | case DMA_SLAVE_CONFIG: | ||
| 1191 | config = (struct dma_slave_config *)arg; | ||
| 1192 | return ep93xx_dma_slave_config(edmac, config); | ||
| 1193 | |||
| 1194 | default: | ||
| 1195 | break; | ||
| 1196 | } | ||
| 1197 | |||
| 1198 | return -ENOSYS; | ||
| 1199 | } | ||
| 1200 | |||
| 1201 | /** | ||
| 1202 | * ep93xx_dma_tx_status - check if a transaction is completed | ||
| 1203 | * @chan: channel | ||
| 1204 | * @cookie: transaction specific cookie | ||
| 1205 | * @state: state of the transaction is stored here if given | ||
| 1206 | * | ||
| 1207 | * This function can be used to query state of a given transaction. | ||
| 1208 | */ | ||
| 1209 | static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan, | ||
| 1210 | dma_cookie_t cookie, | ||
| 1211 | struct dma_tx_state *state) | ||
| 1212 | { | ||
| 1213 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | ||
| 1214 | dma_cookie_t last_used, last_completed; | ||
| 1215 | enum dma_status ret; | ||
| 1216 | unsigned long flags; | ||
| 1217 | |||
| 1218 | spin_lock_irqsave(&edmac->lock, flags); | ||
| 1219 | last_used = chan->cookie; | ||
| 1220 | last_completed = edmac->last_completed; | ||
| 1221 | spin_unlock_irqrestore(&edmac->lock, flags); | ||
| 1222 | |||
| 1223 | ret = dma_async_is_complete(cookie, last_completed, last_used); | ||
| 1224 | dma_set_tx_state(state, last_completed, last_used, 0); | ||
| 1225 | |||
| 1226 | return ret; | ||
| 1227 | } | ||
| 1228 | |||
| 1229 | /** | ||
| 1230 | * ep93xx_dma_issue_pending - push pending transactions to the hardware | ||
| 1231 | * @chan: channel | ||
| 1232 | * | ||
| 1233 | * When this function is called, all pending transactions are pushed to the | ||
| 1234 | * hardware and executed. | ||
| 1235 | */ | ||
| 1236 | static void ep93xx_dma_issue_pending(struct dma_chan *chan) | ||
| 1237 | { | ||
| 1238 | ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan)); | ||
| 1239 | } | ||
| 1240 | |||
| 1241 | static int __init ep93xx_dma_probe(struct platform_device *pdev) | ||
| 1242 | { | ||
| 1243 | struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev); | ||
| 1244 | struct ep93xx_dma_engine *edma; | ||
| 1245 | struct dma_device *dma_dev; | ||
| 1246 | size_t edma_size; | ||
| 1247 | int ret, i; | ||
| 1248 | |||
| 1249 | edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan); | ||
| 1250 | edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL); | ||
| 1251 | if (!edma) | ||
| 1252 | return -ENOMEM; | ||
| 1253 | |||
| 1254 | dma_dev = &edma->dma_dev; | ||
| 1255 | edma->m2m = platform_get_device_id(pdev)->driver_data; | ||
| 1256 | edma->num_channels = pdata->num_channels; | ||
| 1257 | |||
| 1258 | INIT_LIST_HEAD(&dma_dev->channels); | ||
| 1259 | for (i = 0; i < pdata->num_channels; i++) { | ||
| 1260 | const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i]; | ||
| 1261 | struct ep93xx_dma_chan *edmac = &edma->channels[i]; | ||
| 1262 | |||
| 1263 | edmac->chan.device = dma_dev; | ||
| 1264 | edmac->regs = cdata->base; | ||
| 1265 | edmac->irq = cdata->irq; | ||
| 1266 | edmac->edma = edma; | ||
| 1267 | |||
| 1268 | edmac->clk = clk_get(NULL, cdata->name); | ||
| 1269 | if (IS_ERR(edmac->clk)) { | ||
| 1270 | dev_warn(&pdev->dev, "failed to get clock for %s\n", | ||
| 1271 | cdata->name); | ||
| 1272 | continue; | ||
| 1273 | } | ||
| 1274 | |||
| 1275 | spin_lock_init(&edmac->lock); | ||
| 1276 | INIT_LIST_HEAD(&edmac->active); | ||
| 1277 | INIT_LIST_HEAD(&edmac->queue); | ||
| 1278 | INIT_LIST_HEAD(&edmac->free_list); | ||
| 1279 | tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet, | ||
| 1280 | (unsigned long)edmac); | ||
| 1281 | |||
| 1282 | list_add_tail(&edmac->chan.device_node, | ||
| 1283 | &dma_dev->channels); | ||
| 1284 | } | ||
| 1285 | |||
| 1286 | dma_cap_zero(dma_dev->cap_mask); | ||
| 1287 | dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); | ||
| 1288 | dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask); | ||
| 1289 | |||
| 1290 | dma_dev->dev = &pdev->dev; | ||
| 1291 | dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources; | ||
| 1292 | dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources; | ||
| 1293 | dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg; | ||
| 1294 | dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic; | ||
| 1295 | dma_dev->device_control = ep93xx_dma_control; | ||
| 1296 | dma_dev->device_issue_pending = ep93xx_dma_issue_pending; | ||
| 1297 | dma_dev->device_tx_status = ep93xx_dma_tx_status; | ||
| 1298 | |||
| 1299 | dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES); | ||
| 1300 | |||
| 1301 | if (edma->m2m) { | ||
| 1302 | dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); | ||
| 1303 | dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy; | ||
| 1304 | |||
| 1305 | edma->hw_setup = m2m_hw_setup; | ||
| 1306 | edma->hw_shutdown = m2m_hw_shutdown; | ||
| 1307 | edma->hw_submit = m2m_hw_submit; | ||
| 1308 | edma->hw_interrupt = m2m_hw_interrupt; | ||
| 1309 | } else { | ||
| 1310 | dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); | ||
| 1311 | |||
| 1312 | edma->hw_setup = m2p_hw_setup; | ||
| 1313 | edma->hw_shutdown = m2p_hw_shutdown; | ||
| 1314 | edma->hw_submit = m2p_hw_submit; | ||
| 1315 | edma->hw_interrupt = m2p_hw_interrupt; | ||
| 1316 | } | ||
| 1317 | |||
| 1318 | ret = dma_async_device_register(dma_dev); | ||
| 1319 | if (unlikely(ret)) { | ||
| 1320 | for (i = 0; i < edma->num_channels; i++) { | ||
| 1321 | struct ep93xx_dma_chan *edmac = &edma->channels[i]; | ||
| 1322 | if (!IS_ERR_OR_NULL(edmac->clk)) | ||
| 1323 | clk_put(edmac->clk); | ||
| 1324 | } | ||
| 1325 | kfree(edma); | ||
| 1326 | } else { | ||
| 1327 | dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n", | ||
| 1328 | edma->m2m ? "M" : "P"); | ||
| 1329 | } | ||
| 1330 | |||
| 1331 | return ret; | ||
| 1332 | } | ||
| 1333 | |||
| 1334 | static struct platform_device_id ep93xx_dma_driver_ids[] = { | ||
| 1335 | { "ep93xx-dma-m2p", 0 }, | ||
| 1336 | { "ep93xx-dma-m2m", 1 }, | ||
| 1337 | { }, | ||
| 1338 | }; | ||
| 1339 | |||
| 1340 | static struct platform_driver ep93xx_dma_driver = { | ||
| 1341 | .driver = { | ||
| 1342 | .name = "ep93xx-dma", | ||
| 1343 | }, | ||
| 1344 | .id_table = ep93xx_dma_driver_ids, | ||
| 1345 | }; | ||
| 1346 | |||
| 1347 | static int __init ep93xx_dma_module_init(void) | ||
| 1348 | { | ||
| 1349 | return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe); | ||
| 1350 | } | ||
| 1351 | subsys_initcall(ep93xx_dma_module_init); | ||
| 1352 | |||
| 1353 | MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>"); | ||
| 1354 | MODULE_DESCRIPTION("EP93xx DMA driver"); | ||
| 1355 | MODULE_LICENSE("GPL"); | ||
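The ep93xx driver above registers slave, cyclic and (for the M2M engine) memcpy capabilities in ep93xx_dma_probe(). As a rough sketch of how a client driver might sit on top of it, the fragment below requests a channel, applies a runtime configuration and starts a cyclic transfer. It is only an illustration under assumptions: the filter, the FIFO address, the buffer sizes and the header path for struct ep93xx_dma_data are made up and not part of this series.

#include <linux/dmaengine.h>
#include <mach/dma.h>		/* struct ep93xx_dma_data; path assumed */

/* Made-up channel data; EP93XX_DMA_IDE is one of the M2M ports above. */
static struct ep93xx_dma_data example_dma_data = {
	.port		= EP93XX_DMA_IDE,
	.direction	= DMA_FROM_DEVICE,
	.name		= "example-rx",
};

static bool example_filter(struct dma_chan *chan, void *filter_param)
{
	/*
	 * A real filter would also check that the channel belongs to the
	 * right controller; here we just attach the channel data.
	 */
	chan->private = filter_param;
	return true;
}

static int example_start_cyclic_rx(dma_addr_t buf, size_t buf_len,
				   size_t period_len)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_FROM_DEVICE,
		.src_addr	= 0x800a0000,	/* made-up peripheral FIFO */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_CYCLIC, mask);

	chan = dma_request_channel(mask, example_filter, &example_dma_data);
	if (!chan)
		return -ENODEV;

	if (dmaengine_slave_config(chan, &cfg))
		goto err;

	/* ep93xx_dma_prep_dma_cyclic() splits buf into period_len chunks */
	txd = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
						    period_len,
						    DMA_FROM_DEVICE);
	if (!txd)
		goto err;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;

err:
	dma_release_channel(chan);
	return -EIO;
}

The transfer then runs until the client calls dmaengine_terminate_all() on the channel, which lands in ep93xx_dma_terminate_all() above and returns all descriptors to the free list without invoking callbacks.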
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index b6d1455fa936..ec53980f8fcf 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
| @@ -1281,8 +1281,10 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
| 1281 | goto err_request_irq; | 1281 | goto err_request_irq; |
| 1282 | 1282 | ||
| 1283 | sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL); | 1283 | sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL); |
| 1284 | if (!sdma->script_addrs) | 1284 | if (!sdma->script_addrs) { |
| 1285 | ret = -ENOMEM; | ||
| 1285 | goto err_alloc; | 1286 | goto err_alloc; |
| 1287 | } | ||
| 1286 | 1288 | ||
| 1287 | sdma->version = pdata->sdma_version; | 1289 | sdma->version = pdata->sdma_version; |
| 1288 | 1290 | ||
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index f653517ef744..8a3fdd87db97 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c | |||
| @@ -1351,7 +1351,6 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state) | |||
| 1351 | return -EAGAIN; | 1351 | return -EAGAIN; |
| 1352 | } | 1352 | } |
| 1353 | device->state = SUSPENDED; | 1353 | device->state = SUSPENDED; |
| 1354 | pci_set_drvdata(pci, device); | ||
| 1355 | pci_save_state(pci); | 1354 | pci_save_state(pci); |
| 1356 | pci_disable_device(pci); | 1355 | pci_disable_device(pci); |
| 1357 | pci_set_power_state(pci, PCI_D3hot); | 1356 | pci_set_power_state(pci, PCI_D3hot); |
| @@ -1380,7 +1379,6 @@ int dma_resume(struct pci_dev *pci) | |||
| 1380 | } | 1379 | } |
| 1381 | device->state = RUNNING; | 1380 | device->state = RUNNING; |
| 1382 | iowrite32(REG_BIT0, device->dma_base + DMA_CFG); | 1381 | iowrite32(REG_BIT0, device->dma_base + DMA_CFG); |
| 1383 | pci_set_drvdata(pci, device); | ||
| 1384 | return 0; | 1382 | return 0; |
| 1385 | } | 1383 | } |
| 1386 | 1384 | ||
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index c1a125e7d1df..25447a8ca282 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c | |||
| @@ -1705,16 +1705,14 @@ static int __init ipu_probe(struct platform_device *pdev) | |||
| 1705 | ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base); | 1705 | ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base); |
| 1706 | 1706 | ||
| 1707 | /* Remap IPU common registers */ | 1707 | /* Remap IPU common registers */ |
| 1708 | ipu_data.reg_ipu = ioremap(mem_ipu->start, | 1708 | ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu)); |
| 1709 | mem_ipu->end - mem_ipu->start + 1); | ||
| 1710 | if (!ipu_data.reg_ipu) { | 1709 | if (!ipu_data.reg_ipu) { |
| 1711 | ret = -ENOMEM; | 1710 | ret = -ENOMEM; |
| 1712 | goto err_ioremap_ipu; | 1711 | goto err_ioremap_ipu; |
| 1713 | } | 1712 | } |
| 1714 | 1713 | ||
| 1715 | /* Remap Image Converter and Image DMA Controller registers */ | 1714 | /* Remap Image Converter and Image DMA Controller registers */ |
| 1716 | ipu_data.reg_ic = ioremap(mem_ic->start, | 1715 | ipu_data.reg_ic = ioremap(mem_ic->start, resource_size(mem_ic)); |
| 1717 | mem_ic->end - mem_ic->start + 1); | ||
| 1718 | if (!ipu_data.reg_ic) { | 1716 | if (!ipu_data.reg_ic) { |
| 1719 | ret = -ENOMEM; | 1717 | ret = -ENOMEM; |
| 1720 | goto err_ioremap_ic; | 1718 | goto err_ioremap_ic; |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 954e334e01bb..9a353c2216d0 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
| @@ -1305,7 +1305,7 @@ static int mv_xor_shared_probe(struct platform_device *pdev) | |||
| 1305 | return -ENODEV; | 1305 | return -ENODEV; |
| 1306 | 1306 | ||
| 1307 | msp->xor_base = devm_ioremap(&pdev->dev, res->start, | 1307 | msp->xor_base = devm_ioremap(&pdev->dev, res->start, |
| 1308 | res->end - res->start + 1); | 1308 | resource_size(res)); |
| 1309 | if (!msp->xor_base) | 1309 | if (!msp->xor_base) |
| 1310 | return -EBUSY; | 1310 | return -EBUSY; |
| 1311 | 1311 | ||
| @@ -1314,7 +1314,7 @@ static int mv_xor_shared_probe(struct platform_device *pdev) | |||
| 1314 | return -ENODEV; | 1314 | return -ENODEV; |
| 1315 | 1315 | ||
| 1316 | msp->xor_high_base = devm_ioremap(&pdev->dev, res->start, | 1316 | msp->xor_high_base = devm_ioremap(&pdev->dev, res->start, |
| 1317 | res->end - res->start + 1); | 1317 | resource_size(res)); |
| 1318 | if (!msp->xor_high_base) | 1318 | if (!msp->xor_high_base) |
| 1319 | return -EBUSY; | 1319 | return -EBUSY; |
| 1320 | 1320 | ||
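The ipu_idmac and mv_xor hunks above are both straight conversions of the open-coded length calculation to resource_size(), which for an inclusive [start, end] resource returns end - start + 1. A minimal sketch of the idiom, with made-up addresses:

#include <linux/io.h>
#include <linux/ioport.h>

static void __iomem *example_map(struct device *dev)
{
	struct resource res = {
		.start	= 0x80000000,	/* made-up register base */
		.end	= 0x80000fff,	/* inclusive end of a 4 KiB window */
	};

	/* resource_size(&res) == 0x1000, same as res.end - res.start + 1 */
	return devm_ioremap(dev, res.start, resource_size(&res));
}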
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index 88aad4f54002..be641cbd36fc 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c | |||
| @@ -327,10 +327,12 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) | |||
| 327 | 327 | ||
| 328 | memset(mxs_chan->ccw, 0, PAGE_SIZE); | 328 | memset(mxs_chan->ccw, 0, PAGE_SIZE); |
| 329 | 329 | ||
| 330 | ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, | 330 | if (mxs_chan->chan_irq != NO_IRQ) { |
| 331 | 0, "mxs-dma", mxs_dma); | 331 | ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, |
| 332 | if (ret) | 332 | 0, "mxs-dma", mxs_dma); |
| 333 | goto err_irq; | 333 | if (ret) |
| 334 | goto err_irq; | ||
| 335 | } | ||
| 334 | 336 | ||
| 335 | ret = clk_enable(mxs_dma->clk); | 337 | ret = clk_enable(mxs_dma->clk); |
| 336 | if (ret) | 338 | if (ret) |
| @@ -535,6 +537,7 @@ static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
| 535 | switch (cmd) { | 537 | switch (cmd) { |
| 536 | case DMA_TERMINATE_ALL: | 538 | case DMA_TERMINATE_ALL: |
| 537 | mxs_dma_disable_chan(mxs_chan); | 539 | mxs_dma_disable_chan(mxs_chan); |
| 540 | mxs_dma_reset_chan(mxs_chan); | ||
| 538 | break; | 541 | break; |
| 539 | case DMA_PAUSE: | 542 | case DMA_PAUSE: |
| 540 | mxs_dma_pause_chan(mxs_chan); | 543 | mxs_dma_pause_chan(mxs_chan); |
| @@ -707,6 +710,8 @@ static struct platform_device_id mxs_dma_type[] = { | |||
| 707 | }, { | 710 | }, { |
| 708 | .name = "mxs-dma-apbx", | 711 | .name = "mxs-dma-apbx", |
| 709 | .driver_data = MXS_DMA_APBX, | 712 | .driver_data = MXS_DMA_APBX, |
| 713 | }, { | ||
| 714 | /* end of list */ | ||
| 710 | } | 715 | } |
| 711 | }; | 716 | }; |
| 712 | 717 | ||
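Besides resetting the channel on DMA_TERMINATE_ALL, the mxs-dma hunk adds an empty sentinel entry to the platform_device_id table: the platform bus compares entries until it reaches one with an empty name, so the table must be terminated or the match loop walks past the end. The same pattern, with illustrative names only:

static struct platform_device_id example_dma_ids[] = {
	{
		.name		= "example-dma-apbh",
		.driver_data	= 0,
	}, {
		.name		= "example-dma-apbx",
		.driver_data	= 1,
	}, {
		/* sentinel: matching stops at the empty name */
	}
};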
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index ff5b38f9d45b..1ac8d4b580b7 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c | |||
| @@ -45,7 +45,8 @@ | |||
| 45 | #define DMA_STATUS_MASK_BITS 0x3 | 45 | #define DMA_STATUS_MASK_BITS 0x3 |
| 46 | #define DMA_STATUS_SHIFT_BITS 16 | 46 | #define DMA_STATUS_SHIFT_BITS 16 |
| 47 | #define DMA_STATUS_IRQ(x) (0x1 << (x)) | 47 | #define DMA_STATUS_IRQ(x) (0x1 << (x)) |
| 48 | #define DMA_STATUS_ERR(x) (0x1 << ((x) + 8)) | 48 | #define DMA_STATUS0_ERR(x) (0x1 << ((x) + 8)) |
| 49 | #define DMA_STATUS2_ERR(x) (0x1 << (x)) | ||
| 49 | 50 | ||
| 50 | #define DMA_DESC_WIDTH_SHIFT_BITS 12 | 51 | #define DMA_DESC_WIDTH_SHIFT_BITS 12 |
| 51 | #define DMA_DESC_WIDTH_1_BYTE (0x3 << DMA_DESC_WIDTH_SHIFT_BITS) | 52 | #define DMA_DESC_WIDTH_1_BYTE (0x3 << DMA_DESC_WIDTH_SHIFT_BITS) |
| @@ -61,6 +62,9 @@ | |||
| 61 | 62 | ||
| 62 | #define MAX_CHAN_NR 8 | 63 | #define MAX_CHAN_NR 8 |
| 63 | 64 | ||
| 65 | #define DMA_MASK_CTL0_MODE 0x33333333 | ||
| 66 | #define DMA_MASK_CTL2_MODE 0x00003333 | ||
| 67 | |||
| 64 | static unsigned int init_nr_desc_per_channel = 64; | 68 | static unsigned int init_nr_desc_per_channel = 64; |
| 65 | module_param(init_nr_desc_per_channel, uint, 0644); | 69 | module_param(init_nr_desc_per_channel, uint, 0644); |
| 66 | MODULE_PARM_DESC(init_nr_desc_per_channel, | 70 | MODULE_PARM_DESC(init_nr_desc_per_channel, |
| @@ -133,6 +137,7 @@ struct pch_dma { | |||
| 133 | #define PCH_DMA_CTL3 0x0C | 137 | #define PCH_DMA_CTL3 0x0C |
| 134 | #define PCH_DMA_STS0 0x10 | 138 | #define PCH_DMA_STS0 0x10 |
| 135 | #define PCH_DMA_STS1 0x14 | 139 | #define PCH_DMA_STS1 0x14 |
| 140 | #define PCH_DMA_STS2 0x18 | ||
| 136 | 141 | ||
| 137 | #define dma_readl(pd, name) \ | 142 | #define dma_readl(pd, name) \ |
| 138 | readl((pd)->membase + PCH_DMA_##name) | 143 | readl((pd)->membase + PCH_DMA_##name) |
| @@ -183,13 +188,19 @@ static void pdc_enable_irq(struct dma_chan *chan, int enable) | |||
| 183 | { | 188 | { |
| 184 | struct pch_dma *pd = to_pd(chan->device); | 189 | struct pch_dma *pd = to_pd(chan->device); |
| 185 | u32 val; | 190 | u32 val; |
| 191 | int pos; | ||
| 192 | |||
| 193 | if (chan->chan_id < 8) | ||
| 194 | pos = chan->chan_id; | ||
| 195 | else | ||
| 196 | pos = chan->chan_id + 8; | ||
| 186 | 197 | ||
| 187 | val = dma_readl(pd, CTL2); | 198 | val = dma_readl(pd, CTL2); |
| 188 | 199 | ||
| 189 | if (enable) | 200 | if (enable) |
| 190 | val |= 0x1 << chan->chan_id; | 201 | val |= 0x1 << pos; |
| 191 | else | 202 | else |
| 192 | val &= ~(0x1 << chan->chan_id); | 203 | val &= ~(0x1 << pos); |
| 193 | 204 | ||
| 194 | dma_writel(pd, CTL2, val); | 205 | dma_writel(pd, CTL2, val); |
| 195 | 206 | ||
| @@ -202,10 +213,17 @@ static void pdc_set_dir(struct dma_chan *chan) | |||
| 202 | struct pch_dma_chan *pd_chan = to_pd_chan(chan); | 213 | struct pch_dma_chan *pd_chan = to_pd_chan(chan); |
| 203 | struct pch_dma *pd = to_pd(chan->device); | 214 | struct pch_dma *pd = to_pd(chan->device); |
| 204 | u32 val; | 215 | u32 val; |
| 216 | u32 mask_mode; | ||
| 217 | u32 mask_ctl; | ||
| 205 | 218 | ||
| 206 | if (chan->chan_id < 8) { | 219 | if (chan->chan_id < 8) { |
| 207 | val = dma_readl(pd, CTL0); | 220 | val = dma_readl(pd, CTL0); |
| 208 | 221 | ||
| 222 | mask_mode = DMA_CTL0_MODE_MASK_BITS << | ||
| 223 | (DMA_CTL0_BITS_PER_CH * chan->chan_id); | ||
| 224 | mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS << | ||
| 225 | (DMA_CTL0_BITS_PER_CH * chan->chan_id)); | ||
| 226 | val &= mask_mode; | ||
| 209 | if (pd_chan->dir == DMA_TO_DEVICE) | 227 | if (pd_chan->dir == DMA_TO_DEVICE) |
| 210 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + | 228 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + |
| 211 | DMA_CTL0_DIR_SHIFT_BITS); | 229 | DMA_CTL0_DIR_SHIFT_BITS); |
| @@ -213,18 +231,24 @@ static void pdc_set_dir(struct dma_chan *chan) | |||
| 213 | val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + | 231 | val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + |
| 214 | DMA_CTL0_DIR_SHIFT_BITS)); | 232 | DMA_CTL0_DIR_SHIFT_BITS)); |
| 215 | 233 | ||
| 234 | val |= mask_ctl; | ||
| 216 | dma_writel(pd, CTL0, val); | 235 | dma_writel(pd, CTL0, val); |
| 217 | } else { | 236 | } else { |
| 218 | int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */ | 237 | int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */ |
| 219 | val = dma_readl(pd, CTL3); | 238 | val = dma_readl(pd, CTL3); |
| 220 | 239 | ||
| 240 | mask_mode = DMA_CTL0_MODE_MASK_BITS << | ||
| 241 | (DMA_CTL0_BITS_PER_CH * ch); | ||
| 242 | mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS << | ||
| 243 | (DMA_CTL0_BITS_PER_CH * ch)); | ||
| 244 | val &= mask_mode; | ||
| 221 | if (pd_chan->dir == DMA_TO_DEVICE) | 245 | if (pd_chan->dir == DMA_TO_DEVICE) |
| 222 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch + | 246 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch + |
| 223 | DMA_CTL0_DIR_SHIFT_BITS); | 247 | DMA_CTL0_DIR_SHIFT_BITS); |
| 224 | else | 248 | else |
| 225 | val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch + | 249 | val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch + |
| 226 | DMA_CTL0_DIR_SHIFT_BITS)); | 250 | DMA_CTL0_DIR_SHIFT_BITS)); |
| 227 | 251 | val |= mask_ctl; | |
| 228 | dma_writel(pd, CTL3, val); | 252 | dma_writel(pd, CTL3, val); |
| 229 | } | 253 | } |
| 230 | 254 | ||
| @@ -236,33 +260,37 @@ static void pdc_set_mode(struct dma_chan *chan, u32 mode) | |||
| 236 | { | 260 | { |
| 237 | struct pch_dma *pd = to_pd(chan->device); | 261 | struct pch_dma *pd = to_pd(chan->device); |
| 238 | u32 val; | 262 | u32 val; |
| 263 | u32 mask_ctl; | ||
| 264 | u32 mask_dir; | ||
| 239 | 265 | ||
| 240 | if (chan->chan_id < 8) { | 266 | if (chan->chan_id < 8) { |
| 267 | mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS << | ||
| 268 | (DMA_CTL0_BITS_PER_CH * chan->chan_id)); | ||
| 269 | mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +\ | ||
| 270 | DMA_CTL0_DIR_SHIFT_BITS); | ||
| 241 | val = dma_readl(pd, CTL0); | 271 | val = dma_readl(pd, CTL0); |
| 242 | 272 | val &= mask_dir; | |
| 243 | val &= ~(DMA_CTL0_MODE_MASK_BITS << | ||
| 244 | (DMA_CTL0_BITS_PER_CH * chan->chan_id)); | ||
| 245 | val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id); | 273 | val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id); |
| 246 | 274 | val |= mask_ctl; | |
| 247 | dma_writel(pd, CTL0, val); | 275 | dma_writel(pd, CTL0, val); |
| 248 | } else { | 276 | } else { |
| 249 | int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */ | 277 | int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */ |
| 250 | 278 | mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS << | |
| 279 | (DMA_CTL0_BITS_PER_CH * ch)); | ||
| 280 | mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +\ | ||
| 281 | DMA_CTL0_DIR_SHIFT_BITS); | ||
| 251 | val = dma_readl(pd, CTL3); | 282 | val = dma_readl(pd, CTL3); |
| 252 | 283 | val &= mask_dir; | |
| 253 | val &= ~(DMA_CTL0_MODE_MASK_BITS << | ||
| 254 | (DMA_CTL0_BITS_PER_CH * ch)); | ||
| 255 | val |= mode << (DMA_CTL0_BITS_PER_CH * ch); | 284 | val |= mode << (DMA_CTL0_BITS_PER_CH * ch); |
| 256 | 285 | val |= mask_ctl; | |
| 257 | dma_writel(pd, CTL3, val); | 286 | dma_writel(pd, CTL3, val); |
| 258 | |||
| 259 | } | 287 | } |
| 260 | 288 | ||
| 261 | dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n", | 289 | dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n", |
| 262 | chan->chan_id, val); | 290 | chan->chan_id, val); |
| 263 | } | 291 | } |
| 264 | 292 | ||
| 265 | static u32 pdc_get_status(struct pch_dma_chan *pd_chan) | 293 | static u32 pdc_get_status0(struct pch_dma_chan *pd_chan) |
| 266 | { | 294 | { |
| 267 | struct pch_dma *pd = to_pd(pd_chan->chan.device); | 295 | struct pch_dma *pd = to_pd(pd_chan->chan.device); |
| 268 | u32 val; | 296 | u32 val; |
| @@ -272,9 +300,27 @@ static u32 pdc_get_status(struct pch_dma_chan *pd_chan) | |||
| 272 | DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id)); | 300 | DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id)); |
| 273 | } | 301 | } |
| 274 | 302 | ||
| 303 | static u32 pdc_get_status2(struct pch_dma_chan *pd_chan) | ||
| 304 | { | ||
| 305 | struct pch_dma *pd = to_pd(pd_chan->chan.device); | ||
| 306 | u32 val; | ||
| 307 | |||
| 308 | val = dma_readl(pd, STS2); | ||
| 309 | return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS + | ||
| 310 | DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8))); | ||
| 311 | } | ||
| 312 | |||
| 275 | static bool pdc_is_idle(struct pch_dma_chan *pd_chan) | 313 | static bool pdc_is_idle(struct pch_dma_chan *pd_chan) |
| 276 | { | 314 | { |
| 277 | if (pdc_get_status(pd_chan) == DMA_STATUS_IDLE) | 315 | u32 sts; |
| 316 | |||
| 317 | if (pd_chan->chan.chan_id < 8) | ||
| 318 | sts = pdc_get_status0(pd_chan); | ||
| 319 | else | ||
| 320 | sts = pdc_get_status2(pd_chan); | ||
| 321 | |||
| 322 | |||
| 323 | if (sts == DMA_STATUS_IDLE) | ||
| 278 | return true; | 324 | return true; |
| 279 | else | 325 | else |
| 280 | return false; | 326 | return false; |
| @@ -495,11 +541,11 @@ static int pd_alloc_chan_resources(struct dma_chan *chan) | |||
| 495 | list_add_tail(&desc->desc_node, &tmp_list); | 541 | list_add_tail(&desc->desc_node, &tmp_list); |
| 496 | } | 542 | } |
| 497 | 543 | ||
| 498 | spin_lock_bh(&pd_chan->lock); | 544 | spin_lock_irq(&pd_chan->lock); |
| 499 | list_splice(&tmp_list, &pd_chan->free_list); | 545 | list_splice(&tmp_list, &pd_chan->free_list); |
| 500 | pd_chan->descs_allocated = i; | 546 | pd_chan->descs_allocated = i; |
| 501 | pd_chan->completed_cookie = chan->cookie = 1; | 547 | pd_chan->completed_cookie = chan->cookie = 1; |
| 502 | spin_unlock_bh(&pd_chan->lock); | 548 | spin_unlock_irq(&pd_chan->lock); |
| 503 | 549 | ||
| 504 | pdc_enable_irq(chan, 1); | 550 | pdc_enable_irq(chan, 1); |
| 505 | 551 | ||
| @@ -517,10 +563,10 @@ static void pd_free_chan_resources(struct dma_chan *chan) | |||
| 517 | BUG_ON(!list_empty(&pd_chan->active_list)); | 563 | BUG_ON(!list_empty(&pd_chan->active_list)); |
| 518 | BUG_ON(!list_empty(&pd_chan->queue)); | 564 | BUG_ON(!list_empty(&pd_chan->queue)); |
| 519 | 565 | ||
| 520 | spin_lock_bh(&pd_chan->lock); | 566 | spin_lock_irq(&pd_chan->lock); |
| 521 | list_splice_init(&pd_chan->free_list, &tmp_list); | 567 | list_splice_init(&pd_chan->free_list, &tmp_list); |
| 522 | pd_chan->descs_allocated = 0; | 568 | pd_chan->descs_allocated = 0; |
| 523 | spin_unlock_bh(&pd_chan->lock); | 569 | spin_unlock_irq(&pd_chan->lock); |
| 524 | 570 | ||
| 525 | list_for_each_entry_safe(desc, _d, &tmp_list, desc_node) | 571 | list_for_each_entry_safe(desc, _d, &tmp_list, desc_node) |
| 526 | pci_pool_free(pd->pool, desc, desc->txd.phys); | 572 | pci_pool_free(pd->pool, desc, desc->txd.phys); |
| @@ -536,10 +582,10 @@ static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | |||
| 536 | dma_cookie_t last_completed; | 582 | dma_cookie_t last_completed; |
| 537 | int ret; | 583 | int ret; |
| 538 | 584 | ||
| 539 | spin_lock_bh(&pd_chan->lock); | 585 | spin_lock_irq(&pd_chan->lock); |
| 540 | last_completed = pd_chan->completed_cookie; | 586 | last_completed = pd_chan->completed_cookie; |
| 541 | last_used = chan->cookie; | 587 | last_used = chan->cookie; |
| 542 | spin_unlock_bh(&pd_chan->lock); | 588 | spin_unlock_irq(&pd_chan->lock); |
| 543 | 589 | ||
| 544 | ret = dma_async_is_complete(cookie, last_completed, last_used); | 590 | ret = dma_async_is_complete(cookie, last_completed, last_used); |
| 545 | 591 | ||
| @@ -654,7 +700,7 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
| 654 | if (cmd != DMA_TERMINATE_ALL) | 700 | if (cmd != DMA_TERMINATE_ALL) |
| 655 | return -ENXIO; | 701 | return -ENXIO; |
| 656 | 702 | ||
| 657 | spin_lock_bh(&pd_chan->lock); | 703 | spin_lock_irq(&pd_chan->lock); |
| 658 | 704 | ||
| 659 | pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE); | 705 | pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE); |
| 660 | 706 | ||
| @@ -664,7 +710,7 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
| 664 | list_for_each_entry_safe(desc, _d, &list, desc_node) | 710 | list_for_each_entry_safe(desc, _d, &list, desc_node) |
| 665 | pdc_chain_complete(pd_chan, desc); | 711 | pdc_chain_complete(pd_chan, desc); |
| 666 | 712 | ||
| 667 | spin_unlock_bh(&pd_chan->lock); | 713 | spin_unlock_irq(&pd_chan->lock); |
| 668 | 714 | ||
| 669 | return 0; | 715 | return 0; |
| 670 | } | 716 | } |
| @@ -693,30 +739,45 @@ static irqreturn_t pd_irq(int irq, void *devid) | |||
| 693 | struct pch_dma *pd = (struct pch_dma *)devid; | 739 | struct pch_dma *pd = (struct pch_dma *)devid; |
| 694 | struct pch_dma_chan *pd_chan; | 740 | struct pch_dma_chan *pd_chan; |
| 695 | u32 sts0; | 741 | u32 sts0; |
| 742 | u32 sts2; | ||
| 696 | int i; | 743 | int i; |
| 697 | int ret = IRQ_NONE; | 744 | int ret0 = IRQ_NONE; |
| 745 | int ret2 = IRQ_NONE; | ||
| 698 | 746 | ||
| 699 | sts0 = dma_readl(pd, STS0); | 747 | sts0 = dma_readl(pd, STS0); |
| 748 | sts2 = dma_readl(pd, STS2); | ||
| 700 | 749 | ||
| 701 | dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0); | 750 | dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0); |
| 702 | 751 | ||
| 703 | for (i = 0; i < pd->dma.chancnt; i++) { | 752 | for (i = 0; i < pd->dma.chancnt; i++) { |
| 704 | pd_chan = &pd->channels[i]; | 753 | pd_chan = &pd->channels[i]; |
| 705 | 754 | ||
| 706 | if (sts0 & DMA_STATUS_IRQ(i)) { | 755 | if (i < 8) { |
| 707 | if (sts0 & DMA_STATUS_ERR(i)) | 756 | if (sts0 & DMA_STATUS_IRQ(i)) { |
| 708 | set_bit(0, &pd_chan->err_status); | 757 | if (sts0 & DMA_STATUS0_ERR(i)) |
| 758 | set_bit(0, &pd_chan->err_status); | ||
| 709 | 759 | ||
| 710 | tasklet_schedule(&pd_chan->tasklet); | 760 | tasklet_schedule(&pd_chan->tasklet); |
| 711 | ret = IRQ_HANDLED; | 761 | ret0 = IRQ_HANDLED; |
| 712 | } | 762 | } |
| 763 | } else { | ||
| 764 | if (sts2 & DMA_STATUS_IRQ(i - 8)) { | ||
| 765 | if (sts2 & DMA_STATUS2_ERR(i)) | ||
| 766 | set_bit(0, &pd_chan->err_status); | ||
| 713 | 767 | ||
| 768 | tasklet_schedule(&pd_chan->tasklet); | ||
| 769 | ret2 = IRQ_HANDLED; | ||
| 770 | } | ||
| 771 | } | ||
| 714 | } | 772 | } |
| 715 | 773 | ||
| 716 | /* clear interrupt bits in status register */ | 774 | /* clear interrupt bits in status register */ |
| 717 | dma_writel(pd, STS0, sts0); | 775 | if (ret0) |
| 776 | dma_writel(pd, STS0, sts0); | ||
| 777 | if (ret2) | ||
| 778 | dma_writel(pd, STS2, sts2); | ||
| 718 | 779 | ||
| 719 | return ret; | 780 | return ret0 | ret2; |
| 720 | } | 781 | } |
| 721 | 782 | ||
| 722 | #ifdef CONFIG_PM | 783 | #ifdef CONFIG_PM |
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 6abe1ec1f2ce..00eee59e8b33 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
| @@ -82,7 +82,7 @@ struct dma_pl330_dmac { | |||
| 82 | spinlock_t pool_lock; | 82 | spinlock_t pool_lock; |
| 83 | 83 | ||
| 84 | /* Peripheral channels connected to this DMAC */ | 84 | /* Peripheral channels connected to this DMAC */ |
| 85 | struct dma_pl330_chan peripherals[0]; /* keep at end */ | 85 | struct dma_pl330_chan *peripherals; /* keep at end */ |
| 86 | }; | 86 | }; |
| 87 | 87 | ||
| 88 | struct dma_pl330_desc { | 88 | struct dma_pl330_desc { |
| @@ -451,8 +451,13 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) | |||
| 451 | desc->txd.cookie = 0; | 451 | desc->txd.cookie = 0; |
| 452 | async_tx_ack(&desc->txd); | 452 | async_tx_ack(&desc->txd); |
| 453 | 453 | ||
| 454 | desc->req.rqtype = peri->rqtype; | 454 | if (peri) { |
| 455 | desc->req.peri = peri->peri_id; | 455 | desc->req.rqtype = peri->rqtype; |
| 456 | desc->req.peri = peri->peri_id; | ||
| 457 | } else { | ||
| 458 | desc->req.rqtype = MEMTOMEM; | ||
| 459 | desc->req.peri = 0; | ||
| 460 | } | ||
| 456 | 461 | ||
| 457 | dma_async_tx_descriptor_init(&desc->txd, &pch->chan); | 462 | dma_async_tx_descriptor_init(&desc->txd, &pch->chan); |
| 458 | 463 | ||
| @@ -529,10 +534,10 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, | |||
| 529 | struct pl330_info *pi; | 534 | struct pl330_info *pi; |
| 530 | int burst; | 535 | int burst; |
| 531 | 536 | ||
| 532 | if (unlikely(!pch || !len || !peri)) | 537 | if (unlikely(!pch || !len)) |
| 533 | return NULL; | 538 | return NULL; |
| 534 | 539 | ||
| 535 | if (peri->rqtype != MEMTOMEM) | 540 | if (peri && peri->rqtype != MEMTOMEM) |
| 536 | return NULL; | 541 | return NULL; |
| 537 | 542 | ||
| 538 | pi = &pch->dmac->pif; | 543 | pi = &pch->dmac->pif; |
| @@ -577,7 +582,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
| 577 | int i, burst_size; | 582 | int i, burst_size; |
| 578 | dma_addr_t addr; | 583 | dma_addr_t addr; |
| 579 | 584 | ||
| 580 | if (unlikely(!pch || !sgl || !sg_len)) | 585 | if (unlikely(!pch || !sgl || !sg_len || !peri)) |
| 581 | return NULL; | 586 | return NULL; |
| 582 | 587 | ||
| 583 | /* Make sure the direction is consistent */ | 588 | /* Make sure the direction is consistent */ |
| @@ -666,17 +671,12 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 666 | struct dma_device *pd; | 671 | struct dma_device *pd; |
| 667 | struct resource *res; | 672 | struct resource *res; |
| 668 | int i, ret, irq; | 673 | int i, ret, irq; |
| 674 | int num_chan; | ||
| 669 | 675 | ||
| 670 | pdat = adev->dev.platform_data; | 676 | pdat = adev->dev.platform_data; |
| 671 | 677 | ||
| 672 | if (!pdat || !pdat->nr_valid_peri) { | ||
| 673 | dev_err(&adev->dev, "platform data missing\n"); | ||
| 674 | return -ENODEV; | ||
| 675 | } | ||
| 676 | |||
| 677 | /* Allocate a new DMAC and its Channels */ | 678 | /* Allocate a new DMAC and its Channels */ |
| 678 | pdmac = kzalloc(pdat->nr_valid_peri * sizeof(*pch) | 679 | pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL); |
| 679 | + sizeof(*pdmac), GFP_KERNEL); | ||
| 680 | if (!pdmac) { | 680 | if (!pdmac) { |
| 681 | dev_err(&adev->dev, "unable to allocate mem\n"); | 681 | dev_err(&adev->dev, "unable to allocate mem\n"); |
| 682 | return -ENOMEM; | 682 | return -ENOMEM; |
| @@ -685,7 +685,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 685 | pi = &pdmac->pif; | 685 | pi = &pdmac->pif; |
| 686 | pi->dev = &adev->dev; | 686 | pi->dev = &adev->dev; |
| 687 | pi->pl330_data = NULL; | 687 | pi->pl330_data = NULL; |
| 688 | pi->mcbufsz = pdat->mcbuf_sz; | 688 | pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0; |
| 689 | 689 | ||
| 690 | res = &adev->res; | 690 | res = &adev->res; |
| 691 | request_mem_region(res->start, resource_size(res), "dma-pl330"); | 691 | request_mem_region(res->start, resource_size(res), "dma-pl330"); |
| @@ -717,27 +717,35 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 717 | INIT_LIST_HEAD(&pd->channels); | 717 | INIT_LIST_HEAD(&pd->channels); |
| 718 | 718 | ||
| 719 | /* Initialize channel parameters */ | 719 | /* Initialize channel parameters */ |
| 720 | for (i = 0; i < pdat->nr_valid_peri; i++) { | 720 | num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan); |
| 721 | struct dma_pl330_peri *peri = &pdat->peri[i]; | 721 | pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); |
| 722 | pch = &pdmac->peripherals[i]; | ||
| 723 | 722 | ||
| 724 | switch (peri->rqtype) { | 723 | for (i = 0; i < num_chan; i++) { |
| 725 | case MEMTOMEM: | 724 | pch = &pdmac->peripherals[i]; |
| 725 | if (pdat) { | ||
| 726 | struct dma_pl330_peri *peri = &pdat->peri[i]; | ||
| 727 | |||
| 728 | switch (peri->rqtype) { | ||
| 729 | case MEMTOMEM: | ||
| 730 | dma_cap_set(DMA_MEMCPY, pd->cap_mask); | ||
| 731 | break; | ||
| 732 | case MEMTODEV: | ||
| 733 | case DEVTOMEM: | ||
| 734 | dma_cap_set(DMA_SLAVE, pd->cap_mask); | ||
| 735 | break; | ||
| 736 | default: | ||
| 737 | dev_err(&adev->dev, "DEVTODEV Not Supported\n"); | ||
| 738 | continue; | ||
| 739 | } | ||
| 740 | pch->chan.private = peri; | ||
| 741 | } else { | ||
| 726 | dma_cap_set(DMA_MEMCPY, pd->cap_mask); | 742 | dma_cap_set(DMA_MEMCPY, pd->cap_mask); |
| 727 | break; | 743 | pch->chan.private = NULL; |
| 728 | case MEMTODEV: | ||
| 729 | case DEVTOMEM: | ||
| 730 | dma_cap_set(DMA_SLAVE, pd->cap_mask); | ||
| 731 | break; | ||
| 732 | default: | ||
| 733 | dev_err(&adev->dev, "DEVTODEV Not Supported\n"); | ||
| 734 | continue; | ||
| 735 | } | 744 | } |
| 736 | 745 | ||
| 737 | INIT_LIST_HEAD(&pch->work_list); | 746 | INIT_LIST_HEAD(&pch->work_list); |
| 738 | spin_lock_init(&pch->lock); | 747 | spin_lock_init(&pch->lock); |
| 739 | pch->pl330_chid = NULL; | 748 | pch->pl330_chid = NULL; |
| 740 | pch->chan.private = peri; | ||
| 741 | pch->chan.device = pd; | 749 | pch->chan.device = pd; |
| 742 | pch->chan.chan_id = i; | 750 | pch->chan.chan_id = i; |
| 743 | pch->dmac = pdmac; | 751 | pch->dmac = pdmac; |
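With platform data now optional, the pl330 probe above can expose plain memcpy channels whose chan.private is NULL and whose requests default to MEMTOMEM. A rough sketch of a client using such a channel; the polling loop and buffer handling are illustrative only:

#include <linux/dmaengine.h>

static int example_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* No filter: any channel advertising DMA_MEMCPY will do */
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						   DMA_PREP_INTERRUPT);
	if (!txd) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(txd);
	dma_async_issue_pending(chan);

	/* Busy-wait for completion; a real client would use a callback */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
	       DMA_IN_PROGRESS)
		cpu_relax();

	dma_release_channel(chan);
	return 0;
}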
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 8f222d4db7de..75ba5865d7a4 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/clk.h> | 13 | #include <linux/clk.h> |
| 14 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
| 15 | #include <linux/err.h> | 15 | #include <linux/err.h> |
| 16 | #include <linux/amba/bus.h> | ||
| 16 | 17 | ||
| 17 | #include <plat/ste_dma40.h> | 18 | #include <plat/ste_dma40.h> |
| 18 | 19 | ||
| @@ -44,9 +45,6 @@ | |||
| 44 | #define D40_ALLOC_PHY (1 << 30) | 45 | #define D40_ALLOC_PHY (1 << 30) |
| 45 | #define D40_ALLOC_LOG_FREE 0 | 46 | #define D40_ALLOC_LOG_FREE 0 |
| 46 | 47 | ||
| 47 | /* Hardware designer of the block */ | ||
| 48 | #define D40_HW_DESIGNER 0x8 | ||
| 49 | |||
| 50 | /** | 48 | /** |
| 51 | * enum 40_command - The different commands and/or statuses. | 49 | * enum 40_command - The different commands and/or statuses. |
| 52 | * | 50 | * |
| @@ -185,6 +183,8 @@ struct d40_base; | |||
| 185 | * @log_def: Default logical channel settings. | 183 | * @log_def: Default logical channel settings. |
| 186 | * @lcla: Space for one dst src pair for logical channel transfers. | 184 | * @lcla: Space for one dst src pair for logical channel transfers. |
| 187 | * @lcpa: Pointer to dst and src lcpa settings. | 185 | * @lcpa: Pointer to dst and src lcpa settings. |
| 186 | * @runtime_addr: runtime configured address. | ||
| 187 | * @runtime_direction: runtime configured direction. | ||
| 188 | * | 188 | * |
| 189 | * This struct can either "be" a logical or a physical channel. | 189 | * This struct can either "be" a logical or a physical channel. |
| 190 | */ | 190 | */ |
| @@ -199,6 +199,7 @@ struct d40_chan { | |||
| 199 | struct dma_chan chan; | 199 | struct dma_chan chan; |
| 200 | struct tasklet_struct tasklet; | 200 | struct tasklet_struct tasklet; |
| 201 | struct list_head client; | 201 | struct list_head client; |
| 202 | struct list_head pending_queue; | ||
| 202 | struct list_head active; | 203 | struct list_head active; |
| 203 | struct list_head queue; | 204 | struct list_head queue; |
| 204 | struct stedma40_chan_cfg dma_cfg; | 205 | struct stedma40_chan_cfg dma_cfg; |
| @@ -644,7 +645,20 @@ static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) | |||
| 644 | 645 | ||
| 645 | static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) | 646 | static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) |
| 646 | { | 647 | { |
| 647 | list_add_tail(&desc->node, &d40c->queue); | 648 | list_add_tail(&desc->node, &d40c->pending_queue); |
| 649 | } | ||
| 650 | |||
| 651 | static struct d40_desc *d40_first_pending(struct d40_chan *d40c) | ||
| 652 | { | ||
| 653 | struct d40_desc *d; | ||
| 654 | |||
| 655 | if (list_empty(&d40c->pending_queue)) | ||
| 656 | return NULL; | ||
| 657 | |||
| 658 | d = list_first_entry(&d40c->pending_queue, | ||
| 659 | struct d40_desc, | ||
| 660 | node); | ||
| 661 | return d; | ||
| 648 | } | 662 | } |
| 649 | 663 | ||
| 650 | static struct d40_desc *d40_first_queued(struct d40_chan *d40c) | 664 | static struct d40_desc *d40_first_queued(struct d40_chan *d40c) |
| @@ -801,6 +815,11 @@ static void d40_term_all(struct d40_chan *d40c) | |||
| 801 | d40_desc_free(d40c, d40d); | 815 | d40_desc_free(d40c, d40d); |
| 802 | } | 816 | } |
| 803 | 817 | ||
| 818 | /* Release pending descriptors */ | ||
| 819 | while ((d40d = d40_first_pending(d40c))) { | ||
| 820 | d40_desc_remove(d40d); | ||
| 821 | d40_desc_free(d40c, d40d); | ||
| 822 | } | ||
| 804 | 823 | ||
| 805 | d40c->pending_tx = 0; | 824 | d40c->pending_tx = 0; |
| 806 | d40c->busy = false; | 825 | d40c->busy = false; |
| @@ -2091,7 +2110,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, | |||
| 2091 | struct scatterlist *sg; | 2110 | struct scatterlist *sg; |
| 2092 | int i; | 2111 | int i; |
| 2093 | 2112 | ||
| 2094 | sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL); | 2113 | sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT); |
| 2095 | for (i = 0; i < periods; i++) { | 2114 | for (i = 0; i < periods; i++) { |
| 2096 | sg_dma_address(&sg[i]) = dma_addr; | 2115 | sg_dma_address(&sg[i]) = dma_addr; |
| 2097 | sg_dma_len(&sg[i]) = period_len; | 2116 | sg_dma_len(&sg[i]) = period_len; |
| @@ -2151,24 +2170,87 @@ static void d40_issue_pending(struct dma_chan *chan) | |||
| 2151 | 2170 | ||
| 2152 | spin_lock_irqsave(&d40c->lock, flags); | 2171 | spin_lock_irqsave(&d40c->lock, flags); |
| 2153 | 2172 | ||
| 2154 | /* Busy means that pending jobs are already being processed */ | 2173 | list_splice_tail_init(&d40c->pending_queue, &d40c->queue); |
| 2174 | |||
| 2175 | /* Busy means that queued jobs are already being processed */ | ||
| 2155 | if (!d40c->busy) | 2176 | if (!d40c->busy) |
| 2156 | (void) d40_queue_start(d40c); | 2177 | (void) d40_queue_start(d40c); |
| 2157 | 2178 | ||
| 2158 | spin_unlock_irqrestore(&d40c->lock, flags); | 2179 | spin_unlock_irqrestore(&d40c->lock, flags); |
| 2159 | } | 2180 | } |
| 2160 | 2181 | ||
| 2182 | static int | ||
| 2183 | dma40_config_to_halfchannel(struct d40_chan *d40c, | ||
| 2184 | struct stedma40_half_channel_info *info, | ||
| 2185 | enum dma_slave_buswidth width, | ||
| 2186 | u32 maxburst) | ||
| 2187 | { | ||
| 2188 | enum stedma40_periph_data_width addr_width; | ||
| 2189 | int psize; | ||
| 2190 | |||
| 2191 | switch (width) { | ||
| 2192 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
| 2193 | addr_width = STEDMA40_BYTE_WIDTH; | ||
| 2194 | break; | ||
| 2195 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
| 2196 | addr_width = STEDMA40_HALFWORD_WIDTH; | ||
| 2197 | break; | ||
| 2198 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
| 2199 | addr_width = STEDMA40_WORD_WIDTH; | ||
| 2200 | break; | ||
| 2201 | case DMA_SLAVE_BUSWIDTH_8_BYTES: | ||
| 2202 | addr_width = STEDMA40_DOUBLEWORD_WIDTH; | ||
| 2203 | break; | ||
| 2204 | default: | ||
| 2205 | dev_err(d40c->base->dev, | ||
| 2206 | "illegal peripheral address width " | ||
| 2207 | "requested (%d)\n", | ||
| 2208 | width); | ||
| 2209 | return -EINVAL; | ||
| 2210 | } | ||
| 2211 | |||
| 2212 | if (chan_is_logical(d40c)) { | ||
| 2213 | if (maxburst >= 16) | ||
| 2214 | psize = STEDMA40_PSIZE_LOG_16; | ||
| 2215 | else if (maxburst >= 8) | ||
| 2216 | psize = STEDMA40_PSIZE_LOG_8; | ||
| 2217 | else if (maxburst >= 4) | ||
| 2218 | psize = STEDMA40_PSIZE_LOG_4; | ||
| 2219 | else | ||
| 2220 | psize = STEDMA40_PSIZE_LOG_1; | ||
| 2221 | } else { | ||
| 2222 | if (maxburst >= 16) | ||
| 2223 | psize = STEDMA40_PSIZE_PHY_16; | ||
| 2224 | else if (maxburst >= 8) | ||
| 2225 | psize = STEDMA40_PSIZE_PHY_8; | ||
| 2226 | else if (maxburst >= 4) | ||
| 2227 | psize = STEDMA40_PSIZE_PHY_4; | ||
| 2228 | else | ||
| 2229 | psize = STEDMA40_PSIZE_PHY_1; | ||
| 2230 | } | ||
| 2231 | |||
| 2232 | info->data_width = addr_width; | ||
| 2233 | info->psize = psize; | ||
| 2234 | info->flow_ctrl = STEDMA40_NO_FLOW_CTRL; | ||
| 2235 | |||
| 2236 | return 0; | ||
| 2237 | } | ||
| 2238 | |||
| 2161 | /* Runtime reconfiguration extension */ | 2239 | /* Runtime reconfiguration extension */ |
| 2162 | static void d40_set_runtime_config(struct dma_chan *chan, | 2240 | static int d40_set_runtime_config(struct dma_chan *chan, |
| 2163 | struct dma_slave_config *config) | 2241 | struct dma_slave_config *config) |
| 2164 | { | 2242 | { |
| 2165 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | 2243 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
| 2166 | struct stedma40_chan_cfg *cfg = &d40c->dma_cfg; | 2244 | struct stedma40_chan_cfg *cfg = &d40c->dma_cfg; |
| 2167 | enum dma_slave_buswidth config_addr_width; | 2245 | enum dma_slave_buswidth src_addr_width, dst_addr_width; |
| 2168 | dma_addr_t config_addr; | 2246 | dma_addr_t config_addr; |
| 2169 | u32 config_maxburst; | 2247 | u32 src_maxburst, dst_maxburst; |
| 2170 | enum stedma40_periph_data_width addr_width; | 2248 | int ret; |
| 2171 | int psize; | 2249 | |
| 2250 | src_addr_width = config->src_addr_width; | ||
| 2251 | src_maxburst = config->src_maxburst; | ||
| 2252 | dst_addr_width = config->dst_addr_width; | ||
| 2253 | dst_maxburst = config->dst_maxburst; | ||
| 2172 | 2254 | ||
| 2173 | if (config->direction == DMA_FROM_DEVICE) { | 2255 | if (config->direction == DMA_FROM_DEVICE) { |
| 2174 | dma_addr_t dev_addr_rx = | 2256 | dma_addr_t dev_addr_rx = |
| @@ -2187,8 +2269,11 @@ static void d40_set_runtime_config(struct dma_chan *chan, | |||
| 2187 | cfg->dir); | 2269 | cfg->dir); |
| 2188 | cfg->dir = STEDMA40_PERIPH_TO_MEM; | 2270 | cfg->dir = STEDMA40_PERIPH_TO_MEM; |
| 2189 | 2271 | ||
| 2190 | config_addr_width = config->src_addr_width; | 2272 | /* Configure the memory side */ |
| 2191 | config_maxburst = config->src_maxburst; | 2273 | if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) |
| 2274 | dst_addr_width = src_addr_width; | ||
| 2275 | if (dst_maxburst == 0) | ||
| 2276 | dst_maxburst = src_maxburst; | ||
| 2192 | 2277 | ||
| 2193 | } else if (config->direction == DMA_TO_DEVICE) { | 2278 | } else if (config->direction == DMA_TO_DEVICE) { |
| 2194 | dma_addr_t dev_addr_tx = | 2279 | dma_addr_t dev_addr_tx = |
| @@ -2207,68 +2292,39 @@ static void d40_set_runtime_config(struct dma_chan *chan, | |||
| 2207 | cfg->dir); | 2292 | cfg->dir); |
| 2208 | cfg->dir = STEDMA40_MEM_TO_PERIPH; | 2293 | cfg->dir = STEDMA40_MEM_TO_PERIPH; |
| 2209 | 2294 | ||
| 2210 | config_addr_width = config->dst_addr_width; | 2295 | /* Configure the memory side */ |
| 2211 | config_maxburst = config->dst_maxburst; | 2296 | if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) |
| 2212 | 2297 | src_addr_width = dst_addr_width; | |
| 2298 | if (src_maxburst == 0) | ||
| 2299 | src_maxburst = dst_maxburst; | ||
| 2213 | } else { | 2300 | } else { |
| 2214 | dev_err(d40c->base->dev, | 2301 | dev_err(d40c->base->dev, |
| 2215 | "unrecognized channel direction %d\n", | 2302 | "unrecognized channel direction %d\n", |
| 2216 | config->direction); | 2303 | config->direction); |
| 2217 | return; | 2304 | return -EINVAL; |
| 2218 | } | 2305 | } |
| 2219 | 2306 | ||
| 2220 | switch (config_addr_width) { | 2307 | if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) { |
| 2221 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
| 2222 | addr_width = STEDMA40_BYTE_WIDTH; | ||
| 2223 | break; | ||
| 2224 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
| 2225 | addr_width = STEDMA40_HALFWORD_WIDTH; | ||
| 2226 | break; | ||
| 2227 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
| 2228 | addr_width = STEDMA40_WORD_WIDTH; | ||
| 2229 | break; | ||
| 2230 | case DMA_SLAVE_BUSWIDTH_8_BYTES: | ||
| 2231 | addr_width = STEDMA40_DOUBLEWORD_WIDTH; | ||
| 2232 | break; | ||
| 2233 | default: | ||
| 2234 | dev_err(d40c->base->dev, | 2308 | dev_err(d40c->base->dev, |
| 2235 | "illegal peripheral address width " | 2309 | "src/dst width/maxburst mismatch: %d*%d != %d*%d\n", |
| 2236 | "requested (%d)\n", | 2310 | src_maxburst, |
| 2237 | config->src_addr_width); | 2311 | src_addr_width, |
| 2238 | return; | 2312 | dst_maxburst, |
| 2313 | dst_addr_width); | ||
| 2314 | return -EINVAL; | ||
| 2239 | } | 2315 | } |
| 2240 | 2316 | ||
| 2241 | if (chan_is_logical(d40c)) { | 2317 | ret = dma40_config_to_halfchannel(d40c, &cfg->src_info, |
| 2242 | if (config_maxburst >= 16) | 2318 | src_addr_width, |
| 2243 | psize = STEDMA40_PSIZE_LOG_16; | 2319 | src_maxburst); |
| 2244 | else if (config_maxburst >= 8) | 2320 | if (ret) |
| 2245 | psize = STEDMA40_PSIZE_LOG_8; | 2321 | return ret; |
| 2246 | else if (config_maxburst >= 4) | ||
| 2247 | psize = STEDMA40_PSIZE_LOG_4; | ||
| 2248 | else | ||
| 2249 | psize = STEDMA40_PSIZE_LOG_1; | ||
| 2250 | } else { | ||
| 2251 | if (config_maxburst >= 16) | ||
| 2252 | psize = STEDMA40_PSIZE_PHY_16; | ||
| 2253 | else if (config_maxburst >= 8) | ||
| 2254 | psize = STEDMA40_PSIZE_PHY_8; | ||
| 2255 | else if (config_maxburst >= 4) | ||
| 2256 | psize = STEDMA40_PSIZE_PHY_4; | ||
| 2257 | else if (config_maxburst >= 2) | ||
| 2258 | psize = STEDMA40_PSIZE_PHY_2; | ||
| 2259 | else | ||
| 2260 | psize = STEDMA40_PSIZE_PHY_1; | ||
| 2261 | } | ||
| 2262 | 2322 | ||
| 2263 | /* Set up all the endpoint configs */ | 2323 | ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info, |
| 2264 | cfg->src_info.data_width = addr_width; | 2324 | dst_addr_width, |
| 2265 | cfg->src_info.psize = psize; | 2325 | dst_maxburst); |
| 2266 | cfg->src_info.big_endian = false; | 2326 | if (ret) |
| 2267 | cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; | 2327 | return ret; |
| 2268 | cfg->dst_info.data_width = addr_width; | ||
| 2269 | cfg->dst_info.psize = psize; | ||
| 2270 | cfg->dst_info.big_endian = false; | ||
| 2271 | cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; | ||
| 2272 | 2328 | ||
| 2273 | /* Fill in register values */ | 2329 | /* Fill in register values */ |
| 2274 | if (chan_is_logical(d40c)) | 2330 | if (chan_is_logical(d40c)) |
| @@ -2281,12 +2337,14 @@ static void d40_set_runtime_config(struct dma_chan *chan, | |||
| 2281 | d40c->runtime_addr = config_addr; | 2337 | d40c->runtime_addr = config_addr; |
| 2282 | d40c->runtime_direction = config->direction; | 2338 | d40c->runtime_direction = config->direction; |
| 2283 | dev_dbg(d40c->base->dev, | 2339 | dev_dbg(d40c->base->dev, |
| 2284 | "configured channel %s for %s, data width %d, " | 2340 | "configured channel %s for %s, data width %d/%d, " |
| 2285 | "maxburst %d bytes, LE, no flow control\n", | 2341 | "maxburst %d/%d elements, LE, no flow control\n", |
| 2286 | dma_chan_name(chan), | 2342 | dma_chan_name(chan), |
| 2287 | (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", | 2343 | (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", |
| 2288 | config_addr_width, | 2344 | src_addr_width, dst_addr_width, |
| 2289 | config_maxburst); | 2345 | src_maxburst, dst_maxburst); |
| 2346 | |||
| 2347 | return 0; | ||
| 2290 | } | 2348 | } |
| 2291 | 2349 | ||
| 2292 | static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 2350 | static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
| @@ -2307,9 +2365,8 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
| 2307 | case DMA_RESUME: | 2365 | case DMA_RESUME: |
| 2308 | return d40_resume(d40c); | 2366 | return d40_resume(d40c); |
| 2309 | case DMA_SLAVE_CONFIG: | 2367 | case DMA_SLAVE_CONFIG: |
| 2310 | d40_set_runtime_config(chan, | 2368 | return d40_set_runtime_config(chan, |
| 2311 | (struct dma_slave_config *) arg); | 2369 | (struct dma_slave_config *) arg); |
| 2312 | return 0; | ||
| 2313 | default: | 2370 | default: |
| 2314 | break; | 2371 | break; |
| 2315 | } | 2372 | } |
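With d40_set_runtime_config() now returning an error code, a DMA_SLAVE_CONFIG request can fail, for example when the source and destination width/maxburst products do not match. A minimal client-side sketch (channel, device and FIFO address names are hypothetical) that keeps the two sides consistent and propagates the driver's return value:

	#include <linux/dmaengine.h>
	#include <linux/string.h>

	/* Sketch only: 16 * 1 byte on the memory side equals 4 * 4 bytes on
	 * the peripheral side, so the width/maxburst check above passes. */
	static int foo_configure_tx(struct dma_chan *chan, dma_addr_t fifo_addr)
	{
		struct dma_slave_config cfg;

		memset(&cfg, 0, sizeof(cfg));
		cfg.direction      = DMA_TO_DEVICE;
		cfg.dst_addr       = fifo_addr;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		cfg.dst_maxburst   = 4;
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		cfg.src_maxburst   = 16;

		/* Propagates -EINVAL from the driver on a mismatch. */
		return dmaengine_slave_config(chan, &cfg);
	}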
| @@ -2340,6 +2397,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, | |||
| 2340 | 2397 | ||
| 2341 | INIT_LIST_HEAD(&d40c->active); | 2398 | INIT_LIST_HEAD(&d40c->active); |
| 2342 | INIT_LIST_HEAD(&d40c->queue); | 2399 | INIT_LIST_HEAD(&d40c->queue); |
| 2400 | INIT_LIST_HEAD(&d40c->pending_queue); | ||
| 2343 | INIT_LIST_HEAD(&d40c->client); | 2401 | INIT_LIST_HEAD(&d40c->client); |
| 2344 | 2402 | ||
| 2345 | tasklet_init(&d40c->tasklet, dma_tasklet, | 2403 | tasklet_init(&d40c->tasklet, dma_tasklet, |
| @@ -2501,25 +2559,6 @@ static int __init d40_phy_res_init(struct d40_base *base) | |||
| 2501 | 2559 | ||
| 2502 | static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | 2560 | static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) |
| 2503 | { | 2561 | { |
| 2504 | static const struct d40_reg_val dma_id_regs[] = { | ||
| 2505 | /* Peripheral Id */ | ||
| 2506 | { .reg = D40_DREG_PERIPHID0, .val = 0x0040}, | ||
| 2507 | { .reg = D40_DREG_PERIPHID1, .val = 0x0000}, | ||
| 2508 | /* | ||
| 2509 | * D40_DREG_PERIPHID2 Depends on HW revision: | ||
| 2510 | * DB8500ed has 0x0008, | ||
| 2511 | * ? has 0x0018, | ||
| 2512 | * DB8500v1 has 0x0028 | ||
| 2513 | * DB8500v2 has 0x0038 | ||
| 2514 | */ | ||
| 2515 | { .reg = D40_DREG_PERIPHID3, .val = 0x0000}, | ||
| 2516 | |||
| 2517 | /* PCell Id */ | ||
| 2518 | { .reg = D40_DREG_CELLID0, .val = 0x000d}, | ||
| 2519 | { .reg = D40_DREG_CELLID1, .val = 0x00f0}, | ||
| 2520 | { .reg = D40_DREG_CELLID2, .val = 0x0005}, | ||
| 2521 | { .reg = D40_DREG_CELLID3, .val = 0x00b1} | ||
| 2522 | }; | ||
| 2523 | struct stedma40_platform_data *plat_data; | 2562 | struct stedma40_platform_data *plat_data; |
| 2524 | struct clk *clk = NULL; | 2563 | struct clk *clk = NULL; |
| 2525 | void __iomem *virtbase = NULL; | 2564 | void __iomem *virtbase = NULL; |
| @@ -2528,8 +2567,9 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
| 2528 | int num_log_chans = 0; | 2567 | int num_log_chans = 0; |
| 2529 | int num_phy_chans; | 2568 | int num_phy_chans; |
| 2530 | int i; | 2569 | int i; |
| 2531 | u32 val; | 2570 | u32 pid; |
| 2532 | u32 rev; | 2571 | u32 cid; |
| 2572 | u8 rev; | ||
| 2533 | 2573 | ||
| 2534 | clk = clk_get(&pdev->dev, NULL); | 2574 | clk = clk_get(&pdev->dev, NULL); |
| 2535 | 2575 | ||
| @@ -2553,32 +2593,32 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
| 2553 | if (!virtbase) | 2593 | if (!virtbase) |
| 2554 | goto failure; | 2594 | goto failure; |
| 2555 | 2595 | ||
| 2556 | /* HW version check */ | 2596 | /* This is just a regular AMBA PrimeCell ID actually */ |
| 2557 | for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) { | 2597 | for (pid = 0, i = 0; i < 4; i++) |
| 2558 | if (dma_id_regs[i].val != | 2598 | pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i) |
| 2559 | readl(virtbase + dma_id_regs[i].reg)) { | 2599 | & 255) << (i * 8); |
| 2560 | d40_err(&pdev->dev, | 2600 | for (cid = 0, i = 0; i < 4; i++) |
| 2561 | "Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n", | 2601 | cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i) |
| 2562 | dma_id_regs[i].val, | 2602 | & 255) << (i * 8); |
| 2563 | dma_id_regs[i].reg, | ||
| 2564 | readl(virtbase + dma_id_regs[i].reg)); | ||
| 2565 | goto failure; | ||
| 2566 | } | ||
| 2567 | } | ||
| 2568 | 2603 | ||
| 2569 | /* Get silicon revision and designer */ | 2604 | if (cid != AMBA_CID) { |
| 2570 | val = readl(virtbase + D40_DREG_PERIPHID2); | 2605 | d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n"); |
| 2571 | 2606 | goto failure; | |
| 2572 | if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) != | 2607 | } |
| 2573 | D40_HW_DESIGNER) { | 2608 | if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) { |
| 2574 | d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n", | 2609 | d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n", |
| 2575 | val & D40_DREG_PERIPHID2_DESIGNER_MASK, | 2610 | AMBA_MANF_BITS(pid), |
| 2576 | D40_HW_DESIGNER); | 2611 | AMBA_VENDOR_ST); |
| 2577 | goto failure; | 2612 | goto failure; |
| 2578 | } | 2613 | } |
| 2579 | 2614 | /* | |
| 2580 | rev = (val & D40_DREG_PERIPHID2_REV_MASK) >> | 2615 | * HW revision: |
| 2581 | D40_DREG_PERIPHID2_REV_POS; | 2616 | * DB8500ed has revision 0 |
| 2617 | * ? has revision 1 | ||
| 2618 | * DB8500v1 has revision 2 | ||
| 2619 | * DB8500v2 has revision 3 | ||
| 2620 | */ | ||
| 2621 | rev = AMBA_REV_BITS(pid); | ||
| 2582 | 2622 | ||
| 2583 | /* The number of physical channels on this HW */ | 2623 | /* The number of physical channels on this HW */ |
| 2584 | num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; | 2624 | num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; |
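The ID check above relies on the standard AMBA PrimeCell layout: the four PERIPHID and the four CELLID registers each contribute one byte, least significant first, in the last 0x20 bytes of the mapped region. A standalone sketch of the same assembly (virtbase and the mapped size are assumed to come from the platform resource, as in the driver):

	#include <linux/io.h>

	/* Assemble one 32-bit PrimeCell ID word from four byte-wide ID
	 * registers; 'offset' is 0x20 for PERIPHID0..3 and 0x10 for
	 * CELLID0..3. */
	static u32 read_primecell_id(void __iomem *virtbase,
				     resource_size_t size,
				     unsigned int offset)
	{
		u32 id = 0;
		int i;

		for (i = 0; i < 4; i++)
			id |= (readl(virtbase + size - offset + 4 * i) & 255)
				<< (i * 8);

		return id;
	}

	/* A cell ID assembled this way is expected to equal the standard
	 * PrimeCell component ID, AMBA_CID (0xb105f00d). */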
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index 195ee65ee7f3..b44c455158de 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h | |||
| @@ -184,9 +184,6 @@ | |||
| 184 | #define D40_DREG_PERIPHID0 0xFE0 | 184 | #define D40_DREG_PERIPHID0 0xFE0 |
| 185 | #define D40_DREG_PERIPHID1 0xFE4 | 185 | #define D40_DREG_PERIPHID1 0xFE4 |
| 186 | #define D40_DREG_PERIPHID2 0xFE8 | 186 | #define D40_DREG_PERIPHID2 0xFE8 |
| 187 | #define D40_DREG_PERIPHID2_REV_POS 4 | ||
| 188 | #define D40_DREG_PERIPHID2_REV_MASK (0xf << D40_DREG_PERIPHID2_REV_POS) | ||
| 189 | #define D40_DREG_PERIPHID2_DESIGNER_MASK 0xf | ||
| 190 | #define D40_DREG_PERIPHID3 0xFEC | 187 | #define D40_DREG_PERIPHID3 0xFEC |
| 191 | #define D40_DREG_CELLID0 0xFF0 | 188 | #define D40_DREG_CELLID0 0xFF0 |
| 192 | #define D40_DREG_CELLID1 0xFF4 | 189 | #define D40_DREG_CELLID1 0xFF4 |
diff --git a/drivers/spi/ep93xx_spi.c b/drivers/spi/ep93xx_spi.c index d3570071e98f..1cf645479bfe 100644 --- a/drivers/spi/ep93xx_spi.c +++ b/drivers/spi/ep93xx_spi.c | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Driver for Cirrus Logic EP93xx SPI controller. | 2 | * Driver for Cirrus Logic EP93xx SPI controller. |
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2010 Mika Westerberg | 4 | * Copyright (C) 2010-2011 Mika Westerberg |
| 5 | * | 5 | * |
| 6 | * Explicit FIFO handling code was inspired by amba-pl022 driver. | 6 | * Explicit FIFO handling code was inspired by amba-pl022 driver. |
| 7 | * | 7 | * |
| @@ -21,13 +21,16 @@ | |||
| 21 | #include <linux/err.h> | 21 | #include <linux/err.h> |
| 22 | #include <linux/delay.h> | 22 | #include <linux/delay.h> |
| 23 | #include <linux/device.h> | 23 | #include <linux/device.h> |
| 24 | #include <linux/dmaengine.h> | ||
| 24 | #include <linux/bitops.h> | 25 | #include <linux/bitops.h> |
| 25 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
| 26 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
| 27 | #include <linux/workqueue.h> | 28 | #include <linux/workqueue.h> |
| 28 | #include <linux/sched.h> | 29 | #include <linux/sched.h> |
| 30 | #include <linux/scatterlist.h> | ||
| 29 | #include <linux/spi/spi.h> | 31 | #include <linux/spi/spi.h> |
| 30 | 32 | ||
| 33 | #include <mach/dma.h> | ||
| 31 | #include <mach/ep93xx_spi.h> | 34 | #include <mach/ep93xx_spi.h> |
| 32 | 35 | ||
| 33 | #define SSPCR0 0x0000 | 36 | #define SSPCR0 0x0000 |
| @@ -71,6 +74,7 @@ | |||
| 71 | * @pdev: pointer to platform device | 74 | * @pdev: pointer to platform device |
| 72 | * @clk: clock for the controller | 75 | * @clk: clock for the controller |
| 73 | * @regs_base: pointer to ioremap()'d registers | 76 | * @regs_base: pointer to ioremap()'d registers |
| 77 | * @sspdr_phys: physical address of the SSPDR register | ||
| 74 | * @irq: IRQ number used by the driver | 78 | * @irq: IRQ number used by the driver |
| 75 | * @min_rate: minimum clock rate (in Hz) supported by the controller | 79 | * @min_rate: minimum clock rate (in Hz) supported by the controller |
| 76 | * @max_rate: maximum clock rate (in Hz) supported by the controller | 80 | * @max_rate: maximum clock rate (in Hz) supported by the controller |
| @@ -84,6 +88,14 @@ | |||
| 84 | * @rx: current byte in transfer to receive | 88 | * @rx: current byte in transfer to receive |
| 85 | * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one | 89 | * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one |
| 86 | * frame decreases this level and sending one frame increases it. | 90 | * frame decreases this level and sending one frame increases it. |
| 91 | * @dma_rx: RX DMA channel | ||
| 92 | * @dma_tx: TX DMA channel | ||
| 93 | * @dma_rx_data: RX parameters passed to the DMA engine | ||
| 94 | * @dma_tx_data: TX parameters passed to the DMA engine | ||
| 95 | * @rx_sgt: sg table for RX transfers | ||
| 96 | * @tx_sgt: sg table for TX transfers | ||
| 97 | * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by | ||
| 98 | * the client | ||
| 87 | * | 99 | * |
| 88 | * This structure holds EP93xx SPI controller specific information. When | 100 | * This structure holds EP93xx SPI controller specific information. When |
| 89 | * @running is %true, driver accepts transfer requests from protocol drivers. | 101 | * @running is %true, driver accepts transfer requests from protocol drivers. |
| @@ -100,6 +112,7 @@ struct ep93xx_spi { | |||
| 100 | const struct platform_device *pdev; | 112 | const struct platform_device *pdev; |
| 101 | struct clk *clk; | 113 | struct clk *clk; |
| 102 | void __iomem *regs_base; | 114 | void __iomem *regs_base; |
| 115 | unsigned long sspdr_phys; | ||
| 103 | int irq; | 116 | int irq; |
| 104 | unsigned long min_rate; | 117 | unsigned long min_rate; |
| 105 | unsigned long max_rate; | 118 | unsigned long max_rate; |
| @@ -112,6 +125,13 @@ struct ep93xx_spi { | |||
| 112 | size_t tx; | 125 | size_t tx; |
| 113 | size_t rx; | 126 | size_t rx; |
| 114 | size_t fifo_level; | 127 | size_t fifo_level; |
| 128 | struct dma_chan *dma_rx; | ||
| 129 | struct dma_chan *dma_tx; | ||
| 130 | struct ep93xx_dma_data dma_rx_data; | ||
| 131 | struct ep93xx_dma_data dma_tx_data; | ||
| 132 | struct sg_table rx_sgt; | ||
| 133 | struct sg_table tx_sgt; | ||
| 134 | void *zeropage; | ||
| 115 | }; | 135 | }; |
| 116 | 136 | ||
| 117 | /** | 137 | /** |
| @@ -496,14 +516,195 @@ static int ep93xx_spi_read_write(struct ep93xx_spi *espi) | |||
| 496 | espi->fifo_level++; | 516 | espi->fifo_level++; |
| 497 | } | 517 | } |
| 498 | 518 | ||
| 499 | if (espi->rx == t->len) { | 519 | if (espi->rx == t->len) |
| 500 | msg->actual_length += t->len; | ||
| 501 | return 0; | 520 | return 0; |
| 502 | } | ||
| 503 | 521 | ||
| 504 | return -EINPROGRESS; | 522 | return -EINPROGRESS; |
| 505 | } | 523 | } |
| 506 | 524 | ||
| 525 | static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi) | ||
| 526 | { | ||
| 527 | /* | ||
| 528 | * Now everything is set up for the current transfer. We prime the TX | ||
| 529 | * FIFO, enable interrupts, and wait for the transfer to complete. | ||
| 530 | */ | ||
| 531 | if (ep93xx_spi_read_write(espi)) { | ||
| 532 | ep93xx_spi_enable_interrupts(espi); | ||
| 533 | wait_for_completion(&espi->wait); | ||
| 534 | } | ||
| 535 | } | ||
| 536 | |||
| 537 | /** | ||
| 538 | * ep93xx_spi_dma_prepare() - prepares a DMA transfer | ||
| 539 | * @espi: ep93xx SPI controller struct | ||
| 540 | * @dir: DMA transfer direction | ||
| 541 | * | ||
| 542 | * Function configures the DMA, maps the buffer and prepares the DMA | ||
| 543 | * descriptor. Returns a valid DMA descriptor on success and an ERR_PTR() | ||
| 544 | * on failure. | ||
| 545 | */ | ||
| 546 | static struct dma_async_tx_descriptor * | ||
| 547 | ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir) | ||
| 548 | { | ||
| 549 | struct spi_transfer *t = espi->current_msg->state; | ||
| 550 | struct dma_async_tx_descriptor *txd; | ||
| 551 | enum dma_slave_buswidth buswidth; | ||
| 552 | struct dma_slave_config conf; | ||
| 553 | struct scatterlist *sg; | ||
| 554 | struct sg_table *sgt; | ||
| 555 | struct dma_chan *chan; | ||
| 556 | const void *buf, *pbuf; | ||
| 557 | size_t len = t->len; | ||
| 558 | int i, ret, nents; | ||
| 559 | |||
| 560 | if (bits_per_word(espi) > 8) | ||
| 561 | buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; | ||
| 562 | else | ||
| 563 | buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; | ||
| 564 | |||
| 565 | memset(&conf, 0, sizeof(conf)); | ||
| 566 | conf.direction = dir; | ||
| 567 | |||
| 568 | if (dir == DMA_FROM_DEVICE) { | ||
| 569 | chan = espi->dma_rx; | ||
| 570 | buf = t->rx_buf; | ||
| 571 | sgt = &espi->rx_sgt; | ||
| 572 | |||
| 573 | conf.src_addr = espi->sspdr_phys; | ||
| 574 | conf.src_addr_width = buswidth; | ||
| 575 | } else { | ||
| 576 | chan = espi->dma_tx; | ||
| 577 | buf = t->tx_buf; | ||
| 578 | sgt = &espi->tx_sgt; | ||
| 579 | |||
| 580 | conf.dst_addr = espi->sspdr_phys; | ||
| 581 | conf.dst_addr_width = buswidth; | ||
| 582 | } | ||
| 583 | |||
| 584 | ret = dmaengine_slave_config(chan, &conf); | ||
| 585 | if (ret) | ||
| 586 | return ERR_PTR(ret); | ||
| 587 | |||
| 588 | /* | ||
| 589 | * We need to split the transfer into PAGE_SIZE'd chunks. This is | ||
| 590 | * because we are using @espi->zeropage to provide a zero RX buffer | ||
| 591 | * for the TX transfers and we have only allocated one page for that. | ||
| 592 | * | ||
| 593 | * For performance reasons we allocate a new sg_table only when | ||
| 594 | * needed. Otherwise we will re-use the current one. Eventually the | ||
| 595 | * last sg_table is released in ep93xx_spi_release_dma(). | ||
| 596 | */ | ||
| 597 | |||
| 598 | nents = DIV_ROUND_UP(len, PAGE_SIZE); | ||
| 599 | if (nents != sgt->nents) { | ||
| 600 | sg_free_table(sgt); | ||
| 601 | |||
| 602 | ret = sg_alloc_table(sgt, nents, GFP_KERNEL); | ||
| 603 | if (ret) | ||
| 604 | return ERR_PTR(ret); | ||
| 605 | } | ||
| 606 | |||
| 607 | pbuf = buf; | ||
| 608 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
| 609 | size_t bytes = min_t(size_t, len, PAGE_SIZE); | ||
| 610 | |||
| 611 | if (buf) { | ||
| 612 | sg_set_page(sg, virt_to_page(pbuf), bytes, | ||
| 613 | offset_in_page(pbuf)); | ||
| 614 | } else { | ||
| 615 | sg_set_page(sg, virt_to_page(espi->zeropage), | ||
| 616 | bytes, 0); | ||
| 617 | } | ||
| 618 | |||
| 619 | pbuf += bytes; | ||
| 620 | len -= bytes; | ||
| 621 | } | ||
| 622 | |||
| 623 | if (WARN_ON(len)) { | ||
| 624 | dev_warn(&espi->pdev->dev, "len = %zu expected 0!\n", len); | ||
| 625 | return ERR_PTR(-EINVAL); | ||
| 626 | } | ||
| 627 | |||
| 628 | nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir); | ||
| 629 | if (!nents) | ||
| 630 | return ERR_PTR(-ENOMEM); | ||
| 631 | |||
| 632 | txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents, | ||
| 633 | dir, DMA_CTRL_ACK); | ||
| 634 | if (!txd) { | ||
| 635 | dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir); | ||
| 636 | return ERR_PTR(-ENOMEM); | ||
| 637 | } | ||
| 638 | return txd; | ||
| 639 | } | ||
| 640 | |||
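The PAGE_SIZE splitting in ep93xx_spi_dma_prepare() means the number of scatterlist entries depends only on the transfer length. A small illustration of that chunking arithmetic (not part of the driver, values assume 4 KiB pages):

	#include <linux/kernel.h>
	#include <linux/mm.h>

	/* A transfer of 'len' bytes is split into DIV_ROUND_UP(len, PAGE_SIZE)
	 * entries; e.g. 10000 bytes gives three entries of 4096, 4096 and
	 * 1808 bytes, so nothing is left over for the WARN_ON(len) check. */
	static int count_spi_dma_chunks(size_t len)
	{
		int nents = DIV_ROUND_UP(len, PAGE_SIZE);

		while (len)
			len -= min_t(size_t, len, PAGE_SIZE);

		return nents;
	}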
| 641 | /** | ||
| 642 | * ep93xx_spi_dma_finish() - finishes with a DMA transfer | ||
| 643 | * @espi: ep93xx SPI controller struct | ||
| 644 | * @dir: DMA transfer direction | ||
| 645 | * | ||
| 646 | * Function finishes the DMA transfer. After this, the DMA buffer is | ||
| 647 | * unmapped. | ||
| 648 | */ | ||
| 649 | static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi, | ||
| 650 | enum dma_data_direction dir) | ||
| 651 | { | ||
| 652 | struct dma_chan *chan; | ||
| 653 | struct sg_table *sgt; | ||
| 654 | |||
| 655 | if (dir == DMA_FROM_DEVICE) { | ||
| 656 | chan = espi->dma_rx; | ||
| 657 | sgt = &espi->rx_sgt; | ||
| 658 | } else { | ||
| 659 | chan = espi->dma_tx; | ||
| 660 | sgt = &espi->tx_sgt; | ||
| 661 | } | ||
| 662 | |||
| 663 | dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir); | ||
| 664 | } | ||
| 665 | |||
| 666 | static void ep93xx_spi_dma_callback(void *callback_param) | ||
| 667 | { | ||
| 668 | complete(callback_param); | ||
| 669 | } | ||
| 670 | |||
| 671 | static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi) | ||
| 672 | { | ||
| 673 | struct spi_message *msg = espi->current_msg; | ||
| 674 | struct dma_async_tx_descriptor *rxd, *txd; | ||
| 675 | |||
| 676 | rxd = ep93xx_spi_dma_prepare(espi, DMA_FROM_DEVICE); | ||
| 677 | if (IS_ERR(rxd)) { | ||
| 678 | dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd)); | ||
| 679 | msg->status = PTR_ERR(rxd); | ||
| 680 | return; | ||
| 681 | } | ||
| 682 | |||
| 683 | txd = ep93xx_spi_dma_prepare(espi, DMA_TO_DEVICE); | ||
| 684 | if (IS_ERR(txd)) { | ||
| 685 | ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE); | ||
| 686 | dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd)); | ||
| 687 | msg->status = PTR_ERR(txd); | ||
| 688 | return; | ||
| 689 | } | ||
| 690 | |||
| 691 | /* We are ready when RX is done */ | ||
| 692 | rxd->callback = ep93xx_spi_dma_callback; | ||
| 693 | rxd->callback_param = &espi->wait; | ||
| 694 | |||
| 695 | /* Now submit both descriptors and wait while they finish */ | ||
| 696 | dmaengine_submit(rxd); | ||
| 697 | dmaengine_submit(txd); | ||
| 698 | |||
| 699 | dma_async_issue_pending(espi->dma_rx); | ||
| 700 | dma_async_issue_pending(espi->dma_tx); | ||
| 701 | |||
| 702 | wait_for_completion(&espi->wait); | ||
| 703 | |||
| 704 | ep93xx_spi_dma_finish(espi, DMA_TO_DEVICE); | ||
| 705 | ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE); | ||
| 706 | } | ||
| 707 | |||
| 507 | /** | 708 | /** |
| 508 | * ep93xx_spi_process_transfer() - processes one SPI transfer | 709 | * ep93xx_spi_process_transfer() - processes one SPI transfer |
| 509 | * @espi: ep93xx SPI controller struct | 710 | * @espi: ep93xx SPI controller struct |
| @@ -556,13 +757,14 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi, | |||
| 556 | espi->tx = 0; | 757 | espi->tx = 0; |
| 557 | 758 | ||
| 558 | /* | 759 | /* |
| 559 | * Now everything is set up for the current transfer. We prime the TX | 760 | * There is no point in setting up DMA for transfers which will |
| 560 | * FIFO, enable interrupts, and wait for the transfer to complete. | 761 | * fit into the FIFO and can be transferred with a single interrupt. |
| 762 | * So in these cases we use PIO and don't bother with DMA. | ||
| 561 | */ | 763 | */ |
| 562 | if (ep93xx_spi_read_write(espi)) { | 764 | if (espi->dma_rx && t->len > SPI_FIFO_SIZE) |
| 563 | ep93xx_spi_enable_interrupts(espi); | 765 | ep93xx_spi_dma_transfer(espi); |
| 564 | wait_for_completion(&espi->wait); | 766 | else |
| 565 | } | 767 | ep93xx_spi_pio_transfer(espi); |
| 566 | 768 | ||
| 567 | /* | 769 | /* |
| 568 | * In case of error during transmit, we bail out from processing | 770 | * In case of error during transmit, we bail out from processing |
| @@ -571,6 +773,8 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi, | |||
| 571 | if (msg->status) | 773 | if (msg->status) |
| 572 | return; | 774 | return; |
| 573 | 775 | ||
| 776 | msg->actual_length += t->len; | ||
| 777 | |||
| 574 | /* | 778 | /* |
| 575 | * After this transfer is finished, perform any possible | 779 | * After this transfer is finished, perform any possible |
| 576 | * post-transfer actions requested by the protocol driver. | 780 | * post-transfer actions requested by the protocol driver. |
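Whether a transfer takes the DMA or the PIO path above is decided purely by its length versus SPI_FIFO_SIZE; protocol drivers see no difference. A hypothetical client transfer large enough to exercise the DMA path (device and buffer names are illustrative):

	#include <linux/spi/spi.h>

	/* Read 512 bytes, well above the controller FIFO, so the ep93xx
	 * driver will use DMA when channels were set up and PIO otherwise. */
	static int foo_read_block(struct spi_device *spi, void *buf)
	{
		struct spi_transfer t = {
			.rx_buf	= buf,
			.len	= 512,
		};
		struct spi_message m;

		spi_message_init(&m);
		spi_message_add_tail(&t, &m);

		return spi_sync(spi, &m);
	}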
| @@ -752,6 +956,75 @@ static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id) | |||
| 752 | return IRQ_HANDLED; | 956 | return IRQ_HANDLED; |
| 753 | } | 957 | } |
| 754 | 958 | ||
| 959 | static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param) | ||
| 960 | { | ||
| 961 | if (ep93xx_dma_chan_is_m2p(chan)) | ||
| 962 | return false; | ||
| 963 | |||
| 964 | chan->private = filter_param; | ||
| 965 | return true; | ||
| 966 | } | ||
| 967 | |||
| 968 | static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi) | ||
| 969 | { | ||
| 970 | dma_cap_mask_t mask; | ||
| 971 | int ret; | ||
| 972 | |||
| 973 | espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL); | ||
| 974 | if (!espi->zeropage) | ||
| 975 | return -ENOMEM; | ||
| 976 | |||
| 977 | dma_cap_zero(mask); | ||
| 978 | dma_cap_set(DMA_SLAVE, mask); | ||
| 979 | |||
| 980 | espi->dma_rx_data.port = EP93XX_DMA_SSP; | ||
| 981 | espi->dma_rx_data.direction = DMA_FROM_DEVICE; | ||
| 982 | espi->dma_rx_data.name = "ep93xx-spi-rx"; | ||
| 983 | |||
| 984 | espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter, | ||
| 985 | &espi->dma_rx_data); | ||
| 986 | if (!espi->dma_rx) { | ||
| 987 | ret = -ENODEV; | ||
| 988 | goto fail_free_page; | ||
| 989 | } | ||
| 990 | |||
| 991 | espi->dma_tx_data.port = EP93XX_DMA_SSP; | ||
| 992 | espi->dma_tx_data.direction = DMA_TO_DEVICE; | ||
| 993 | espi->dma_tx_data.name = "ep93xx-spi-tx"; | ||
| 994 | |||
| 995 | espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter, | ||
| 996 | &espi->dma_tx_data); | ||
| 997 | if (!espi->dma_tx) { | ||
| 998 | ret = -ENODEV; | ||
| 999 | goto fail_release_rx; | ||
| 1000 | } | ||
| 1001 | |||
| 1002 | return 0; | ||
| 1003 | |||
| 1004 | fail_release_rx: | ||
| 1005 | dma_release_channel(espi->dma_rx); | ||
| 1006 | espi->dma_rx = NULL; | ||
| 1007 | fail_free_page: | ||
| 1008 | free_page((unsigned long)espi->zeropage); | ||
| 1009 | |||
| 1010 | return ret; | ||
| 1011 | } | ||
| 1012 | |||
| 1013 | static void ep93xx_spi_release_dma(struct ep93xx_spi *espi) | ||
| 1014 | { | ||
| 1015 | if (espi->dma_rx) { | ||
| 1016 | dma_release_channel(espi->dma_rx); | ||
| 1017 | sg_free_table(&espi->rx_sgt); | ||
| 1018 | } | ||
| 1019 | if (espi->dma_tx) { | ||
| 1020 | dma_release_channel(espi->dma_tx); | ||
| 1021 | sg_free_table(&espi->tx_sgt); | ||
| 1022 | } | ||
| 1023 | |||
| 1024 | if (espi->zeropage) | ||
| 1025 | free_page((unsigned long)espi->zeropage); | ||
| 1026 | } | ||
| 1027 | |||
| 755 | static int __init ep93xx_spi_probe(struct platform_device *pdev) | 1028 | static int __init ep93xx_spi_probe(struct platform_device *pdev) |
| 756 | { | 1029 | { |
| 757 | struct spi_master *master; | 1030 | struct spi_master *master; |
| @@ -818,6 +1091,7 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev) | |||
| 818 | goto fail_put_clock; | 1091 | goto fail_put_clock; |
| 819 | } | 1092 | } |
| 820 | 1093 | ||
| 1094 | espi->sspdr_phys = res->start + SSPDR; | ||
| 821 | espi->regs_base = ioremap(res->start, resource_size(res)); | 1095 | espi->regs_base = ioremap(res->start, resource_size(res)); |
| 822 | if (!espi->regs_base) { | 1096 | if (!espi->regs_base) { |
| 823 | dev_err(&pdev->dev, "failed to map resources\n"); | 1097 | dev_err(&pdev->dev, "failed to map resources\n"); |
| @@ -832,10 +1106,13 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev) | |||
| 832 | goto fail_unmap_regs; | 1106 | goto fail_unmap_regs; |
| 833 | } | 1107 | } |
| 834 | 1108 | ||
| 1109 | if (info->use_dma && ep93xx_spi_setup_dma(espi)) | ||
| 1110 | dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n"); | ||
| 1111 | |||
| 835 | espi->wq = create_singlethread_workqueue("ep93xx_spid"); | 1112 | espi->wq = create_singlethread_workqueue("ep93xx_spid"); |
| 836 | if (!espi->wq) { | 1113 | if (!espi->wq) { |
| 837 | dev_err(&pdev->dev, "unable to create workqueue\n"); | 1114 | dev_err(&pdev->dev, "unable to create workqueue\n"); |
| 838 | goto fail_free_irq; | 1115 | goto fail_free_dma; |
| 839 | } | 1116 | } |
| 840 | INIT_WORK(&espi->msg_work, ep93xx_spi_work); | 1117 | INIT_WORK(&espi->msg_work, ep93xx_spi_work); |
| 841 | INIT_LIST_HEAD(&espi->msg_queue); | 1118 | INIT_LIST_HEAD(&espi->msg_queue); |
| @@ -857,7 +1134,8 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev) | |||
| 857 | 1134 | ||
| 858 | fail_free_queue: | 1135 | fail_free_queue: |
| 859 | destroy_workqueue(espi->wq); | 1136 | destroy_workqueue(espi->wq); |
| 860 | fail_free_irq: | 1137 | fail_free_dma: |
| 1138 | ep93xx_spi_release_dma(espi); | ||
| 861 | free_irq(espi->irq, espi); | 1139 | free_irq(espi->irq, espi); |
| 862 | fail_unmap_regs: | 1140 | fail_unmap_regs: |
| 863 | iounmap(espi->regs_base); | 1141 | iounmap(espi->regs_base); |
| @@ -901,6 +1179,7 @@ static int __exit ep93xx_spi_remove(struct platform_device *pdev) | |||
| 901 | } | 1179 | } |
| 902 | spin_unlock_irq(&espi->lock); | 1180 | spin_unlock_irq(&espi->lock); |
| 903 | 1181 | ||
| 1182 | ep93xx_spi_release_dma(espi); | ||
| 904 | free_irq(espi->irq, espi); | 1183 | free_irq(espi->irq, espi); |
| 905 | iounmap(espi->regs_base); | 1184 | iounmap(espi->regs_base); |
| 906 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1185 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
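DMA remains opt-in here: the probe path only calls ep93xx_spi_setup_dma() when the platform data sets use_dma, and falls back to PIO with a warning otherwise. A board-file sketch enabling it, where only the use_dma flag is taken from the code above and the other names and values are assumptions:

	#include <linux/types.h>
	#include <mach/ep93xx_spi.h>

	/* Hypothetical board data; the chip-select count is illustrative. */
	static struct ep93xx_spi_info foo_spi_info = {
		.num_chipselect	= 1,
		.use_dma	= true,
	};

	/* Typically registered together with the board's spi_board_info
	 * table via the ep93xx_register_spi() helper. */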
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h index 3111385b8ca7..e6e28f37d8ec 100644 --- a/include/linux/amba/pl08x.h +++ b/include/linux/amba/pl08x.h | |||
| @@ -172,8 +172,11 @@ struct pl08x_dma_chan { | |||
| 172 | int phychan_hold; | 172 | int phychan_hold; |
| 173 | struct tasklet_struct tasklet; | 173 | struct tasklet_struct tasklet; |
| 174 | char *name; | 174 | char *name; |
| 175 | struct pl08x_channel_data *cd; | 175 | const struct pl08x_channel_data *cd; |
| 176 | dma_addr_t runtime_addr; | 176 | dma_addr_t src_addr; |
| 177 | dma_addr_t dst_addr; | ||
| 178 | u32 src_cctl; | ||
| 179 | u32 dst_cctl; | ||
| 177 | enum dma_data_direction runtime_direction; | 180 | enum dma_data_direction runtime_direction; |
| 178 | dma_cookie_t lc; | 181 | dma_cookie_t lc; |
| 179 | struct list_head pend_list; | 182 | struct list_head pend_list; |
| @@ -202,7 +205,7 @@ struct pl08x_dma_chan { | |||
| 202 | * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2 | 205 | * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2 |
| 203 | */ | 206 | */ |
| 204 | struct pl08x_platform_data { | 207 | struct pl08x_platform_data { |
| 205 | struct pl08x_channel_data *slave_channels; | 208 | const struct pl08x_channel_data *slave_channels; |
| 206 | unsigned int num_slave_channels; | 209 | unsigned int num_slave_channels; |
| 207 | struct pl08x_channel_data memcpy_channel; | 210 | struct pl08x_channel_data memcpy_channel; |
| 208 | int (*get_signal)(struct pl08x_dma_chan *); | 211 | int (*get_signal)(struct pl08x_dma_chan *); |
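Making slave_channels point to const data lets board files keep the channel table in read-only memory. A minimal sketch, with the per-signal entries and the remaining pl08x_platform_data fields elided:

	#include <linux/kernel.h>
	#include <linux/amba/pl08x.h>

	static const struct pl08x_channel_data board_slave_channels[] = {
		/* per-signal channel entries elided */
	};

	static struct pl08x_platform_data board_pl08x_pd = {
		.slave_channels		= board_slave_channels,
		.num_slave_channels	= ARRAY_SIZE(board_slave_channels),
		/* .memcpy_channel, .get_signal, ... as before */
	};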
diff --git a/sound/soc/ep93xx/ep93xx-ac97.c b/sound/soc/ep93xx/ep93xx-ac97.c index 104e95cda0ad..c7417c76552b 100644 --- a/sound/soc/ep93xx/ep93xx-ac97.c +++ b/sound/soc/ep93xx/ep93xx-ac97.c | |||
| @@ -106,12 +106,12 @@ static struct ep93xx_ac97_info *ep93xx_ac97_info; | |||
| 106 | 106 | ||
| 107 | static struct ep93xx_pcm_dma_params ep93xx_ac97_pcm_out = { | 107 | static struct ep93xx_pcm_dma_params ep93xx_ac97_pcm_out = { |
| 108 | .name = "ac97-pcm-out", | 108 | .name = "ac97-pcm-out", |
| 109 | .dma_port = EP93XX_DMA_M2P_PORT_AAC1, | 109 | .dma_port = EP93XX_DMA_AAC1, |
| 110 | }; | 110 | }; |
| 111 | 111 | ||
| 112 | static struct ep93xx_pcm_dma_params ep93xx_ac97_pcm_in = { | 112 | static struct ep93xx_pcm_dma_params ep93xx_ac97_pcm_in = { |
| 113 | .name = "ac97-pcm-in", | 113 | .name = "ac97-pcm-in", |
| 114 | .dma_port = EP93XX_DMA_M2P_PORT_AAC1, | 114 | .dma_port = EP93XX_DMA_AAC1, |
| 115 | }; | 115 | }; |
| 116 | 116 | ||
| 117 | static inline unsigned ep93xx_ac97_read_reg(struct ep93xx_ac97_info *info, | 117 | static inline unsigned ep93xx_ac97_read_reg(struct ep93xx_ac97_info *info, |
diff --git a/sound/soc/ep93xx/ep93xx-i2s.c b/sound/soc/ep93xx/ep93xx-i2s.c index 042f4e93746f..30df42568dbb 100644 --- a/sound/soc/ep93xx/ep93xx-i2s.c +++ b/sound/soc/ep93xx/ep93xx-i2s.c | |||
| @@ -70,11 +70,11 @@ struct ep93xx_i2s_info { | |||
| 70 | struct ep93xx_pcm_dma_params ep93xx_i2s_dma_params[] = { | 70 | struct ep93xx_pcm_dma_params ep93xx_i2s_dma_params[] = { |
| 71 | [SNDRV_PCM_STREAM_PLAYBACK] = { | 71 | [SNDRV_PCM_STREAM_PLAYBACK] = { |
| 72 | .name = "i2s-pcm-out", | 72 | .name = "i2s-pcm-out", |
| 73 | .dma_port = EP93XX_DMA_M2P_PORT_I2S1, | 73 | .dma_port = EP93XX_DMA_I2S1, |
| 74 | }, | 74 | }, |
| 75 | [SNDRV_PCM_STREAM_CAPTURE] = { | 75 | [SNDRV_PCM_STREAM_CAPTURE] = { |
| 76 | .name = "i2s-pcm-in", | 76 | .name = "i2s-pcm-in", |
| 77 | .dma_port = EP93XX_DMA_M2P_PORT_I2S1, | 77 | .dma_port = EP93XX_DMA_I2S1, |
| 78 | }, | 78 | }, |
| 79 | }; | 79 | }; |
| 80 | 80 | ||
diff --git a/sound/soc/ep93xx/ep93xx-pcm.c b/sound/soc/ep93xx/ep93xx-pcm.c index a456e491155f..a07f99c9c375 100644 --- a/sound/soc/ep93xx/ep93xx-pcm.c +++ b/sound/soc/ep93xx/ep93xx-pcm.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
| 17 | #include <linux/device.h> | 17 | #include <linux/device.h> |
| 18 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
| 19 | #include <linux/dmaengine.h> | ||
| 19 | #include <linux/dma-mapping.h> | 20 | #include <linux/dma-mapping.h> |
| 20 | 21 | ||
| 21 | #include <sound/core.h> | 22 | #include <sound/core.h> |
| @@ -53,43 +54,34 @@ static const struct snd_pcm_hardware ep93xx_pcm_hardware = { | |||
| 53 | 54 | ||
| 54 | struct ep93xx_runtime_data | 55 | struct ep93xx_runtime_data |
| 55 | { | 56 | { |
| 56 | struct ep93xx_dma_m2p_client cl; | ||
| 57 | struct ep93xx_pcm_dma_params *params; | ||
| 58 | int pointer_bytes; | 57 | int pointer_bytes; |
| 59 | struct tasklet_struct period_tasklet; | ||
| 60 | int periods; | 58 | int periods; |
| 61 | struct ep93xx_dma_buffer buf[32]; | 59 | int period_bytes; |
| 60 | struct dma_chan *dma_chan; | ||
| 61 | struct ep93xx_dma_data dma_data; | ||
| 62 | }; | 62 | }; |
| 63 | 63 | ||
| 64 | static void ep93xx_pcm_period_elapsed(unsigned long data) | 64 | static void ep93xx_pcm_dma_callback(void *data) |
| 65 | { | 65 | { |
| 66 | struct snd_pcm_substream *substream = (struct snd_pcm_substream *)data; | 66 | struct snd_pcm_substream *substream = data; |
| 67 | snd_pcm_period_elapsed(substream); | 67 | struct ep93xx_runtime_data *rtd = substream->runtime->private_data; |
| 68 | } | ||
| 69 | 68 | ||
| 70 | static void ep93xx_pcm_buffer_started(void *cookie, | 69 | rtd->pointer_bytes += rtd->period_bytes; |
| 71 | struct ep93xx_dma_buffer *buf) | 70 | rtd->pointer_bytes %= rtd->period_bytes * rtd->periods; |
| 72 | { | 71 | |
| 72 | snd_pcm_period_elapsed(substream); | ||
| 73 | } | 73 | } |
| 74 | 74 | ||
| 75 | static void ep93xx_pcm_buffer_finished(void *cookie, | 75 | static bool ep93xx_pcm_dma_filter(struct dma_chan *chan, void *filter_param) |
| 76 | struct ep93xx_dma_buffer *buf, | ||
| 77 | int bytes, int error) | ||
| 78 | { | 76 | { |
| 79 | struct snd_pcm_substream *substream = cookie; | 77 | struct ep93xx_dma_data *data = filter_param; |
| 80 | struct ep93xx_runtime_data *rtd = substream->runtime->private_data; | ||
| 81 | |||
| 82 | if (buf == rtd->buf + rtd->periods - 1) | ||
| 83 | rtd->pointer_bytes = 0; | ||
| 84 | else | ||
| 85 | rtd->pointer_bytes += buf->size; | ||
| 86 | 78 | ||
| 87 | if (!error) { | 79 | if (data->direction == ep93xx_dma_chan_direction(chan)) { |
| 88 | ep93xx_dma_m2p_submit_recursive(&rtd->cl, buf); | 80 | chan->private = data; |
| 89 | tasklet_schedule(&rtd->period_tasklet); | 81 | return true; |
| 90 | } else { | ||
| 91 | snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN); | ||
| 92 | } | 82 | } |
| 83 | |||
| 84 | return false; | ||
| 93 | } | 85 | } |
| 94 | 86 | ||
| 95 | static int ep93xx_pcm_open(struct snd_pcm_substream *substream) | 87 | static int ep93xx_pcm_open(struct snd_pcm_substream *substream) |
| @@ -98,30 +90,38 @@ static int ep93xx_pcm_open(struct snd_pcm_substream *substream) | |||
| 98 | struct snd_soc_dai *cpu_dai = soc_rtd->cpu_dai; | 90 | struct snd_soc_dai *cpu_dai = soc_rtd->cpu_dai; |
| 99 | struct ep93xx_pcm_dma_params *dma_params; | 91 | struct ep93xx_pcm_dma_params *dma_params; |
| 100 | struct ep93xx_runtime_data *rtd; | 92 | struct ep93xx_runtime_data *rtd; |
| 93 | dma_cap_mask_t mask; | ||
| 101 | int ret; | 94 | int ret; |
| 102 | 95 | ||
| 103 | dma_params = snd_soc_dai_get_dma_data(cpu_dai, substream); | 96 | ret = snd_pcm_hw_constraint_integer(substream->runtime, |
| 97 | SNDRV_PCM_HW_PARAM_PERIODS); | ||
| 98 | if (ret < 0) | ||
| 99 | return ret; | ||
| 100 | |||
| 104 | snd_soc_set_runtime_hwparams(substream, &ep93xx_pcm_hardware); | 101 | snd_soc_set_runtime_hwparams(substream, &ep93xx_pcm_hardware); |
| 105 | 102 | ||
| 106 | rtd = kmalloc(sizeof(*rtd), GFP_KERNEL); | 103 | rtd = kmalloc(sizeof(*rtd), GFP_KERNEL); |
| 107 | if (!rtd) | 104 | if (!rtd) |
| 108 | return -ENOMEM; | 105 | return -ENOMEM; |
| 109 | 106 | ||
| 110 | memset(&rtd->period_tasklet, 0, sizeof(rtd->period_tasklet)); | 107 | dma_cap_zero(mask); |
| 111 | rtd->period_tasklet.func = ep93xx_pcm_period_elapsed; | 108 | dma_cap_set(DMA_SLAVE, mask); |
| 112 | rtd->period_tasklet.data = (unsigned long)substream; | 109 | dma_cap_set(DMA_CYCLIC, mask); |
| 113 | 110 | ||
| 114 | rtd->cl.name = dma_params->name; | 111 | dma_params = snd_soc_dai_get_dma_data(cpu_dai, substream); |
| 115 | rtd->cl.flags = dma_params->dma_port | EP93XX_DMA_M2P_IGNORE_ERROR | | 112 | rtd->dma_data.port = dma_params->dma_port; |
| 116 | ((substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? | 113 | rtd->dma_data.name = dma_params->name; |
| 117 | EP93XX_DMA_M2P_TX : EP93XX_DMA_M2P_RX); | 114 | |
| 118 | rtd->cl.cookie = substream; | 115 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) |
| 119 | rtd->cl.buffer_started = ep93xx_pcm_buffer_started; | 116 | rtd->dma_data.direction = DMA_TO_DEVICE; |
| 120 | rtd->cl.buffer_finished = ep93xx_pcm_buffer_finished; | 117 | else |
| 121 | ret = ep93xx_dma_m2p_client_register(&rtd->cl); | 118 | rtd->dma_data.direction = DMA_FROM_DEVICE; |
| 122 | if (ret < 0) { | 119 | |
| 120 | rtd->dma_chan = dma_request_channel(mask, ep93xx_pcm_dma_filter, | ||
| 121 | &rtd->dma_data); | ||
| 122 | if (!rtd->dma_chan) { | ||
| 123 | kfree(rtd); | 123 | kfree(rtd); |
| 124 | return ret; | 124 | return -EINVAL; |
| 125 | } | 125 | } |
| 126 | 126 | ||
| 127 | substream->runtime->private_data = rtd; | 127 | substream->runtime->private_data = rtd; |
| @@ -132,31 +132,52 @@ static int ep93xx_pcm_close(struct snd_pcm_substream *substream) | |||
| 132 | { | 132 | { |
| 133 | struct ep93xx_runtime_data *rtd = substream->runtime->private_data; | 133 | struct ep93xx_runtime_data *rtd = substream->runtime->private_data; |
| 134 | 134 | ||
| 135 | ep93xx_dma_m2p_client_unregister(&rtd->cl); | 135 | dma_release_channel(rtd->dma_chan); |
| 136 | kfree(rtd); | 136 | kfree(rtd); |
| 137 | return 0; | 137 | return 0; |
| 138 | } | 138 | } |
| 139 | 139 | ||
| 140 | static int ep93xx_pcm_dma_submit(struct snd_pcm_substream *substream) | ||
| 141 | { | ||
| 142 | struct snd_pcm_runtime *runtime = substream->runtime; | ||
| 143 | struct ep93xx_runtime_data *rtd = runtime->private_data; | ||
| 144 | struct dma_chan *chan = rtd->dma_chan; | ||
| 145 | struct dma_device *dma_dev = chan->device; | ||
| 146 | struct dma_async_tx_descriptor *desc; | ||
| 147 | |||
| 148 | rtd->pointer_bytes = 0; | ||
| 149 | desc = dma_dev->device_prep_dma_cyclic(chan, runtime->dma_addr, | ||
| 150 | rtd->period_bytes * rtd->periods, | ||
| 151 | rtd->period_bytes, | ||
| 152 | rtd->dma_data.direction); | ||
| 153 | if (!desc) | ||
| 154 | return -EINVAL; | ||
| 155 | |||
| 156 | desc->callback = ep93xx_pcm_dma_callback; | ||
| 157 | desc->callback_param = substream; | ||
| 158 | |||
| 159 | dmaengine_submit(desc); | ||
| 160 | return 0; | ||
| 161 | } | ||
| 162 | |||
| 163 | static void ep93xx_pcm_dma_flush(struct snd_pcm_substream *substream) | ||
| 164 | { | ||
| 165 | struct snd_pcm_runtime *runtime = substream->runtime; | ||
| 166 | struct ep93xx_runtime_data *rtd = runtime->private_data; | ||
| 167 | |||
| 168 | dmaengine_terminate_all(rtd->dma_chan); | ||
| 169 | } | ||
| 170 | |||
| 140 | static int ep93xx_pcm_hw_params(struct snd_pcm_substream *substream, | 171 | static int ep93xx_pcm_hw_params(struct snd_pcm_substream *substream, |
| 141 | struct snd_pcm_hw_params *params) | 172 | struct snd_pcm_hw_params *params) |
| 142 | { | 173 | { |
| 143 | struct snd_pcm_runtime *runtime = substream->runtime; | 174 | struct snd_pcm_runtime *runtime = substream->runtime; |
| 144 | struct ep93xx_runtime_data *rtd = runtime->private_data; | 175 | struct ep93xx_runtime_data *rtd = runtime->private_data; |
| 145 | size_t totsize = params_buffer_bytes(params); | ||
| 146 | size_t period = params_period_bytes(params); | ||
| 147 | int i; | ||
| 148 | 176 | ||
| 149 | snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); | 177 | snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); |
| 150 | runtime->dma_bytes = totsize; | ||
| 151 | |||
| 152 | rtd->periods = (totsize + period - 1) / period; | ||
| 153 | for (i = 0; i < rtd->periods; i++) { | ||
| 154 | rtd->buf[i].bus_addr = runtime->dma_addr + (i * period); | ||
| 155 | rtd->buf[i].size = period; | ||
| 156 | if ((i + 1) * period > totsize) | ||
| 157 | rtd->buf[i].size = totsize - (i * period); | ||
| 158 | } | ||
| 159 | 178 | ||
| 179 | rtd->periods = params_periods(params); | ||
| 180 | rtd->period_bytes = params_period_bytes(params); | ||
| 160 | return 0; | 181 | return 0; |
| 161 | } | 182 | } |
| 162 | 183 | ||
| @@ -168,24 +189,20 @@ static int ep93xx_pcm_hw_free(struct snd_pcm_substream *substream) | |||
| 168 | 189 | ||
| 169 | static int ep93xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) | 190 | static int ep93xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) |
| 170 | { | 191 | { |
| 171 | struct ep93xx_runtime_data *rtd = substream->runtime->private_data; | ||
| 172 | int ret; | 192 | int ret; |
| 173 | int i; | ||
| 174 | 193 | ||
| 175 | ret = 0; | 194 | ret = 0; |
| 176 | switch (cmd) { | 195 | switch (cmd) { |
| 177 | case SNDRV_PCM_TRIGGER_START: | 196 | case SNDRV_PCM_TRIGGER_START: |
| 178 | case SNDRV_PCM_TRIGGER_RESUME: | 197 | case SNDRV_PCM_TRIGGER_RESUME: |
| 179 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: | 198 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: |
| 180 | rtd->pointer_bytes = 0; | 199 | ret = ep93xx_pcm_dma_submit(substream); |
| 181 | for (i = 0; i < rtd->periods; i++) | ||
| 182 | ep93xx_dma_m2p_submit(&rtd->cl, rtd->buf + i); | ||
| 183 | break; | 200 | break; |
| 184 | 201 | ||
| 185 | case SNDRV_PCM_TRIGGER_STOP: | 202 | case SNDRV_PCM_TRIGGER_STOP: |
| 186 | case SNDRV_PCM_TRIGGER_SUSPEND: | 203 | case SNDRV_PCM_TRIGGER_SUSPEND: |
| 187 | case SNDRV_PCM_TRIGGER_PAUSE_PUSH: | 204 | case SNDRV_PCM_TRIGGER_PAUSE_PUSH: |
| 188 | ep93xx_dma_m2p_flush(&rtd->cl); | 205 | ep93xx_pcm_dma_flush(substream); |
| 189 | break; | 206 | break; |
| 190 | 207 | ||
| 191 | default: | 208 | default: |
