aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm/mach-tegra
diff options
context:
space:
mode:
authorLaxman Dewangan <ldewangan@nvidia.com>2012-06-29 07:30:07 -0400
committerStephen Warren <swarren@nvidia.com>2012-07-06 13:49:13 -0400
commit5b39fc0bf17028a6b18e57d33f0dcf71f28a8223 (patch)
tree9cbdabf6c3f6db0782a547ddec0fe78478e90943 /arch/arm/mach-tegra
parentb861c275ea5cfeab32241c3c92a203579d5699ff (diff)
ARM: tegra: apbio: use dmaengine based dma driver
Use the dmaengine based Tegra APB DMA driver for apbio access in place of legacy Tegra APB DMA. The new driver is selected if legacy driver is not selected and new DMA driver is enabled through config file. Signed-off-by: Laxman Dewangan <ldewangan@nvidia.com> [swarren: s/pr_err/pr_debug/ in tegra_apb_dma_init; this condition is expected to fire repeatedly before the DMA driver is available] Signed-off-by: Stephen Warren <swarren@nvidia.com>
Diffstat (limited to 'arch/arm/mach-tegra')
-rw-r--r--arch/arm/mach-tegra/apbio.c139
1 file changed, 135 insertions, 4 deletions
diff --git a/arch/arm/mach-tegra/apbio.c b/arch/arm/mach-tegra/apbio.c
index 74ac0db53739..dc0fe389be56 100644
--- a/arch/arm/mach-tegra/apbio.c
+++ b/arch/arm/mach-tegra/apbio.c
@@ -17,8 +17,7 @@
17#include <linux/io.h> 17#include <linux/io.h>
18#include <mach/iomap.h> 18#include <mach/iomap.h>
19#include <linux/of.h> 19#include <linux/of.h>
20 20#include <linux/dmaengine.h>
21#ifdef CONFIG_TEGRA_SYSTEM_DMA
22#include <linux/dma-mapping.h> 21#include <linux/dma-mapping.h>
23#include <linux/spinlock.h> 22#include <linux/spinlock.h>
24#include <linux/completion.h> 23#include <linux/completion.h>
@@ -29,9 +28,8 @@
29 28
30#include "apbio.h" 29#include "apbio.h"
31 30
31#if defined(CONFIG_TEGRA_SYSTEM_DMA) || defined(CONFIG_TEGRA20_APB_DMA)
32static DEFINE_MUTEX(tegra_apb_dma_lock); 32static DEFINE_MUTEX(tegra_apb_dma_lock);
33
34static struct tegra_dma_channel *tegra_apb_dma;
35static u32 *tegra_apb_bb; 33static u32 *tegra_apb_bb;
36static dma_addr_t tegra_apb_bb_phys; 34static dma_addr_t tegra_apb_bb_phys;
37static DECLARE_COMPLETION(tegra_apb_wait); 35static DECLARE_COMPLETION(tegra_apb_wait);
@@ -39,6 +37,9 @@ static DECLARE_COMPLETION(tegra_apb_wait);
39static u32 tegra_apb_readl_direct(unsigned long offset); 37static u32 tegra_apb_readl_direct(unsigned long offset);
40static void tegra_apb_writel_direct(u32 value, unsigned long offset); 38static void tegra_apb_writel_direct(u32 value, unsigned long offset);
41 39
40#if defined(CONFIG_TEGRA_SYSTEM_DMA)
41static struct tegra_dma_channel *tegra_apb_dma;
42
42bool tegra_apb_init(void) 43bool tegra_apb_init(void)
43{ 44{
44 struct tegra_dma_channel *ch; 45 struct tegra_dma_channel *ch;
@@ -149,6 +150,136 @@ static void tegra_apb_writel_using_dma(u32 value, unsigned long offset)
149 150
150 mutex_unlock(&tegra_apb_dma_lock); 151 mutex_unlock(&tegra_apb_dma_lock);
151} 152}
153
154#else
/* dmaengine-based path (CONFIG_TEGRA20_APB_DMA, legacy driver not selected). */
/* Channel handle; stays NULL until tegra_apb_dma_init() succeeds. */
155static struct dma_chan *tegra_apb_dma_chan;
/* Slave config shared by reads and writes; the per-transfer src/dst address
 * is filled in by do_dma_transfer() under tegra_apb_dma_lock. */
156static struct dma_slave_config dma_sconfig;
157
/*
 * Lazily set up the dmaengine path: request any slave-capable channel and
 * allocate a one-word coherent bounce buffer for 32-bit APB transfers.
 * Returns true when the DMA path is usable, false on allocation failure
 * (callers then fall back to direct MMIO access). Safe to call repeatedly;
 * racing callers are serialized by tegra_apb_dma_lock.
 */
158bool tegra_apb_dma_init(void)
159{
160 dma_cap_mask_t mask;
161
162 mutex_lock(&tegra_apb_dma_lock);
163
164 /* Check to see if we raced to setup */
165 if (tegra_apb_dma_chan)
166 goto skip_init;
167
168 dma_cap_zero(mask);
169 dma_cap_set(DMA_SLAVE, mask);
/* No filter function: any DMA_SLAVE channel will do for APB access. */
170 tegra_apb_dma_chan = dma_request_channel(mask, NULL, NULL);
171 if (!tegra_apb_dma_chan) {
172 /*
173 * This is common until the device is probed, so don't
174 * shout about it.
175 */
176 pr_debug("%s: can not allocate dma channel\n", __func__);
177 goto err_dma_alloc;
178 }
179
/* Single u32 bounce word, DMA-coherent, reused for every transfer. */
180 tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32),
181 &tegra_apb_bb_phys, GFP_KERNEL);
182 if (!tegra_apb_bb) {
183 pr_err("%s: can not allocate bounce buffer\n", __func__);
184 goto err_buff_alloc;
185 }
186
/* 32-bit, single-word bursts on both sides; request line is the APB
 * access counter (TEGRA_DMA_REQ_SEL_CNTR). */
187 dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
188 dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
189 dma_sconfig.slave_id = TEGRA_DMA_REQ_SEL_CNTR;
190 dma_sconfig.src_maxburst = 1;
191 dma_sconfig.dst_maxburst = 1;
192
193skip_init:
194 mutex_unlock(&tegra_apb_dma_lock);
195 return true;
196
/* goto-based unwind: release in reverse order of acquisition. */
197err_buff_alloc:
198 dma_release_channel(tegra_apb_dma_chan);
199 tegra_apb_dma_chan = NULL;
200
201err_dma_alloc:
202 mutex_unlock(&tegra_apb_dma_lock);
203 return false;
204}
205
/* dmaengine completion callback: wakes the waiter in do_dma_transfer().
 * @args is unused (registered with callback_param = NULL). */
206static void apb_dma_complete(void *args)
207{
208 complete(&tegra_apb_wait);
209}
210
/*
 * Run one 32-bit DMA transfer between the coherent bounce word and the APB
 * register at @apb_add. @dir is DMA_DEV_TO_MEM for a read into the bounce
 * buffer, DMA_MEM_TO_DEV for a write from it. Blocks up to 50 ms for
 * completion. Returns 0 on success or a negative errno. Caller must hold
 * tegra_apb_dma_lock (dma_sconfig and the bounce word are shared state).
 */
211static int do_dma_transfer(unsigned long apb_add,
212 enum dma_transfer_direction dir)
213{
214 struct dma_async_tx_descriptor *dma_desc;
215 int ret;
216
/* Point the device-side address of the shared config at this register. */
217 if (dir == DMA_DEV_TO_MEM)
218 dma_sconfig.src_addr = apb_add;
219 else
220 dma_sconfig.dst_addr = apb_add;
221
222 ret = dmaengine_slave_config(tegra_apb_dma_chan, &dma_sconfig);
223 if (ret)
224 return ret;
225
226 dma_desc = dmaengine_prep_slave_single(tegra_apb_dma_chan,
227 tegra_apb_bb_phys, sizeof(u32), dir,
228 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
229 if (!dma_desc)
230 return -EINVAL;
231
232 dma_desc->callback = apb_dma_complete;
233 dma_desc->callback_param = NULL;
234
/* Re-arm the completion before submitting so the callback can't be lost. */
235 INIT_COMPLETION(tegra_apb_wait);
236
237 dmaengine_submit(dma_desc);
238 dma_async_issue_pending(tegra_apb_dma_chan);
239 ret = wait_for_completion_timeout(&tegra_apb_wait,
240 msecs_to_jiffies(50));
241
/* NOTE(review): the message says "read" but this path is shared with
 * MEM_TO_DEV (write) transfers, so write timeouts are misreported. */
242 if (WARN(ret == 0, "apb read dma timed out")) {
243 dmaengine_terminate_all(tegra_apb_dma_chan);
244 return -EFAULT;
245 }
246 return 0;
247}
248
/*
 * Read a 32-bit APB register at @offset via DMA. Falls back to a direct
 * MMIO read if the DMA channel is not (yet) available. On a DMA error the
 * bounce word is zeroed, so the caller observes 0 rather than stale data.
 */
249static u32 tegra_apb_readl_using_dma(unsigned long offset)
250{
251 int ret;
252
/* Lazy init: the DMA driver may not have probed yet at first call. */
253 if (!tegra_apb_dma_chan && !tegra_apb_dma_init())
254 return tegra_apb_readl_direct(offset);
255
256 mutex_lock(&tegra_apb_dma_lock);
257 ret = do_dma_transfer(offset, DMA_DEV_TO_MEM);
258 if (ret < 0) {
259 pr_err("error in reading offset 0x%08lx using dma\n", offset);
260 *(u32 *)tegra_apb_bb = 0;
261 }
262 mutex_unlock(&tegra_apb_dma_lock);
/* NOTE(review): read of the bounce word after dropping the lock — a
 * concurrent transfer could overwrite it before this load; verify. */
263 return *((u32 *)tegra_apb_bb);
264}
265
/*
 * Write the 32-bit @value to the APB register at @offset via DMA. Falls
 * back to a direct MMIO write if the DMA channel is not (yet) available.
 * A DMA failure is logged but not propagated (write API returns void).
 */
266static void tegra_apb_writel_using_dma(u32 value, unsigned long offset)
267{
268 int ret;
269
/* Lazy init: the DMA driver may not have probed yet at first call. */
270 if (!tegra_apb_dma_chan && !tegra_apb_dma_init()) {
271 tegra_apb_writel_direct(value, offset);
272 return;
273 }
274
275 mutex_lock(&tegra_apb_dma_lock);
/* Stage the value in the bounce word, then push it to the device. */
276 *((u32 *)tegra_apb_bb) = value;
277 ret = do_dma_transfer(offset, DMA_MEM_TO_DEV);
278 if (ret < 0)
279 pr_err("error in writing offset 0x%08lx using dma\n", offset);
280 mutex_unlock(&tegra_apb_dma_lock);
281}
282#endif
152#else 283#else
153#define tegra_apb_readl_using_dma tegra_apb_readl_direct 284#define tegra_apb_readl_using_dma tegra_apb_readl_direct
154#define tegra_apb_writel_using_dma tegra_apb_writel_direct 285#define tegra_apb_writel_using_dma tegra_apb_writel_direct