aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm/mach-tegra/apbio.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/arm/mach-tegra/apbio.c')
-rw-r--r--arch/arm/mach-tegra/apbio.c158
1 files changed, 158 insertions, 0 deletions
diff --git a/arch/arm/mach-tegra/apbio.c b/arch/arm/mach-tegra/apbio.c
new file mode 100644
index 00000000000..41eb0aa3c73
--- /dev/null
+++ b/arch/arm/mach-tegra/apbio.c
@@ -0,0 +1,158 @@
/*
 * arch/arm/mach-tegra/apbio.c
 *
 * Copyright (C) 2010 NVIDIA Corporation.
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
17
18#include <linux/kernel.h>
19#include <linux/io.h>
20#include <linux/dma-mapping.h>
21#include <linux/spinlock.h>
22#include <linux/completion.h>
23#include <linux/sched.h>
24#include <linux/mutex.h>
25
26#include <mach/dma.h>
27#include <mach/iomap.h>
28
29#include "apbio.h"
30
31static DEFINE_MUTEX(tegra_apb_dma_lock);
32
33#ifdef CONFIG_TEGRA_SYSTEM_DMA
34static struct tegra_dma_channel *tegra_apb_dma;
35static u32 *tegra_apb_bb;
36static dma_addr_t tegra_apb_bb_phys;
37static DECLARE_COMPLETION(tegra_apb_wait);
38
39static void apb_dma_complete(struct tegra_dma_req *req)
40{
41 complete(&tegra_apb_wait);
42}
43
44static inline u32 apb_readl(unsigned long offset)
45{
46 struct tegra_dma_req req;
47 int ret;
48
49 if (!tegra_apb_dma)
50 return readl(IO_TO_VIRT(offset));
51
52 mutex_lock(&tegra_apb_dma_lock);
53 req.complete = apb_dma_complete;
54 req.to_memory = 1;
55 req.dest_addr = tegra_apb_bb_phys;
56 req.dest_bus_width = 32;
57 req.dest_wrap = 1;
58 req.source_addr = offset;
59 req.source_bus_width = 32;
60 req.source_wrap = 4;
61 req.req_sel = 0;
62 req.size = 4;
63
64 INIT_COMPLETION(tegra_apb_wait);
65
66 tegra_dma_enqueue_req(tegra_apb_dma, &req);
67
68 ret = wait_for_completion_timeout(&tegra_apb_wait,
69 msecs_to_jiffies(400));
70
71 if (WARN(ret == 0, "apb read dma timed out")) {
72 tegra_dma_dequeue_req(tegra_apb_dma, &req);
73 *(u32 *)tegra_apb_bb = 0;
74 }
75
76 mutex_unlock(&tegra_apb_dma_lock);
77 return *((u32 *)tegra_apb_bb);
78}
79
80static inline void apb_writel(u32 value, unsigned long offset)
81{
82 struct tegra_dma_req req;
83 int ret;
84
85 if (!tegra_apb_dma) {
86 writel(value, IO_TO_VIRT(offset));
87 return;
88 }
89
90 mutex_lock(&tegra_apb_dma_lock);
91 *((u32 *)tegra_apb_bb) = value;
92 req.complete = apb_dma_complete;
93 req.to_memory = 0;
94 req.dest_addr = offset;
95 req.dest_wrap = 4;
96 req.dest_bus_width = 32;
97 req.source_addr = tegra_apb_bb_phys;
98 req.source_bus_width = 32;
99 req.source_wrap = 1;
100 req.req_sel = 0;
101 req.size = 4;
102
103 INIT_COMPLETION(tegra_apb_wait);
104
105 tegra_dma_enqueue_req(tegra_apb_dma, &req);
106
107 ret = wait_for_completion_timeout(&tegra_apb_wait,
108 msecs_to_jiffies(400));
109
110 if (WARN(ret == 0, "apb write dma timed out"))
111 tegra_dma_dequeue_req(tegra_apb_dma, &req);
112
113 mutex_unlock(&tegra_apb_dma_lock);
114}
115#else
116static inline u32 apb_readl(unsigned long offset)
117{
118 return readl(IO_TO_VIRT(offset));
119}
120
121static inline void apb_writel(u32 value, unsigned long offset)
122{
123 writel(value, IO_TO_VIRT(offset));
124}
125#endif
126
127u32 tegra_apb_readl(unsigned long offset)
128{
129 return apb_readl(offset);
130}
131
132void tegra_apb_writel(u32 value, unsigned long offset)
133{
134 apb_writel(value, offset);
135}
136
/*
 * tegra_init_apb_dma - allocate the shared DMA channel and bounce buffer.
 *
 * Runs at arch_initcall time.  Until (or unless) it succeeds, the
 * accessors above fall back to direct MMIO.  Returns 0 on success,
 * -ENODEV if no DMA channel is available, -ENOMEM if the coherent
 * bounce buffer cannot be allocated.
 *
 * NOTE(review): dma_alloc_coherent() is called with a NULL device, so
 * the allocation is not tied to any struct device — verify this is
 * acceptable for this platform's DMA mask handling.
 */
static int tegra_init_apb_dma(void)
{
#ifdef CONFIG_TEGRA_SYSTEM_DMA
	tegra_apb_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT |
		TEGRA_DMA_SHARED, "apbio");
	if (!tegra_apb_dma) {
		pr_err("%s: can not allocate dma channel\n", __func__);
		return -ENODEV;
	}

	tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32),
		&tegra_apb_bb_phys, GFP_KERNEL);
	if (!tegra_apb_bb) {
		pr_err("%s: can not allocate bounce buffer\n", __func__);
		/* Undo the channel allocation so the MMIO fallback stays active. */
		tegra_dma_free_channel(tegra_apb_dma);
		tegra_apb_dma = NULL;
		return -ENOMEM;
	}
#endif
	return 0;
}
arch_initcall(tegra_init_apb_dma);