Diffstat (limited to 'arch/arm/mach-tegra/dma.c')
-rw-r--r--	arch/arm/mach-tegra/dma.c	128
1 file changed, 77 insertions(+), 51 deletions(-)
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
index c0cf967e47d3..abea4f6e2dd5 100644
--- a/arch/arm/mach-tegra/dma.c
+++ b/arch/arm/mach-tegra/dma.c
@@ -33,6 +33,8 @@
 #include <mach/iomap.h>
 #include <mach/suspend.h>
 
+#include "apbio.h"
+
 #define APB_DMA_GEN			0x000
 #define GEN_ENABLE			(1<<31)
 
@@ -50,8 +52,6 @@
 #define CSR_ONCE			(1<<27)
 #define CSR_FLOW			(1<<21)
 #define CSR_REQ_SEL_SHIFT		16
-#define CSR_REQ_SEL_MASK		(0x1F<<CSR_REQ_SEL_SHIFT)
-#define CSR_REQ_SEL_INVALID		(31<<CSR_REQ_SEL_SHIFT)
 #define CSR_WCOUNT_SHIFT		2
 #define CSR_WCOUNT_MASK			0xFFFC
 
@@ -133,6 +133,7 @@ struct tegra_dma_channel {
 
 static bool tegra_dma_initialized;
 static DEFINE_MUTEX(tegra_dma_lock);
+static DEFINE_SPINLOCK(enable_lock);
 
 static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
 static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];
@@ -180,36 +181,94 @@ static void tegra_dma_stop(struct tegra_dma_channel *ch)
 
 static int tegra_dma_cancel(struct tegra_dma_channel *ch)
 {
-	u32 csr;
 	unsigned long irq_flags;
 
 	spin_lock_irqsave(&ch->lock, irq_flags);
 	while (!list_empty(&ch->list))
 		list_del(ch->list.next);
 
-	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
-	csr &= ~CSR_REQ_SEL_MASK;
-	csr |= CSR_REQ_SEL_INVALID;
-	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
-
 	tegra_dma_stop(ch);
 
 	spin_unlock_irqrestore(&ch->lock, irq_flags);
 	return 0;
 }
 
+static unsigned int get_channel_status(struct tegra_dma_channel *ch,
+			struct tegra_dma_req *req, bool is_stop_dma)
+{
+	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
+	unsigned int status;
+
+	if (is_stop_dma) {
+		/*
+		 * STOP the DMA and get the transfer count.
+		 * Getting the transfer count is tricky.
+		 *  - Globally disable DMA on all channels
+		 *  - Read the channel's status register to know the number
+		 *    of pending bytes to be transferred.
+		 *  - Stop the DMA channel
+		 *  - Globally re-enable DMA to resume other transfers
+		 */
+		spin_lock(&enable_lock);
+		writel(0, addr + APB_DMA_GEN);
+		udelay(20);
+		status = readl(ch->addr + APB_DMA_CHAN_STA);
+		tegra_dma_stop(ch);
+		writel(GEN_ENABLE, addr + APB_DMA_GEN);
+		spin_unlock(&enable_lock);
+		if (status & STA_ISE_EOC) {
+			pr_err("Got Dma Int here clearing");
+			writel(status, ch->addr + APB_DMA_CHAN_STA);
+		}
+		req->status = TEGRA_DMA_REQ_ERROR_ABORTED;
+	} else {
+		status = readl(ch->addr + APB_DMA_CHAN_STA);
+	}
+	return status;
+}
+
+/* should be called with the channel lock held */
+static unsigned int dma_active_count(struct tegra_dma_channel *ch,
+	struct tegra_dma_req *req, unsigned int status)
+{
+	unsigned int to_transfer;
+	unsigned int req_transfer_count;
+	unsigned int bytes_transferred;
+
+	to_transfer = ((status & STA_COUNT_MASK) >> STA_COUNT_SHIFT) + 1;
+	req_transfer_count = ch->req_transfer_count + 1;
+	bytes_transferred = req_transfer_count;
+	if (status & STA_BUSY)
+		bytes_transferred -= to_transfer;
+	/*
+	 * In continuous transfer mode, DMA only tracks the count of the
+	 * half DMA buffer. So, if the DMA already finished half the DMA
+	 * then add the half buffer to the completed count.
+	 */
+	if (ch->mode & TEGRA_DMA_MODE_CONTINOUS) {
+		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
+			bytes_transferred += req_transfer_count;
+		if (status & STA_ISE_EOC)
+			bytes_transferred += req_transfer_count;
+	}
+	bytes_transferred *= 4;
+	return bytes_transferred;
+}
+
 int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
 	struct tegra_dma_req *_req)
 {
-	unsigned int csr;
 	unsigned int status;
 	struct tegra_dma_req *req = NULL;
 	int found = 0;
 	unsigned long irq_flags;
-	int to_transfer;
-	int req_transfer_count;
+	int stop = 0;
 
 	spin_lock_irqsave(&ch->lock, irq_flags);
+
+	if (list_entry(ch->list.next, struct tegra_dma_req, node) == _req)
+		stop = 1;
+
 	list_for_each_entry(req, &ch->list, node) {
 		if (req == _req) {
 			list_del(&req->node);
@@ -222,47 +281,12 @@ int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
 		return 0;
 	}
 
-	/* STOP the DMA and get the transfer count.
-	 * Getting the transfer count is tricky.
-	 *  - Change the source selector to invalid to stop the DMA from
-	 *    FIFO to memory.
-	 *  - Read the status register to know the number of pending
-	 *    bytes to be transferred.
-	 *  - Finally stop or program the DMA to the next buffer in the
-	 *    list.
-	 */
-	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
-	csr &= ~CSR_REQ_SEL_MASK;
-	csr |= CSR_REQ_SEL_INVALID;
-	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
-
-	/* Get the transfer count */
-	status = readl(ch->addr + APB_DMA_CHAN_STA);
-	to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
-	req_transfer_count = ch->req_transfer_count;
-	req_transfer_count += 1;
-	to_transfer += 1;
-
-	req->bytes_transferred = req_transfer_count;
-
-	if (status & STA_BUSY)
-		req->bytes_transferred -= to_transfer;
-
-	/* In continuous transfer mode, DMA only tracks the count of the
-	 * half DMA buffer. So, if the DMA already finished half the DMA
-	 * then add the half buffer to the completed count.
-	 *
-	 * FIXME: There can be a race here. What if the req to
-	 * dequeue happens at the same time as the DMA just moved to
-	 * the new buffer and SW didn't yet receive the interrupt?
-	 */
-	if (ch->mode & TEGRA_DMA_MODE_CONTINOUS)
-		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
-			req->bytes_transferred += req_transfer_count;
+	if (!stop)
+		goto skip_stop_dma;
 
-	req->bytes_transferred *= 4;
+	status = get_channel_status(ch, req, true);
+	req->bytes_transferred = dma_active_count(ch, req, status);
 
-	tegra_dma_stop(ch);
 	if (!list_empty(&ch->list)) {
 		/* if the list is not empty, queue the next request */
 		struct tegra_dma_req *next_req;
@@ -270,6 +294,8 @@ int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
 			typeof(*next_req), node);
 		tegra_dma_update_hw(ch, next_req);
 	}
+
+skip_stop_dma:
 	req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;
 
 	spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -357,7 +383,7 @@ struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
 	int channel;
 	struct tegra_dma_channel *ch = NULL;
 
-	if (WARN_ON(!tegra_dma_initialized))
+	if (!tegra_dma_initialized)
 		return NULL;
 
 	mutex_lock(&tegra_dma_lock);
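
A note on the arithmetic that moved into dma_active_count(): the hardware counts 32-bit words, ch->req_transfer_count holds the programmed word count minus one (hence the +1), and continuous mode only tracks half of the double buffer. The stand-alone model below exercises that bookkeeping in user space; it is a sketch, not driver code, and the STA_* bit positions here are assumptions chosen for the model, not the authoritative defines in dma.c.

/*
 * User-space model of the dma_active_count() bookkeeping in this
 * patch.  The STA_* bit positions are illustrative stand-ins; the
 * real values live in arch/arm/mach-tegra/dma.c.
 */
#include <stdio.h>

#define STA_BUSY	(1u << 31)	/* assumed: transfer still in flight */
#define STA_ISE_EOC	(1u << 30)	/* assumed: unserviced end-of-buffer IRQ */
#define STA_COUNT_SHIFT	2
#define STA_COUNT_MASK	0xFFFCu

/* wcount is the programmed word count minus one, matching how the
 * driver stores ch->req_transfer_count; half_full mirrors the
 * TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL check in continuous mode. */
static unsigned int model_active_count(unsigned int wcount,
				       unsigned int status, int half_full)
{
	unsigned int to_transfer =
		((status & STA_COUNT_MASK) >> STA_COUNT_SHIFT) + 1;
	unsigned int req_transfer_count = wcount + 1;
	unsigned int words = req_transfer_count;

	if (status & STA_BUSY)		/* subtract words still pending */
		words -= to_transfer;
	if (half_full)			/* first half buffer already done */
		words += req_transfer_count;
	if (status & STA_ISE_EOC)	/* buffer done, IRQ not yet handled */
		words += req_transfer_count;
	return words * 4;		/* 32-bit words -> bytes */
}

int main(void)
{
	/* A 256-word request stopped while 100 words are still pending:
	 * (256 - 100) * 4 = 624 bytes already transferred. */
	unsigned int status = STA_BUSY | (99u << STA_COUNT_SHIFT);
	printf("%u\n", model_active_count(255, status, 0));
	return 0;
}

In continuous mode with the first half-buffer already drained (half_full = 1), the same status works out to (256 - 100 + 256) * 4 = 1648 bytes, which is exactly the half-buffer correction the comment in dma_active_count() describes.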