author     Laxman Dewangan <ldewangan@nvidia.com>    2012-01-09 15:05:11 -0500
committer  Olof Johansson <olof@lixom.net>           2012-02-06 21:24:59 -0500
commit     cb3732d0dc9df198c889a26210b6b27bc51a1c4a (patch)
tree       b95a5a952d14897c093d09af778dcafa39cf756e /arch/arm/mach-tegra
parent     941b8db1df8bfc29a88fc8e3e203289d84a3f64d (diff)
ARM: tegra: Pause DMA when reading transfer count
In order to read an accurate channel transfer count from the APB DMA
engine, the DMA controller must be paused first.

Signed-off-by: Laxman Dewangan <ldewangan@nvidia.com>
Acked-by: Stephen Warren <swarren@nvidia.com>
Tested-by: Stephen Warren <swarren@nvidia.com>
Signed-off-by: Olof Johansson <olof@lixom.net>
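The core of the fix is the pause-read-resume sequence in the new
get_channel_status() below. As a minimal sketch of that sequence (not part
of the patch; the helper name read_transfer_count_paused is hypothetical,
while the registers, enable_lock, and the 20us settle delay are taken from
the patch), it reduces to:

static unsigned int read_transfer_count_paused(struct tegra_dma_channel *ch)
{
	void __iomem *gen = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	unsigned int status;

	spin_lock(&enable_lock);		/* serialize the global pause */
	writel(0, gen + APB_DMA_GEN);		/* pause all DMA channels */
	udelay(20);				/* let in-flight bursts settle */
	status = readl(ch->addr + APB_DMA_CHAN_STA);	/* COUNT is now stable */
	writel(GEN_ENABLE, gen + APB_DMA_GEN);	/* resume the other channels */
	spin_unlock(&enable_lock);

	/* The COUNT field encodes words remaining minus one. */
	return ((status & STA_COUNT_MASK) >> STA_COUNT_SHIFT) + 1;
}

Unlike the old approach of switching the request selector to
CSR_REQ_SEL_INVALID, the global pause guarantees the counter has stopped
advancing before it is sampled.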
Diffstat (limited to 'arch/arm/mach-tegra')
-rw-r--r--  arch/arm/mach-tegra/dma.c | 116
1 file changed, 74 insertions(+), 42 deletions(-)
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
index 122e46770d9..998c55ddca4 100644
--- a/arch/arm/mach-tegra/dma.c
+++ b/arch/arm/mach-tegra/dma.c
@@ -135,6 +135,7 @@ struct tegra_dma_channel {
 
 static bool tegra_dma_initialized;
 static DEFINE_MUTEX(tegra_dma_lock);
+static DEFINE_SPINLOCK(enable_lock);
 
 static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
 static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];
@@ -200,18 +201,82 @@ static int tegra_dma_cancel(struct tegra_dma_channel *ch)
 	return 0;
 }
 
+static unsigned int get_channel_status(struct tegra_dma_channel *ch,
+			struct tegra_dma_req *req, bool is_stop_dma)
+{
+	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
+	unsigned int status;
+
+	if (is_stop_dma) {
+		/*
+		 * STOP the DMA and get the transfer count.
+		 * Getting the transfer count is tricky.
+		 *  - Globally disable DMA on all channels
+		 *  - Read the channel's status register to learn the number
+		 *    of pending bytes still to be transferred
+		 *  - Stop the DMA channel
+		 *  - Globally re-enable DMA to resume other transfers
+		 */
+		spin_lock(&enable_lock);
+		writel(0, addr + APB_DMA_GEN);
+		udelay(20);
+		status = readl(ch->addr + APB_DMA_CHAN_STA);
+		tegra_dma_stop(ch);
+		writel(GEN_ENABLE, addr + APB_DMA_GEN);
+		spin_unlock(&enable_lock);
+		if (status & STA_ISE_EOC) {
+			pr_err("Got DMA interrupt here, clearing\n");
+			writel(status, ch->addr + APB_DMA_CHAN_STA);
+		}
+		req->status = TEGRA_DMA_REQ_ERROR_ABORTED;
+	} else {
+		status = readl(ch->addr + APB_DMA_CHAN_STA);
+	}
+	return status;
+}
+
+/* Should be called with the channel lock held. */
+static unsigned int dma_active_count(struct tegra_dma_channel *ch,
+			struct tegra_dma_req *req, unsigned int status)
+{
+	unsigned int to_transfer;
+	unsigned int req_transfer_count;
+	unsigned int bytes_transferred;
+
+	to_transfer = ((status & STA_COUNT_MASK) >> STA_COUNT_SHIFT) + 1;
+	req_transfer_count = ch->req_transfer_count + 1;
+	bytes_transferred = req_transfer_count;
+	if (status & STA_BUSY)
+		bytes_transferred -= to_transfer;
+	/*
+	 * In continuous transfer mode, the DMA engine only tracks the
+	 * count within half of the DMA buffer. So, if the DMA has already
+	 * finished the first half, add that half buffer to the completed
+	 * count.
+	 */
+	if (ch->mode & TEGRA_DMA_MODE_CONTINOUS) {
+		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
+			bytes_transferred += req_transfer_count;
+		if (status & STA_ISE_EOC)
+			bytes_transferred += req_transfer_count;
+	}
+	bytes_transferred *= 4;
+	return bytes_transferred;
+}
+
 int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
 					struct tegra_dma_req *_req)
 {
-	unsigned int csr;
 	unsigned int status;
 	struct tegra_dma_req *req = NULL;
 	int found = 0;
 	unsigned long irq_flags;
-	int to_transfer;
-	int req_transfer_count;
+	int stop = 0;
 
 	spin_lock_irqsave(&ch->lock, irq_flags);
+
+	if (list_entry(ch->list.next, struct tegra_dma_req, node) == _req)
+		stop = 1;
+
 	list_for_each_entry(req, &ch->list, node) {
 		if (req == _req) {
 			list_del(&req->node);
@@ -224,47 +289,12 @@ int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
 		return 0;
 	}
 
-	/* STOP the DMA and get the transfer count.
-	 * Getting the transfer count is tricky.
-	 * - Change the source selector to invalid to stop the DMA from
-	 *   FIFO to memory.
-	 * - Read the status register to know the number of pending
-	 *   bytes to be transferred.
-	 * - Finally stop or program the DMA to the next buffer in the
-	 *   list.
-	 */
-	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
-	csr &= ~CSR_REQ_SEL_MASK;
-	csr |= CSR_REQ_SEL_INVALID;
-	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
-
-	/* Get the transfer count */
-	status = readl(ch->addr + APB_DMA_CHAN_STA);
-	to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
-	req_transfer_count = ch->req_transfer_count;
-	req_transfer_count += 1;
-	to_transfer += 1;
-
-	req->bytes_transferred = req_transfer_count;
-
-	if (status & STA_BUSY)
-		req->bytes_transferred -= to_transfer;
-
-	/* In continuous transfer mode, DMA only tracks the count of the
-	 * half DMA buffer. So, if the DMA already finished half the DMA
-	 * then add the half buffer to the completed count.
-	 *
-	 * FIXME: There can be a race here. What if the request to
-	 * dequeue happens at the same time as the DMA just moved to
-	 * the new buffer and SW hasn't yet received the interrupt?
-	 */
-	if (ch->mode & TEGRA_DMA_MODE_CONTINOUS)
-		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
-			req->bytes_transferred += req_transfer_count;
-
-	req->bytes_transferred *= 4;
-
-	tegra_dma_stop(ch);
+	if (!stop)
+		goto skip_stop_dma;
+
+	status = get_channel_status(ch, req, true);
+	req->bytes_transferred = dma_active_count(ch, req, status);
+
 	if (!list_empty(&ch->list)) {
 		/* if the list is not empty, queue the next request */
 		struct tegra_dma_req *next_req;
@@ -272,6 +302,8 @@ int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
 					typeof(*next_req), node);
 		tegra_dma_update_hw(ch, next_req);
 	}
+
+skip_stop_dma:
 	req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;
 
 	spin_unlock_irqrestore(&ch->lock, irq_flags);
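As a sanity check on the dma_active_count() arithmetic above, a worked
example with illustrative values (the 4-byte word size and the half-buffer
accounting come straight from the patch):

/*
 * One-shot request programmed for 0x100 words; the channel is still
 * busy with 0x40 words outstanding:
 *
 *   to_transfer        = ((status & STA_COUNT_MASK) >> STA_COUNT_SHIFT) + 1
 *                      = 0x40
 *   req_transfer_count = ch->req_transfer_count + 1 = 0x100
 *   bytes_transferred  = (0x100 - 0x40) * 4 = 0x300 bytes
 *
 * In continuous mode the hardware counter only spans half the buffer,
 * so a completed first half (TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) and a
 * pending EOC each add another req_transfer_count words before the
 * final scaling by 4.
 */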