aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm/mach-tegra/dma.c
diff options
context:
space:
mode:
authorStephen Warren <swarren@nvidia.com>2011-01-05 16:24:12 -0500
committerColin Cross <ccross@android.com>2011-02-19 17:35:52 -0500
commit499ef7a5c48ea9fe8034b61de304ce9f6b753fe7 (patch)
treef1ca0f89104dab39f3152a06d7b797ed181a17d8 /arch/arm/mach-tegra/dma.c
parentfe92a026e30050d5c79b2d41274211550a14cb04 (diff)
ARM: tegra: Prevent requeuing in-progress DMA requests
If a request already in the queue is passed to tegra_dma_enqueue_req, tegra_dma_req.node->{next,prev} will end up pointing to itself instead of at tegra_dma_channel.list, which is the way the end-of-list should be set up. When the DMA request completes and is list_del'd, the list head will still point at it, yet the node's next/prev will contain the list poison values. When the next DMA request completes, a kernel panic will occur when those poison values are dereferenced. This makes the DMA driver more robust in the face of buggy clients. Signed-off-by: Stephen Warren <swarren@nvidia.com> Signed-off-by: Colin Cross <ccross@android.com>
Diffstat (limited to 'arch/arm/mach-tegra/dma.c')
-rw-r--r--arch/arm/mach-tegra/dma.c8
1 files changed, 8 insertions, 0 deletions
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
index 250bc7baa00a..4625e3ae766e 100644
--- a/arch/arm/mach-tegra/dma.c
+++ b/arch/arm/mach-tegra/dma.c
@@ -311,6 +311,7 @@ int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
311 struct tegra_dma_req *req) 311 struct tegra_dma_req *req)
312{ 312{
313 unsigned long irq_flags; 313 unsigned long irq_flags;
314 struct tegra_dma_req *_req;
314 int start_dma = 0; 315 int start_dma = 0;
315 316
316 if (req->size > NV_DMA_MAX_TRASFER_SIZE || 317 if (req->size > NV_DMA_MAX_TRASFER_SIZE ||
@@ -321,6 +322,13 @@ int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
321 322
322 spin_lock_irqsave(&ch->lock, irq_flags); 323 spin_lock_irqsave(&ch->lock, irq_flags);
323 324
325 list_for_each_entry(_req, &ch->list, node) {
326 if (req == _req) {
327 spin_unlock_irqrestore(&ch->lock, irq_flags);
328 return -EEXIST;
329 }
330 }
331
324 req->bytes_transferred = 0; 332 req->bytes_transferred = 0;
325 req->status = 0; 333 req->status = 0;
326 req->buffer_status = 0; 334 req->buffer_status = 0;