diff options
author | Ilya Yanok <yanok@emcraft.com> | 2010-10-26 19:52:58 -0400 |
---|---|---|
committer | Grant Likely <grant.likely@secretlab.ca> | 2010-12-30 00:29:36 -0500 |
commit | a276991357c63bfb8d3ffdf5800054dba4f8d558 (patch) | |
tree | 8a3bb0d23eb2011f69e426ab1d87bbe311567c96 /drivers/dma/mpc512x_dma.c | |
parent | ba2eea251f815b3674cde13ecdba4772332bf56e (diff) |
powerpc/512x: try to free dma descriptors in case of allocation failure
Currently completed descriptors are processed in the tasklet. This can
lead to a deadlock when CONFIG_NET_DMA is enabled (new requests are
submitted from softirq context and dma_memcpy_to_iovec() busy loops until
the request is submitted). To prevent this, we should process completed
descriptors from the allocation failure path in mpc_dma_prep_memcpy() too.
Signed-off-by: Ilya Yanok <yanok@emcraft.com>
Cc: Piotr Ziecik <kosmo@semihalf.com>
Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Diffstat (limited to 'drivers/dma/mpc512x_dma.c')
-rw-r--r-- | drivers/dma/mpc512x_dma.c | 79 |
1 files changed, 45 insertions, 34 deletions
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index 97b92ecb1427..59c270192ccc 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c | |||
@@ -328,19 +328,55 @@ static irqreturn_t mpc_dma_irq(int irq, void *data) | |||
328 | return IRQ_HANDLED; | 328 | return IRQ_HANDLED; |
329 | } | 329 | } |
330 | 330 | ||
331 | /* DMA Tasklet */ | 331 | /* proccess completed descriptors */ |
332 | static void mpc_dma_tasklet(unsigned long data) | 332 | static void mpc_dma_process_completed(struct mpc_dma *mdma) |
333 | { | 333 | { |
334 | struct mpc_dma *mdma = (void *)data; | ||
335 | dma_cookie_t last_cookie = 0; | 334 | dma_cookie_t last_cookie = 0; |
336 | struct mpc_dma_chan *mchan; | 335 | struct mpc_dma_chan *mchan; |
337 | struct mpc_dma_desc *mdesc; | 336 | struct mpc_dma_desc *mdesc; |
338 | struct dma_async_tx_descriptor *desc; | 337 | struct dma_async_tx_descriptor *desc; |
339 | unsigned long flags; | 338 | unsigned long flags; |
340 | LIST_HEAD(list); | 339 | LIST_HEAD(list); |
341 | uint es; | ||
342 | int i; | 340 | int i; |
343 | 341 | ||
342 | for (i = 0; i < mdma->dma.chancnt; i++) { | ||
343 | mchan = &mdma->channels[i]; | ||
344 | |||
345 | /* Get all completed descriptors */ | ||
346 | spin_lock_irqsave(&mchan->lock, flags); | ||
347 | if (!list_empty(&mchan->completed)) | ||
348 | list_splice_tail_init(&mchan->completed, &list); | ||
349 | spin_unlock_irqrestore(&mchan->lock, flags); | ||
350 | |||
351 | if (list_empty(&list)) | ||
352 | continue; | ||
353 | |||
354 | /* Execute callbacks and run dependencies */ | ||
355 | list_for_each_entry(mdesc, &list, node) { | ||
356 | desc = &mdesc->desc; | ||
357 | |||
358 | if (desc->callback) | ||
359 | desc->callback(desc->callback_param); | ||
360 | |||
361 | last_cookie = desc->cookie; | ||
362 | dma_run_dependencies(desc); | ||
363 | } | ||
364 | |||
365 | /* Free descriptors */ | ||
366 | spin_lock_irqsave(&mchan->lock, flags); | ||
367 | list_splice_tail_init(&list, &mchan->free); | ||
368 | mchan->completed_cookie = last_cookie; | ||
369 | spin_unlock_irqrestore(&mchan->lock, flags); | ||
370 | } | ||
371 | } | ||
372 | |||
373 | /* DMA Tasklet */ | ||
374 | static void mpc_dma_tasklet(unsigned long data) | ||
375 | { | ||
376 | struct mpc_dma *mdma = (void *)data; | ||
377 | unsigned long flags; | ||
378 | uint es; | ||
379 | |||
344 | spin_lock_irqsave(&mdma->error_status_lock, flags); | 380 | spin_lock_irqsave(&mdma->error_status_lock, flags); |
345 | es = mdma->error_status; | 381 | es = mdma->error_status; |
346 | mdma->error_status = 0; | 382 | mdma->error_status = 0; |
@@ -379,35 +415,7 @@ static void mpc_dma_tasklet(unsigned long data) | |||
379 | dev_err(mdma->dma.dev, "- Destination Bus Error\n"); | 415 | dev_err(mdma->dma.dev, "- Destination Bus Error\n"); |
380 | } | 416 | } |
381 | 417 | ||
382 | for (i = 0; i < mdma->dma.chancnt; i++) { | 418 | mpc_dma_process_completed(mdma); |
383 | mchan = &mdma->channels[i]; | ||
384 | |||
385 | /* Get all completed descriptors */ | ||
386 | spin_lock_irqsave(&mchan->lock, flags); | ||
387 | if (!list_empty(&mchan->completed)) | ||
388 | list_splice_tail_init(&mchan->completed, &list); | ||
389 | spin_unlock_irqrestore(&mchan->lock, flags); | ||
390 | |||
391 | if (list_empty(&list)) | ||
392 | continue; | ||
393 | |||
394 | /* Execute callbacks and run dependencies */ | ||
395 | list_for_each_entry(mdesc, &list, node) { | ||
396 | desc = &mdesc->desc; | ||
397 | |||
398 | if (desc->callback) | ||
399 | desc->callback(desc->callback_param); | ||
400 | |||
401 | last_cookie = desc->cookie; | ||
402 | dma_run_dependencies(desc); | ||
403 | } | ||
404 | |||
405 | /* Free descriptors */ | ||
406 | spin_lock_irqsave(&mchan->lock, flags); | ||
407 | list_splice_tail_init(&list, &mchan->free); | ||
408 | mchan->completed_cookie = last_cookie; | ||
409 | spin_unlock_irqrestore(&mchan->lock, flags); | ||
410 | } | ||
411 | } | 419 | } |
412 | 420 | ||
413 | /* Submit descriptor to hardware */ | 421 | /* Submit descriptor to hardware */ |
@@ -587,8 +595,11 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, | |||
587 | } | 595 | } |
588 | spin_unlock_irqrestore(&mchan->lock, iflags); | 596 | spin_unlock_irqrestore(&mchan->lock, iflags); |
589 | 597 | ||
590 | if (!mdesc) | 598 | if (!mdesc) { |
599 | /* try to free completed descriptors */ | ||
600 | mpc_dma_process_completed(mdma); | ||
591 | return NULL; | 601 | return NULL; |
602 | } | ||
592 | 603 | ||
593 | mdesc->error = 0; | 604 | mdesc->error = 0; |
594 | tcd = mdesc->tcd; | 605 | tcd = mdesc->tcd; |