author	Joe Perches <joe@perches.com>	2012-07-18 12:51:28 -0400
committer	Vinod Koul <vinod.koul@linux.intel.com>	2012-07-20 02:14:19 -0400
commit	634332502366554849fe37e88d05ec0a13e550c8 (patch)
tree	e683c0b588228dfad001c49fe6cbdc094c0b439a
parent	0e79f9ae1610c15f5e5959c39d7c39071619de97 (diff)
dmaengine: Cleanup logging messages
Use a more current logging style.

Add pr_fmt to prefix dmaengine: to messages.
Convert printk(KERN_ERR to pr_err(.
Convert embedded function name use to "%s: ", __func__
Align arguments.

Original-patch-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
-rw-r--r--	drivers/dma/dmaengine.c	20
1 file changed, 12 insertions, 8 deletions
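The idiom the patch applies is the usual kernel printk cleanup: define pr_fmt() before any includes so every pr_*() call in the file is prefixed with the module name, and put "%s: ", __func__ in the format string instead of hard-coding function names. Below is a minimal sketch of that pattern in a stand-alone module; the "logdemo" module and its function names are illustrative, not part of this patch.

/* Illustrative module showing the pr_fmt/pr_err idiom used by the patch.
 * KBUILD_MODNAME is supplied by Kbuild ("logdemo" for a module built from
 * logdemo.c); the macro must be defined before the printk headers are pulled in.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>

static int __init logdemo_init(void)
{
	/* With pr_fmt() above this logs "logdemo: logdemo_init: device busy" at KERN_ERR */
	pr_err("%s: device busy\n", __func__);
	return 0;
}

static void __exit logdemo_exit(void)
{
	/* pr_debug() is compiled out unless DEBUG or dynamic debug is enabled */
	pr_debug("%s: unloading\n", __func__);
}

module_init(logdemo_init);
module_exit(logdemo_exit);
MODULE_LICENSE("GPL");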
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 2397f6f451b1..3491654cdf7b 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -45,6 +45,8 @@
  * See Documentation/dmaengine.txt for more details
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/dma-mapping.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -261,7 +263,7 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 	do {
 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
-			printk(KERN_ERR "dma_sync_wait_timeout!\n");
+			pr_err("%s: timeout!\n", __func__);
 			return DMA_ERROR;
 		}
 	} while (status == DMA_IN_PROGRESS);
@@ -312,7 +314,7 @@ static int __init dma_channel_table_init(void)
 	}
 
 	if (err) {
-		pr_err("dmaengine: initialization failure\n");
+		pr_err("initialization failure\n");
 		for_each_dma_cap_mask(cap, dma_cap_mask_all)
 			if (channel_table[cap])
 				free_percpu(channel_table[cap]);
@@ -520,12 +522,12 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
 			err = dma_chan_get(chan);
 
 			if (err == -ENODEV) {
-				pr_debug("%s: %s module removed\n", __func__,
-					 dma_chan_name(chan));
+				pr_debug("%s: %s module removed\n",
+					 __func__, dma_chan_name(chan));
 				list_del_rcu(&device->global_node);
 			} else if (err)
 				pr_debug("%s: failed to get %s: (%d)\n",
-					__func__, dma_chan_name(chan), err);
+					 __func__, dma_chan_name(chan), err);
 			else
 				break;
 			if (--device->privatecnt == 0)
@@ -535,7 +537,9 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
 	}
 	mutex_unlock(&dma_list_mutex);
 
-	pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
+	pr_debug("%s: %s (%s)\n",
+		 __func__,
+		 chan ? "success" : "fail",
 		 chan ? dma_chan_name(chan) : NULL);
 
 	return chan;
@@ -579,7 +583,7 @@ void dmaengine_get(void)
 				break;
 			} else if (err)
 				pr_err("%s: failed to get %s: (%d)\n",
-					__func__, dma_chan_name(chan), err);
+				       __func__, dma_chan_name(chan), err);
 		}
 	}
 
@@ -1015,7 +1019,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 	while (tx->cookie == -EBUSY) {
 		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
 			pr_err("%s timeout waiting for descriptor submission\n",
-				__func__);
+			       __func__);
 			return DMA_ERROR;
 		}
 		cpu_relax();