diff options
author:    Tomoya MORINAGA <tomoya-linux@dsn.okisemi.com>  2011-02-17 23:31:20 -0500
committer: Vinod Koul <vinod.koul@intel.com>               2011-02-26 09:48:29 -0500
commit:    c5a9f9d0895b2c16908979244d3d678fd6db0545 (patch)
tree:      da7855891e874662423a796e5834ba06294adccb /drivers/dma/pch_dma.c
parent:    0670e7157f75ec6d2231fbc6f67b075d6b6d486f (diff)
pch_dma: fix kernel error issue
Fix the following kernel warning: pd_prep_slave_sg() is called from hard-IRQ
context (pch_uart_interrupt), where spin_unlock_bh() must not be used —
re-enabling bottom halves from IRQ context trips the WARN_ON in
kernel/softirq.c. Switch the channel lock to plain spin_lock()/spin_unlock()
in paths reached from IRQ context, and to spin_lock_irqsave() in the tasklet.
------------[ cut here ]------------
WARNING: at kernel/softirq.c:159 _local_bh_enable_ip.clone.5+0x35/0x71()
Hardware name: To be filled by O.E.M.
Modules linked in: pch_uart pch_dma fuse mga drm cpufreq_ondemand acpi_cpufreq mperf ip6t_REJECT nf_conntrack_ipv6 nf_defrag_ipv6 ip6table_filter ip6_tables ipv6 uinput snd_hda_codec_realtek snd_hda_intel snd_hda_codec matroxfb_base snd_hwdep 8250_pnp snd_seq snd_seq_device matroxfb_DAC1064 snd_pcm joydev 8250 matroxfb_accel snd_timer matroxfb_Ti3026 ppdev pegasus parport_pc snd parport matroxfb_g450 g450_pll serial_core video output matroxfb_misc soundcore snd_page_alloc serio_raw pcspkr ext4 jbd2 crc16 sdhci_pci sdhci mmc_core floppy [last unloaded: scsi_wait_scan]
Pid: 0, comm: swapper Not tainted 2.6.37.upstream_check+ #8
Call Trace:
[<c0433add>] warn_slowpath_common+0x65/0x7a
[<c043825b>] ? _local_bh_enable_ip.clone.5+0x35/0x71
[<c0433b01>] warn_slowpath_null+0xf/0x13
[<c043825b>] _local_bh_enable_ip.clone.5+0x35/0x71
[<c043829f>] local_bh_enable_ip+0x8/0xa
[<c06ec471>] _raw_spin_unlock_bh+0x10/0x12
[<f82b57dd>] pd_prep_slave_sg+0xba/0x200 [pch_dma]
[<f82f7b7a>] pch_uart_interrupt+0x44d/0x6aa [pch_uart]
[<c046fa97>] handle_IRQ_event+0x1d/0x9e
[<c047146f>] handle_fasteoi_irq+0x90/0xc7
[<c04713df>] ? handle_fasteoi_irq+0x0/0xc7
<IRQ> [<c04045af>] ? do_IRQ+0x3e/0x89
[<c04035a9>] ? common_interrupt+0x29/0x30
[<c04400d8>] ? sys_getpriority+0x12d/0x1a2
[<c058bb2b>] ? arch_local_irq_enable+0x5/0xb
[<c058c740>] ? acpi_idle_enter_bm+0x22a/0x261
[<c0648b11>] ? cpuidle_idle_call+0x70/0xa1
[<c0401f44>] ? cpu_idle+0x49/0x6a
[<c06d9fc4>] ? rest_init+0x58/0x5a
[<c089e762>] ? start_kernel+0x2d0/0x2d5
[<c089e0ce>] ? i386_start_kernel+0xce/0xd5
Signed-off-by: Tomoya MORINAGA <tomoya-linux@dsn.okisemi.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma/pch_dma.c')
-rw-r--r-- | drivers/dma/pch_dma.c | 29 |
1 file changed, 14 insertions(+), 15 deletions(-)
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 1c38418ae61f..bf2ddd601dc2 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -366,7 +366,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd) | |||
366 | struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan); | 366 | struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan); |
367 | dma_cookie_t cookie; | 367 | dma_cookie_t cookie; |
368 | 368 | ||
369 | spin_lock_bh(&pd_chan->lock); | 369 | spin_lock(&pd_chan->lock); |
370 | cookie = pdc_assign_cookie(pd_chan, desc); | 370 | cookie = pdc_assign_cookie(pd_chan, desc); |
371 | 371 | ||
372 | if (list_empty(&pd_chan->active_list)) { | 372 | if (list_empty(&pd_chan->active_list)) { |
@@ -376,7 +376,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd) | |||
376 | list_add_tail(&desc->desc_node, &pd_chan->queue); | 376 | list_add_tail(&desc->desc_node, &pd_chan->queue); |
377 | } | 377 | } |
378 | 378 | ||
379 | spin_unlock_bh(&pd_chan->lock); | 379 | spin_unlock(&pd_chan->lock); |
380 | return 0; | 380 | return 0; |
381 | } | 381 | } |
382 | 382 | ||
@@ -386,7 +386,7 @@ static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags) | |||
386 | struct pch_dma *pd = to_pd(chan->device); | 386 | struct pch_dma *pd = to_pd(chan->device); |
387 | dma_addr_t addr; | 387 | dma_addr_t addr; |
388 | 388 | ||
389 | desc = pci_pool_alloc(pd->pool, GFP_KERNEL, &addr); | 389 | desc = pci_pool_alloc(pd->pool, flags, &addr); |
390 | if (desc) { | 390 | if (desc) { |
391 | memset(desc, 0, sizeof(struct pch_dma_desc)); | 391 | memset(desc, 0, sizeof(struct pch_dma_desc)); |
392 | INIT_LIST_HEAD(&desc->tx_list); | 392 | INIT_LIST_HEAD(&desc->tx_list); |
@@ -405,7 +405,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan) | |||
405 | struct pch_dma_desc *ret = NULL; | 405 | struct pch_dma_desc *ret = NULL; |
406 | int i; | 406 | int i; |
407 | 407 | ||
408 | spin_lock_bh(&pd_chan->lock); | 408 | spin_lock(&pd_chan->lock); |
409 | list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) { | 409 | list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) { |
410 | i++; | 410 | i++; |
411 | if (async_tx_test_ack(&desc->txd)) { | 411 | if (async_tx_test_ack(&desc->txd)) { |
@@ -415,15 +415,15 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan) | |||
415 | } | 415 | } |
416 | dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc); | 416 | dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc); |
417 | } | 417 | } |
418 | spin_unlock_bh(&pd_chan->lock); | 418 | spin_unlock(&pd_chan->lock); |
419 | dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i); | 419 | dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i); |
420 | 420 | ||
421 | if (!ret) { | 421 | if (!ret) { |
422 | ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO); | 422 | ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO); |
423 | if (ret) { | 423 | if (ret) { |
424 | spin_lock_bh(&pd_chan->lock); | 424 | spin_lock(&pd_chan->lock); |
425 | pd_chan->descs_allocated++; | 425 | pd_chan->descs_allocated++; |
426 | spin_unlock_bh(&pd_chan->lock); | 426 | spin_unlock(&pd_chan->lock); |
427 | } else { | 427 | } else { |
428 | dev_err(chan2dev(&pd_chan->chan), | 428 | dev_err(chan2dev(&pd_chan->chan), |
429 | "failed to alloc desc\n"); | 429 | "failed to alloc desc\n"); |
@@ -437,10 +437,10 @@ static void pdc_desc_put(struct pch_dma_chan *pd_chan, | |||
437 | struct pch_dma_desc *desc) | 437 | struct pch_dma_desc *desc) |
438 | { | 438 | { |
439 | if (desc) { | 439 | if (desc) { |
440 | spin_lock_bh(&pd_chan->lock); | 440 | spin_lock(&pd_chan->lock); |
441 | list_splice_init(&desc->tx_list, &pd_chan->free_list); | 441 | list_splice_init(&desc->tx_list, &pd_chan->free_list); |
442 | list_add(&desc->desc_node, &pd_chan->free_list); | 442 | list_add(&desc->desc_node, &pd_chan->free_list); |
443 | spin_unlock_bh(&pd_chan->lock); | 443 | spin_unlock(&pd_chan->lock); |
444 | } | 444 | } |
445 | } | 445 | } |
446 | 446 | ||
@@ -530,9 +530,9 @@ static void pd_issue_pending(struct dma_chan *chan) | |||
530 | struct pch_dma_chan *pd_chan = to_pd_chan(chan); | 530 | struct pch_dma_chan *pd_chan = to_pd_chan(chan); |
531 | 531 | ||
532 | if (pdc_is_idle(pd_chan)) { | 532 | if (pdc_is_idle(pd_chan)) { |
533 | spin_lock_bh(&pd_chan->lock); | 533 | spin_lock(&pd_chan->lock); |
534 | pdc_advance_work(pd_chan); | 534 | pdc_advance_work(pd_chan); |
535 | spin_unlock_bh(&pd_chan->lock); | 535 | spin_unlock(&pd_chan->lock); |
536 | } | 536 | } |
537 | } | 537 | } |
538 | 538 | ||
@@ -592,7 +592,6 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, | |||
592 | goto err_desc_get; | 592 | goto err_desc_get; |
593 | } | 593 | } |
594 | 594 | ||
595 | |||
596 | if (!first) { | 595 | if (!first) { |
597 | first = desc; | 596 | first = desc; |
598 | } else { | 597 | } else { |
@@ -641,13 +640,13 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
641 | 640 | ||
642 | spin_unlock_bh(&pd_chan->lock); | 641 | spin_unlock_bh(&pd_chan->lock); |
643 | 642 | ||
644 | |||
645 | return 0; | 643 | return 0; |
646 | } | 644 | } |
647 | 645 | ||
648 | static void pdc_tasklet(unsigned long data) | 646 | static void pdc_tasklet(unsigned long data) |
649 | { | 647 | { |
650 | struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data; | 648 | struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data; |
649 | unsigned long flags; | ||
651 | 650 | ||
652 | if (!pdc_is_idle(pd_chan)) { | 651 | if (!pdc_is_idle(pd_chan)) { |
653 | dev_err(chan2dev(&pd_chan->chan), | 652 | dev_err(chan2dev(&pd_chan->chan), |
@@ -655,12 +654,12 @@ static void pdc_tasklet(unsigned long data) | |||
655 | return; | 654 | return; |
656 | } | 655 | } |
657 | 656 | ||
658 | spin_lock_bh(&pd_chan->lock); | 657 | spin_lock_irqsave(&pd_chan->lock, flags); |
659 | if (test_and_clear_bit(0, &pd_chan->err_status)) | 658 | if (test_and_clear_bit(0, &pd_chan->err_status)) |
660 | pdc_handle_error(pd_chan); | 659 | pdc_handle_error(pd_chan); |
661 | else | 660 | else |
662 | pdc_advance_work(pd_chan); | 661 | pdc_advance_work(pd_chan); |
663 | spin_unlock_bh(&pd_chan->lock); | 662 | spin_unlock_irqrestore(&pd_chan->lock, flags); |
664 | } | 663 | } |
665 | 664 | ||
666 | static irqreturn_t pd_irq(int irq, void *devid) | 665 | static irqreturn_t pd_irq(int irq, void *devid) |