author    Linus Torvalds <torvalds@linux-foundation.org>  2011-03-22 20:53:13 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-03-22 20:53:13 -0400
commit    6447f55da90b77faec1697d499ed7986bb4f6de6 (patch)
tree      2d360d48121bdaa354d1ef19fed48467d08dfb1f /drivers/dma/pch_dma.c
parent    c50e3f512a5a15a73acd94e6ec8ed63cd512e04f (diff)
parent    3ea205c449d2b5996d0256aa8b2894f7aea228a2 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (66 commits)
  avr32: at32ap700x: fix typo in DMA master configuration
  dmaengine/dmatest: Pass timeout via module params
  dma: let IMX_DMA depend on IMX_HAVE_DMA_V1 instead of an explicit list of SoCs
  fsldma: make halt behave nicely on all supported controllers
  fsldma: reduce locking during descriptor cleanup
  fsldma: support async_tx dependencies and automatic unmapping
  fsldma: fix controller lockups
  fsldma: minor codingstyle and consistency fixes
  fsldma: improve link descriptor debugging
  fsldma: use channel name in printk output
  fsldma: move related helper functions near each other
  dmatest: fix automatic buffer unmap type
  drivers, pch_dma: Fix warning when CONFIG_PM=n.
  dmaengine/dw_dmac fix: use readl & writel instead of __raw_readl & __raw_writel
  avr32: at32ap700x: Specify DMA Flow Controller, Src and Dst msize
  dw_dmac: Setting Default Burst length for transfers as 16.
  dw_dmac: Allow src/dst msize & flow controller to be configured at runtime
  dw_dmac: Changing type of src_master and dest_master to u8.
  dw_dmac: Pass Channel Priority from platform_data
  dw_dmac: Pass Channel Allocation Order from platform_data
  ...
Diffstat (limited to 'drivers/dma/pch_dma.c')
-rw-r--r--  drivers/dma/pch_dma.c  35
1 file changed, 18 insertions(+), 17 deletions(-)
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 1c38418ae61f..8d8fef1480a9 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -82,7 +82,7 @@ struct pch_dma_regs {
 	u32	dma_sts1;
 	u32	reserved2;
 	u32	reserved3;
-	struct pch_dma_desc_regs desc[0];
+	struct pch_dma_desc_regs desc[MAX_CHAN_NR];
 };
 
 struct pch_dma_desc {
@@ -124,7 +124,7 @@ struct pch_dma {
 	struct pci_pool		*pool;
 	struct pch_dma_regs	regs;
 	struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
-	struct pch_dma_chan	channels[0];
+	struct pch_dma_chan	channels[MAX_CHAN_NR];
 };
 
 #define PCH_DMA_CTL0	0x00
@@ -366,7 +366,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
 	struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
 	dma_cookie_t cookie;
 
-	spin_lock_bh(&pd_chan->lock);
+	spin_lock(&pd_chan->lock);
 	cookie = pdc_assign_cookie(pd_chan, desc);
 
 	if (list_empty(&pd_chan->active_list)) {
@@ -376,7 +376,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
 		list_add_tail(&desc->desc_node, &pd_chan->queue);
 	}
 
-	spin_unlock_bh(&pd_chan->lock);
+	spin_unlock(&pd_chan->lock);
 	return 0;
 }
 
@@ -386,7 +386,7 @@ static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
 	struct pch_dma *pd = to_pd(chan->device);
 	dma_addr_t addr;
 
-	desc = pci_pool_alloc(pd->pool, GFP_KERNEL, &addr);
+	desc = pci_pool_alloc(pd->pool, flags, &addr);
 	if (desc) {
 		memset(desc, 0, sizeof(struct pch_dma_desc));
 		INIT_LIST_HEAD(&desc->tx_list);
@@ -405,7 +405,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
 	struct pch_dma_desc *ret = NULL;
 	int i;
 
-	spin_lock_bh(&pd_chan->lock);
+	spin_lock(&pd_chan->lock);
 	list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
 		i++;
 		if (async_tx_test_ack(&desc->txd)) {
@@ -415,15 +415,15 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
 		}
 		dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
 	}
-	spin_unlock_bh(&pd_chan->lock);
+	spin_unlock(&pd_chan->lock);
 	dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);
 
 	if (!ret) {
 		ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
 		if (ret) {
-			spin_lock_bh(&pd_chan->lock);
+			spin_lock(&pd_chan->lock);
 			pd_chan->descs_allocated++;
-			spin_unlock_bh(&pd_chan->lock);
+			spin_unlock(&pd_chan->lock);
 		} else {
 			dev_err(chan2dev(&pd_chan->chan),
 				"failed to alloc desc\n");
@@ -437,10 +437,10 @@ static void pdc_desc_put(struct pch_dma_chan *pd_chan,
 			 struct pch_dma_desc *desc)
 {
 	if (desc) {
-		spin_lock_bh(&pd_chan->lock);
+		spin_lock(&pd_chan->lock);
 		list_splice_init(&desc->tx_list, &pd_chan->free_list);
 		list_add(&desc->desc_node, &pd_chan->free_list);
-		spin_unlock_bh(&pd_chan->lock);
+		spin_unlock(&pd_chan->lock);
 	}
 }
 
@@ -530,9 +530,9 @@ static void pd_issue_pending(struct dma_chan *chan)
 	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
 
 	if (pdc_is_idle(pd_chan)) {
-		spin_lock_bh(&pd_chan->lock);
+		spin_lock(&pd_chan->lock);
 		pdc_advance_work(pd_chan);
-		spin_unlock_bh(&pd_chan->lock);
+		spin_unlock(&pd_chan->lock);
 	}
 }
 
@@ -592,7 +592,6 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
 			goto err_desc_get;
 		}
 
-
 		if (!first) {
 			first = desc;
 		} else {
@@ -641,13 +640,13 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 
 	spin_unlock_bh(&pd_chan->lock);
 
-
 	return 0;
 }
 
 static void pdc_tasklet(unsigned long data)
 {
 	struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;
+	unsigned long flags;
 
 	if (!pdc_is_idle(pd_chan)) {
 		dev_err(chan2dev(&pd_chan->chan),
@@ -655,12 +654,12 @@ static void pdc_tasklet(unsigned long data)
 		return;
 	}
 
-	spin_lock_bh(&pd_chan->lock);
+	spin_lock_irqsave(&pd_chan->lock, flags);
 	if (test_and_clear_bit(0, &pd_chan->err_status))
 		pdc_handle_error(pd_chan);
 	else
 		pdc_advance_work(pd_chan);
-	spin_unlock_bh(&pd_chan->lock);
+	spin_unlock_irqrestore(&pd_chan->lock, flags);
 }
 
 static irqreturn_t pd_irq(int irq, void *devid)
@@ -694,6 +693,7 @@ static irqreturn_t pd_irq(int irq, void *devid)
 	return ret;
 }
 
+#ifdef CONFIG_PM
 static void pch_dma_save_regs(struct pch_dma *pd)
 {
 	struct pch_dma_chan *pd_chan;
@@ -771,6 +771,7 @@ static int pch_dma_resume(struct pci_dev *pdev)
 
 	return 0;
 }
+#endif
 
 static int __devinit pch_dma_probe(struct pci_dev *pdev,
 				   const struct pci_device_id *id)
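
One recurring change in the hunks above is the tasklet locking: spin_lock_bh()/spin_unlock_bh() becomes spin_lock_irqsave()/spin_unlock_irqrestore() with a local flags variable in pdc_tasklet(). Below is a minimal, self-contained sketch of that general pattern for reference; it is not code from pch_dma, and all names (example_lock, example_events, example_irq, example_tasklet) are hypothetical. It only illustrates why the irqsave variant is the safe choice when the same lock may also be taken while interrupts are disabled.

#include <linux/interrupt.h>
#include <linux/spinlock.h>

/* Hypothetical example, not part of drivers/dma/pch_dma.c. */
static DEFINE_SPINLOCK(example_lock);
static unsigned long example_events;	/* state shared with the IRQ handler */

static irqreturn_t example_irq(int irq, void *dev_id)
{
	/* Hard-IRQ context: interrupts are already disabled here. */
	spin_lock(&example_lock);
	example_events++;
	spin_unlock(&example_lock);
	return IRQ_HANDLED;
}

static void example_tasklet(unsigned long data)
{
	unsigned long flags;

	/*
	 * Softirq context: disable local interrupts while holding the lock
	 * so example_irq() cannot deadlock against us on the same CPU.
	 */
	spin_lock_irqsave(&example_lock, flags);
	while (example_events) {
		example_events--;
		/* drain one queued event here */
	}
	spin_unlock_irqrestore(&example_lock, flags);
}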