about summary refs log tree commit diff stats
path: root/drivers/dma
diff options
context:
space:
mode:
authorKoul, Vinod <vinod.koul@intel.com>2010-10-04 06:38:43 -0400
committerDan Williams <dan.j.williams@intel.com>2010-10-07 18:03:44 -0400
commit20dd63900d238e17b122fe0c7376ff090867f528 (patch)
tree8f25adbbb5d49ca428df2596d1e2e24e8e40e428 /drivers/dma
parent8b6492231d2a92352a6371eebd622e3bc824a663 (diff)
intel_mid_dma: change the slave interface
In the 2.6.36 kernel, the dma slave control command was introduced; this patch changes the intel-mid-dma driver to this new kernel slave interface. Signed-off-by: Vinod Koul <vinod.koul@intel.com> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r--drivers/dma/intel_mid_dma.c66
-rw-r--r--drivers/dma/intel_mid_dma_regs.h11
2 files changed, 52 insertions(+), 25 deletions(-)
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index ef7ffb813fe9..338bc4eed1f3 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -92,13 +92,13 @@ static int get_block_ts(int len, int tx_width, int block_size)
92 int byte_width = 0, block_ts = 0; 92 int byte_width = 0, block_ts = 0;
93 93
94 switch (tx_width) { 94 switch (tx_width) {
95 case LNW_DMA_WIDTH_8BIT: 95 case DMA_SLAVE_BUSWIDTH_1_BYTE:
96 byte_width = 1; 96 byte_width = 1;
97 break; 97 break;
98 case LNW_DMA_WIDTH_16BIT: 98 case DMA_SLAVE_BUSWIDTH_2_BYTES:
99 byte_width = 2; 99 byte_width = 2;
100 break; 100 break;
101 case LNW_DMA_WIDTH_32BIT: 101 case DMA_SLAVE_BUSWIDTH_4_BYTES:
102 default: 102 default:
103 byte_width = 4; 103 byte_width = 4;
104 break; 104 break;
@@ -367,7 +367,7 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
367 int i; 367 int i;
368 368
369 pr_debug("MDMA: Entered midc_lli_fill_sg\n"); 369 pr_debug("MDMA: Entered midc_lli_fill_sg\n");
370 mids = midc->chan.private; 370 mids = midc->mid_slave;
371 371
372 lli_bloc_desc = desc->lli; 372 lli_bloc_desc = desc->lli;
373 lli_next = desc->lli_phys; 373 lli_next = desc->lli_phys;
@@ -398,9 +398,9 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
398 sg_phy_addr = sg_phys(sg); 398 sg_phy_addr = sg_phys(sg);
399 if (desc->dirn == DMA_TO_DEVICE) { 399 if (desc->dirn == DMA_TO_DEVICE) {
400 lli_bloc_desc->sar = sg_phy_addr; 400 lli_bloc_desc->sar = sg_phy_addr;
401 lli_bloc_desc->dar = mids->per_addr; 401 lli_bloc_desc->dar = mids->dma_slave.dst_addr;
402 } else if (desc->dirn == DMA_FROM_DEVICE) { 402 } else if (desc->dirn == DMA_FROM_DEVICE) {
403 lli_bloc_desc->sar = mids->per_addr; 403 lli_bloc_desc->sar = mids->dma_slave.src_addr;
404 lli_bloc_desc->dar = sg_phy_addr; 404 lli_bloc_desc->dar = sg_phy_addr;
405 } 405 }
406 /*Copy values into block descriptor in system memroy*/ 406 /*Copy values into block descriptor in system memroy*/
@@ -507,6 +507,23 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
507 return ret; 507 return ret;
508} 508}
509 509
510static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
511{
512 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
513 struct dma_slave_config *slave = (struct dma_slave_config *)arg;
514 struct intel_mid_dma_slave *mid_slave;
515
516 BUG_ON(!midc);
517 BUG_ON(!slave);
518 pr_debug("MDMA: slave control called\n");
519
520 mid_slave = to_intel_mid_dma_slave(slave);
521
522 BUG_ON(!mid_slave);
523
524 midc->mid_slave = mid_slave;
525 return 0;
526}
510/** 527/**
511 * intel_mid_dma_device_control - DMA device control 528 * intel_mid_dma_device_control - DMA device control
512 * @chan: chan for DMA control 529 * @chan: chan for DMA control
@@ -523,6 +540,9 @@ static int intel_mid_dma_device_control(struct dma_chan *chan,
523 struct intel_mid_dma_desc *desc, *_desc; 540 struct intel_mid_dma_desc *desc, *_desc;
524 union intel_mid_dma_cfg_lo cfg_lo; 541 union intel_mid_dma_cfg_lo cfg_lo;
525 542
543 if (cmd == DMA_SLAVE_CONFIG)
544 return dma_slave_control(chan, arg);
545
526 if (cmd != DMA_TERMINATE_ALL) 546 if (cmd != DMA_TERMINATE_ALL)
527 return -ENXIO; 547 return -ENXIO;
528 548
@@ -540,7 +560,6 @@ static int intel_mid_dma_device_control(struct dma_chan *chan,
540 /* Disable interrupts */ 560 /* Disable interrupts */
541 disable_dma_interrupt(midc); 561 disable_dma_interrupt(midc);
542 midc->descs_allocated = 0; 562 midc->descs_allocated = 0;
543 midc->slave = NULL;
544 563
545 spin_unlock_bh(&midc->lock); 564 spin_unlock_bh(&midc->lock);
546 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { 565 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
@@ -578,23 +597,24 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
578 union intel_mid_dma_ctl_hi ctl_hi; 597 union intel_mid_dma_ctl_hi ctl_hi;
579 union intel_mid_dma_cfg_lo cfg_lo; 598 union intel_mid_dma_cfg_lo cfg_lo;
580 union intel_mid_dma_cfg_hi cfg_hi; 599 union intel_mid_dma_cfg_hi cfg_hi;
581 enum intel_mid_dma_width width = 0; 600 enum dma_slave_buswidth width;
582 601
583 pr_debug("MDMA: Prep for memcpy\n"); 602 pr_debug("MDMA: Prep for memcpy\n");
584 BUG_ON(!chan); 603 BUG_ON(!chan);
585 if (!len) 604 if (!len)
586 return NULL; 605 return NULL;
587 606
588 mids = chan->private;
589 BUG_ON(!mids);
590
591 midc = to_intel_mid_dma_chan(chan); 607 midc = to_intel_mid_dma_chan(chan);
592 BUG_ON(!midc); 608 BUG_ON(!midc);
593 609
610 mids = midc->mid_slave;
611 BUG_ON(!mids);
612
594 pr_debug("MDMA:called for DMA %x CH %d Length %zu\n", 613 pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
595 midc->dma->pci_id, midc->ch_id, len); 614 midc->dma->pci_id, midc->ch_id, len);
596 pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n", 615 pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
597 mids->cfg_mode, mids->dirn, mids->hs_mode, mids->src_width); 616 mids->cfg_mode, mids->dma_slave.direction,
617 mids->hs_mode, mids->dma_slave.src_addr_width);
598 618
599 /*calculate CFG_LO*/ 619 /*calculate CFG_LO*/
600 if (mids->hs_mode == LNW_DMA_SW_HS) { 620 if (mids->hs_mode == LNW_DMA_SW_HS) {
@@ -613,13 +633,13 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
613 if (midc->dma->pimr_mask) { 633 if (midc->dma->pimr_mask) {
614 cfg_hi.cfgx.protctl = 0x0; /*default value*/ 634 cfg_hi.cfgx.protctl = 0x0; /*default value*/
615 cfg_hi.cfgx.fifo_mode = 1; 635 cfg_hi.cfgx.fifo_mode = 1;
616 if (mids->dirn == DMA_TO_DEVICE) { 636 if (mids->dma_slave.direction == DMA_TO_DEVICE) {
617 cfg_hi.cfgx.src_per = 0; 637 cfg_hi.cfgx.src_per = 0;
618 if (mids->device_instance == 0) 638 if (mids->device_instance == 0)
619 cfg_hi.cfgx.dst_per = 3; 639 cfg_hi.cfgx.dst_per = 3;
620 if (mids->device_instance == 1) 640 if (mids->device_instance == 1)
621 cfg_hi.cfgx.dst_per = 1; 641 cfg_hi.cfgx.dst_per = 1;
622 } else if (mids->dirn == DMA_FROM_DEVICE) { 642 } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
623 if (mids->device_instance == 0) 643 if (mids->device_instance == 0)
624 cfg_hi.cfgx.src_per = 2; 644 cfg_hi.cfgx.src_per = 2;
625 if (mids->device_instance == 1) 645 if (mids->device_instance == 1)
@@ -636,7 +656,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
636 /*calculate CTL_HI*/ 656 /*calculate CTL_HI*/
637 ctl_hi.ctlx.reser = 0; 657 ctl_hi.ctlx.reser = 0;
638 ctl_hi.ctlx.done = 0; 658 ctl_hi.ctlx.done = 0;
639 width = mids->src_width; 659 width = mids->dma_slave.src_addr_width;
640 660
641 ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size); 661 ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
642 pr_debug("MDMA:calc len %d for block size %d\n", 662 pr_debug("MDMA:calc len %d for block size %d\n",
@@ -644,21 +664,21 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
644 /*calculate CTL_LO*/ 664 /*calculate CTL_LO*/
645 ctl_lo.ctl_lo = 0; 665 ctl_lo.ctl_lo = 0;
646 ctl_lo.ctlx.int_en = 1; 666 ctl_lo.ctlx.int_en = 1;
647 ctl_lo.ctlx.dst_tr_width = mids->dst_width; 667 ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width;
648 ctl_lo.ctlx.src_tr_width = mids->src_width; 668 ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width;
649 ctl_lo.ctlx.dst_msize = mids->src_msize; 669 ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
650 ctl_lo.ctlx.src_msize = mids->dst_msize; 670 ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;
651 671
652 if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { 672 if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
653 ctl_lo.ctlx.tt_fc = 0; 673 ctl_lo.ctlx.tt_fc = 0;
654 ctl_lo.ctlx.sinc = 0; 674 ctl_lo.ctlx.sinc = 0;
655 ctl_lo.ctlx.dinc = 0; 675 ctl_lo.ctlx.dinc = 0;
656 } else { 676 } else {
657 if (mids->dirn == DMA_TO_DEVICE) { 677 if (mids->dma_slave.direction == DMA_TO_DEVICE) {
658 ctl_lo.ctlx.sinc = 0; 678 ctl_lo.ctlx.sinc = 0;
659 ctl_lo.ctlx.dinc = 2; 679 ctl_lo.ctlx.dinc = 2;
660 ctl_lo.ctlx.tt_fc = 1; 680 ctl_lo.ctlx.tt_fc = 1;
661 } else if (mids->dirn == DMA_FROM_DEVICE) { 681 } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
662 ctl_lo.ctlx.sinc = 2; 682 ctl_lo.ctlx.sinc = 2;
663 ctl_lo.ctlx.dinc = 0; 683 ctl_lo.ctlx.dinc = 0;
664 ctl_lo.ctlx.tt_fc = 2; 684 ctl_lo.ctlx.tt_fc = 2;
@@ -681,7 +701,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
681 desc->ctl_lo = ctl_lo.ctl_lo; 701 desc->ctl_lo = ctl_lo.ctl_lo;
682 desc->ctl_hi = ctl_hi.ctl_hi; 702 desc->ctl_hi = ctl_hi.ctl_hi;
683 desc->width = width; 703 desc->width = width;
684 desc->dirn = mids->dirn; 704 desc->dirn = mids->dma_slave.direction;
685 desc->lli_phys = 0; 705 desc->lli_phys = 0;
686 desc->lli = NULL; 706 desc->lli = NULL;
687 desc->lli_pool = NULL; 707 desc->lli_pool = NULL;
@@ -722,7 +742,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
722 midc = to_intel_mid_dma_chan(chan); 742 midc = to_intel_mid_dma_chan(chan);
723 BUG_ON(!midc); 743 BUG_ON(!midc);
724 744
725 mids = chan->private; 745 mids = midc->mid_slave;
726 BUG_ON(!mids); 746 BUG_ON(!mids);
727 747
728 if (!midc->dma->pimr_mask) { 748 if (!midc->dma->pimr_mask) {
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h
index 7a5ac56d1324..709fecbdde79 100644
--- a/drivers/dma/intel_mid_dma_regs.h
+++ b/drivers/dma/intel_mid_dma_regs.h
@@ -187,13 +187,13 @@ struct intel_mid_dma_chan {
187 struct list_head active_list; 187 struct list_head active_list;
188 struct list_head queue; 188 struct list_head queue;
189 struct list_head free_list; 189 struct list_head free_list;
190 struct intel_mid_dma_slave *slave;
191 unsigned int descs_allocated; 190 unsigned int descs_allocated;
192 struct middma_device *dma; 191 struct middma_device *dma;
193 bool busy; 192 bool busy;
194 bool in_use; 193 bool in_use;
195 u32 raw_tfr; 194 u32 raw_tfr;
196 u32 raw_block; 195 u32 raw_block;
196 struct intel_mid_dma_slave *mid_slave;
197}; 197};
198 198
199static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan( 199static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
@@ -264,7 +264,7 @@ struct intel_mid_dma_desc {
264 dma_addr_t next; 264 dma_addr_t next;
265 enum dma_data_direction dirn; 265 enum dma_data_direction dirn;
266 enum dma_status status; 266 enum dma_status status;
267 enum intel_mid_dma_width width; /*width of DMA txn*/ 267 enum dma_slave_buswidth width; /*width of DMA txn*/
268 enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ 268 enum intel_mid_dma_mode cfg_mode; /*mode configuration*/
269 269
270}; 270};
@@ -289,6 +289,13 @@ static inline struct intel_mid_dma_desc *to_intel_mid_dma_desc
289 return container_of(txd, struct intel_mid_dma_desc, txd); 289 return container_of(txd, struct intel_mid_dma_desc, txd);
290} 290}
291 291
292static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave
293 (struct dma_slave_config *slave)
294{
295 return container_of(slave, struct intel_mid_dma_slave, dma_slave);
296}
297
298
292int dma_resume(struct pci_dev *pci); 299int dma_resume(struct pci_dev *pci);
293 300
294#endif /*__INTEL_MID_DMAC_REGS_H__*/ 301#endif /*__INTEL_MID_DMAC_REGS_H__*/