author    Linus Torvalds <torvalds@linux-foundation.org>    2008-03-13 16:17:25 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2008-03-13 16:17:25 -0400
commit    b1881fb148a67a5b694ac9701672ce6b359abfa4 (patch)
tree      3c75d3cefe3c5d59304bc46c4782e7d52830aebf /drivers
parent    ebe168d52c6255cfaf701b488e9e9ed0f548da19 (diff)
parent    3280ab3e8815d60cea483d49b21261972e2785d6 (diff)
Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx:
  async_tx: checkpatch says s/__FUNCTION__/__func__/g
  iop-adma.c: replace remaining __FUNCTION__ occurrences
  fsldma: Add a completed cookie updated action in DMA finish interrupt.
  fsldma: Add device_prep_dma_interrupt support to fsldma.c
  dmaengine: Fix a bug about BUG_ON() on DMA engine capability DMA_INTERRUPT.
  fsldma: Fix fsldma.c warning messages when it's compiled under PPC64.
Diffstat (limited to 'drivers')
-rw-r--r--    drivers/dma/dmaengine.c    2
-rw-r--r--    drivers/dma/fsldma.c      58
-rw-r--r--    drivers/dma/iop-adma.c    32
3 files changed, 61 insertions(+), 31 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 29965231b912..8db0e7f9d3f4 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -357,7 +357,7 @@ int dma_async_device_register(struct dma_device *device)
 		!device->device_prep_dma_zero_sum);
 	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
 		!device->device_prep_dma_memset);
-	BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
+	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
 		!device->device_prep_dma_interrupt);
 
 	BUG_ON(!device->device_alloc_chan_resources);
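The restored pairing above is the dmaengine registration invariant: every capability bit a driver sets in cap_mask must be backed by the matching prep callback, or dma_async_device_register() hits a BUG_ON. A minimal sketch of how a driver satisfies the DMA_INTERRUPT pairing at probe time, mirroring the fsldma change later in this merge; example_register is a hypothetical wrapper, and a real driver must also fill in the channel-resource and issue-pending callbacks checked by the same function.

#include <linux/dmaengine.h>

/* Sketch: advertising DMA_INTERRUPT without wiring up the matching
 * prep hook would trip the corrected BUG_ON at registration time.
 * fsl_dma_prep_memcpy/fsl_dma_prep_interrupt are the fsldma callbacks
 * from this merge. */
static int example_register(struct dma_device *dev)
{
	dma_cap_set(DMA_MEMCPY, dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dev->cap_mask);

	dev->device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	dev->device_prep_dma_interrupt = fsl_dma_prep_interrupt; /* required */

	return dma_async_device_register(dev); /* runs the BUG_ON checks */
}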
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index cc9a68158d99..ad2f938597e2 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -57,12 +57,12 @@ static void dma_init(struct fsl_dma_chan *fsl_chan)
 
 }
 
-static void set_sr(struct fsl_dma_chan *fsl_chan, dma_addr_t val)
+static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val)
 {
 	DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
 }
 
-static dma_addr_t get_sr(struct fsl_dma_chan *fsl_chan)
+static u32 get_sr(struct fsl_dma_chan *fsl_chan)
 {
 	return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
 }
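The sr register is a 32-bit status word, so funneling it through dma_addr_t (64 bits wide on PPC64) was both misleading and the root of the printk format warnings this branch fixes; several later hunks apply the same reasoning to debug output. A hedged illustration of the width mismatch, with show_addr as a hypothetical helper:

#include <linux/types.h>
#include <linux/kernel.h>

/* Illustrative only: on PPC64 dma_addr_t is 64 bits, so a "0x%08x"
 * conversion mismatches it and gcc warns. Casting to void * and
 * printing with %p, as the hunks below do, is width-correct on both
 * 32- and 64-bit builds. */
static void show_addr(dma_addr_t addr)
{
	pr_debug("addr %p\n", (void *)addr);
}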
@@ -406,6 +406,32 @@ static void fsl_dma_free_chan_resources(struct dma_chan *chan)
 	dma_pool_destroy(fsl_chan->desc_pool);
 }
 
+static struct dma_async_tx_descriptor *
+fsl_dma_prep_interrupt(struct dma_chan *chan)
+{
+	struct fsl_dma_chan *fsl_chan;
+	struct fsl_desc_sw *new;
+
+	if (!chan)
+		return NULL;
+
+	fsl_chan = to_fsl_chan(chan);
+
+	new = fsl_dma_alloc_descriptor(fsl_chan);
+	if (!new) {
+		dev_err(fsl_chan->dev, "No free memory for link descriptor\n");
+		return NULL;
+	}
+
+	new->async_tx.cookie = -EBUSY;
+	new->async_tx.ack = 0;
+
+	/* Set End-of-link to the last link descriptor of new list*/
+	set_ld_eol(fsl_chan, new);
+
+	return &new->async_tx;
+}
+
 static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
 	size_t len, unsigned long flags)
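What the new callback enables, from the client side: a descriptor whose only effect is to raise a completion interrupt, which async_tx uses as a notification and channel-switch primitive. A hedged sketch against the 2.6.25-era dmaengine API (tx->callback, tx->tx_submit); queue_completion_irq is a hypothetical caller and error handling is trimmed.

#include <linux/dmaengine.h>

/* Sketch: request an interrupt-only descriptor, attach a completion
 * callback, and submit it. A negative return signals failure, as with
 * other cookies of this era. */
static dma_cookie_t queue_completion_irq(struct dma_chan *chan,
					 dma_async_tx_callback done,
					 void *done_arg)
{
	struct dma_async_tx_descriptor *tx;

	tx = chan->device->device_prep_dma_interrupt(chan);
	if (!tx)
		return -ENOMEM;

	tx->callback = done;
	tx->callback_param = done_arg;

	return tx->tx_submit(tx);	/* returns the tracking cookie */
}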
@@ -436,7 +462,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 		dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
 #endif
 
-		copy = min(len, FSL_DMA_BCR_MAX_CNT);
+		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);
 
 		set_desc_cnt(fsl_chan, &new->hw, copy);
 		set_desc_src(fsl_chan, &new->hw, dma_src);
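The cast silences a PPC64 build warning: the kernel's min() macro type-checks both operands, and the integer constant FSL_DMA_BCR_MAX_CNT no longer matches size_t there. A hedged illustration of the equivalent min_t() spelling; clamp_copy is hypothetical and assumes fsldma.h (which defines FSL_DMA_BCR_MAX_CNT) is in scope.

#include <linux/kernel.h>

/* min() in linux/kernel.h warns when its operands have distinct types;
 * casting the constant keeps it in step with 'len'. min_t() expresses
 * the same intent directly. */
static size_t clamp_copy(size_t len)
{
	return min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
}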
@@ -513,7 +539,6 @@ static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan)
 
 	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
 
-	fsl_dma_update_completed_cookie(fsl_chan);
 	dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
 		fsl_chan->completed_cookie);
 	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
@@ -581,8 +606,8 @@ static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
 	if (ld_node != &fsl_chan->ld_queue) {
 		/* Get the ld start address from ld_queue */
 		next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
-		dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%016llx\n",
-			(u64)next_dest_addr);
+		dev_dbg(fsl_chan->dev, "xfer LDs staring from %p\n",
+			(void *)next_dest_addr);
 		set_cdar(fsl_chan, next_dest_addr);
 		dma_start(fsl_chan);
 	} else {
@@ -662,7 +687,7 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
 static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
 {
 	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
-	dma_addr_t stat;
+	u32 stat;
 
 	stat = get_sr(fsl_chan);
 	dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
@@ -681,10 +706,10 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
 	 */
 	if (stat & FSL_DMA_SR_EOSI) {
 		dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
-		dev_dbg(fsl_chan->dev, "event: clndar 0x%016llx, "
-			"nlndar 0x%016llx\n", (u64)get_cdar(fsl_chan),
-			(u64)get_ndar(fsl_chan));
+		dev_dbg(fsl_chan->dev, "event: clndar %p, nlndar %p\n",
+			(void *)get_cdar(fsl_chan), (void *)get_ndar(fsl_chan));
 		stat &= ~FSL_DMA_SR_EOSI;
+		fsl_dma_update_completed_cookie(fsl_chan);
 	}
 
 	/* If it current transfer is the end-of-transfer,
@@ -726,12 +751,15 @@ static void dma_do_tasklet(unsigned long data)
 	fsl_chan_ld_cleanup(fsl_chan);
 }
 
+#ifdef FSL_DMA_CALLBACKTEST
 static void fsl_dma_callback_test(struct fsl_dma_chan *fsl_chan)
 {
 	if (fsl_chan)
 		dev_info(fsl_chan->dev, "selftest: callback is ok!\n");
 }
+#endif
 
+#ifdef CONFIG_FSL_DMA_SELFTEST
 static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
 {
 	struct dma_chan *chan;
@@ -837,9 +865,9 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
 	if (err) {
 		for (i = 0; (*(src + i) == *(dest + i)) && (i < test_size);
 				i++);
-		dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%d is "
-				"error! src 0x%x, dest 0x%x\n",
-				i, test_size, *(src + i), *(dest + i));
+		dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%ld is "
+				"error! src 0x%x, dest 0x%x\n",
+				i, (long)test_size, *(src + i), *(dest + i));
 	}
 
 free_resources:
@@ -848,6 +876,7 @@ out:
 	kfree(src);
 	return err;
 }
+#endif
 
 static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
 	const struct of_device_id *match)
@@ -1008,8 +1037,8 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
 	}
 
 	dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
-			"controller at 0x%08x...\n",
-			match->compatible, fdev->reg.start);
+			"controller at %p...\n",
+			match->compatible, (void *)fdev->reg.start);
 	fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
 			- fdev->reg.start + 1);
 
@@ -1017,6 +1046,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
 	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
 	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
 	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
+	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
 	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
 	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
 	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
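Net effect of the cookie change in this file: fsl_dma_update_completed_cookie() now runs in the end-of-segments interrupt path rather than in the lock-protected cleanup routine, so a client polling a cookie sees it advance as soon as the hardware signals EOSI. A hedged polling sketch using the generic helpers of that era; wait_for_cookie is hypothetical and busy-waits purely for illustration, where a real caller would sleep or rely on the descriptor callback.

#include <linux/dmaengine.h>

/* Sketch: spin on a submitted cookie. dma_async_is_tx_complete()
 * forwards to the driver's device_is_tx_complete (fsl_dma_is_complete
 * here), which compares the cookie against completed_cookie. */
static void wait_for_cookie(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;

	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		cpu_relax();
	} while (status == DMA_IN_PROGRESS);
}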
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 3986d54492bd..f82b0906d466 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -140,7 +140,7 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
 	int busy = iop_chan_is_busy(iop_chan);
 	int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
 
-	dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
 	/* free completed slots from the chain starting with
 	 * the oldest descriptor
 	 */
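Context for the substitution running through the rest of this file: __func__ is the C99-standard predefined identifier, while __FUNCTION__ is a GCC-specific alias that checkpatch.pl flags; under GCC the two expand identically, so the change is purely a style cleanup. A tiny illustration with a hypothetical function:

#include <linux/kernel.h>

static void who_am_i(void)
{
	pr_debug("%s\n", __func__);	/* C99 standard, what checkpatch wants */
	pr_debug("%s\n", __FUNCTION__);	/* GCC alias, deprecated in-tree */
}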
@@ -438,7 +438,7 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
 	spin_unlock_bh(&iop_chan->lock);
 
 	dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
-		__FUNCTION__, sw_desc->async_tx.cookie, sw_desc->idx);
+		__func__, sw_desc->async_tx.cookie, sw_desc->idx);
 
 	return cookie;
 }
@@ -520,7 +520,7 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan)
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
 	int slot_cnt, slots_per_op;
 
-	dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
@@ -548,7 +548,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 	BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
 
 	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
-		__FUNCTION__, len);
+		__func__, len);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
@@ -580,7 +580,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
 	BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
 
 	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
-		__FUNCTION__, len);
+		__func__, len);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op);
@@ -614,7 +614,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 
 	dev_dbg(iop_chan->device->common.dev,
 		"%s src_cnt: %d len: %u flags: %lx\n",
-		__FUNCTION__, src_cnt, len, flags);
+		__func__, src_cnt, len, flags);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
@@ -648,7 +648,7 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
 		return NULL;
 
 	dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
-		__FUNCTION__, src_cnt, len);
+		__func__, src_cnt, len);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
@@ -659,7 +659,7 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
 	iop_desc_set_zero_sum_byte_count(grp_start, len);
 	grp_start->xor_check_result = result;
 	pr_debug("\t%s: grp_start->xor_check_result: %p\n",
-		__FUNCTION__, grp_start->xor_check_result);
+		__func__, grp_start->xor_check_result);
 	sw_desc->unmap_src_cnt = src_cnt;
 	sw_desc->unmap_len = len;
 	while (src_cnt--)
@@ -700,7 +700,7 @@ static void iop_adma_free_chan_resources(struct dma_chan *chan)
 	iop_chan->last_used = NULL;
 
 	dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
-		__FUNCTION__, iop_chan->slots_allocated);
+		__func__, iop_chan->slots_allocated);
 	spin_unlock_bh(&iop_chan->lock);
 
 	/* one is ok since we left it on there on purpose */
@@ -753,7 +753,7 @@ static irqreturn_t iop_adma_eot_handler(int irq, void *data)
 {
 	struct iop_adma_chan *chan = data;
 
-	dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(chan->device->common.dev, "%s\n", __func__);
 
 	tasklet_schedule(&chan->irq_tasklet);
 
@@ -766,7 +766,7 @@ static irqreturn_t iop_adma_eoc_handler(int irq, void *data)
 {
 	struct iop_adma_chan *chan = data;
 
-	dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(chan->device->common.dev, "%s\n", __func__);
 
 	tasklet_schedule(&chan->irq_tasklet);
 
@@ -823,7 +823,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
 	int err = 0;
 	struct iop_adma_chan *iop_chan;
 
-	dev_dbg(device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(device->common.dev, "%s\n", __func__);
 
 	src = kzalloc(sizeof(u8) * IOP_ADMA_TEST_SIZE, GFP_KERNEL);
 	if (!src)
@@ -906,7 +906,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 	int err = 0;
 	struct iop_adma_chan *iop_chan;
 
-	dev_dbg(device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(device->common.dev, "%s\n", __func__);
 
 	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
 		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
@@ -1159,7 +1159,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
 	}
 
 	dev_dbg(&pdev->dev, "%s: allocted descriptor pool virt %p phys %p\n",
-		__FUNCTION__, adev->dma_desc_pool_virt,
+		__func__, adev->dma_desc_pool_virt,
 		(void *) adev->dma_desc_pool);
 
 	adev->id = plat_data->hw_id;
@@ -1289,7 +1289,7 @@ static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
 	dma_cookie_t cookie;
 	int slot_cnt, slots_per_op;
 
-	dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
@@ -1346,7 +1346,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
 	dma_cookie_t cookie;
 	int slot_cnt, slots_per_op;
 
-	dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);