Diffstat (limited to 'drivers/dma/iop-adma.c')
 drivers/dma/iop-adma.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)
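The patch below replaces the GCC-specific __FUNCTION__ extension with the standard C99 __func__ identifier in the driver's debug prints; both expand to the name of the enclosing function. As a standalone illustration only (plain printf in userspace rather than the kernel's dev_dbg, hypothetical file, not part of the patch):

#include <stdio.h>

/* __func__ is the C99 predefined identifier; __FUNCTION__ is the older
 * GCC-specific spelling that this patch removes in favour of __func__. */
static void demo(void)
{
	printf("%s: entered\n", __func__);	/* prints "demo: entered" */
}

int main(void)
{
	demo();
	return 0;
}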
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 3986d54492bd..f82b0906d466 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -140,7 +140,7 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
 	int busy = iop_chan_is_busy(iop_chan);
 	int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
 
-	dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
 	/* free completed slots from the chain starting with
 	 * the oldest descriptor
 	 */
@@ -438,7 +438,7 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
 	spin_unlock_bh(&iop_chan->lock);
 
 	dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
-		__FUNCTION__, sw_desc->async_tx.cookie, sw_desc->idx);
+		__func__, sw_desc->async_tx.cookie, sw_desc->idx);
 
 	return cookie;
 }
@@ -520,7 +520,7 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan)
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
 	int slot_cnt, slots_per_op;
 
-	dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
@@ -548,7 +548,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 	BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
 
 	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
-		__FUNCTION__, len);
+		__func__, len);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
@@ -580,7 +580,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
 	BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
 
 	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
-		__FUNCTION__, len);
+		__func__, len);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op);
@@ -614,7 +614,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 
 	dev_dbg(iop_chan->device->common.dev,
 		"%s src_cnt: %d len: %u flags: %lx\n",
-		__FUNCTION__, src_cnt, len, flags);
+		__func__, src_cnt, len, flags);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
@@ -648,7 +648,7 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
 		return NULL;
 
 	dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
-		__FUNCTION__, src_cnt, len);
+		__func__, src_cnt, len);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
@@ -659,7 +659,7 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
 		iop_desc_set_zero_sum_byte_count(grp_start, len);
 		grp_start->xor_check_result = result;
 		pr_debug("\t%s: grp_start->xor_check_result: %p\n",
-			__FUNCTION__, grp_start->xor_check_result);
+			__func__, grp_start->xor_check_result);
 		sw_desc->unmap_src_cnt = src_cnt;
 		sw_desc->unmap_len = len;
 		while (src_cnt--)
@@ -700,7 +700,7 @@ static void iop_adma_free_chan_resources(struct dma_chan *chan)
 	iop_chan->last_used = NULL;
 
 	dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
-		__FUNCTION__, iop_chan->slots_allocated);
+		__func__, iop_chan->slots_allocated);
 	spin_unlock_bh(&iop_chan->lock);
 
 	/* one is ok since we left it on there on purpose */
@@ -753,7 +753,7 @@ static irqreturn_t iop_adma_eot_handler(int irq, void *data)
 {
 	struct iop_adma_chan *chan = data;
 
-	dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(chan->device->common.dev, "%s\n", __func__);
 
 	tasklet_schedule(&chan->irq_tasklet);
 
@@ -766,7 +766,7 @@ static irqreturn_t iop_adma_eoc_handler(int irq, void *data)
 {
 	struct iop_adma_chan *chan = data;
 
-	dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(chan->device->common.dev, "%s\n", __func__);
 
 	tasklet_schedule(&chan->irq_tasklet);
 
@@ -823,7 +823,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
 	int err = 0;
 	struct iop_adma_chan *iop_chan;
 
-	dev_dbg(device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(device->common.dev, "%s\n", __func__);
 
 	src = kzalloc(sizeof(u8) * IOP_ADMA_TEST_SIZE, GFP_KERNEL);
 	if (!src)
@@ -906,7 +906,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 	int err = 0;
 	struct iop_adma_chan *iop_chan;
 
-	dev_dbg(device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(device->common.dev, "%s\n", __func__);
 
 	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
 		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
@@ -1159,7 +1159,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
 	}
 
 	dev_dbg(&pdev->dev, "%s: allocted descriptor pool virt %p phys %p\n",
-		__FUNCTION__, adev->dma_desc_pool_virt,
+		__func__, adev->dma_desc_pool_virt,
 		(void *) adev->dma_desc_pool);
 
 	adev->id = plat_data->hw_id;
@@ -1289,7 +1289,7 @@ static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
 	dma_cookie_t cookie;
 	int slot_cnt, slots_per_op;
 
-	dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
@@ -1346,7 +1346,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
 	dma_cookie_t cookie;
 	int slot_cnt, slots_per_op;
 
-	dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);