path: root/drivers/dma/ste_dma40.c
author     Jonas Aaberg <jonas.aberg@stericsson.com>    2010-08-09 08:08:34 -0400
committer  Dan Williams <dan.j.williams@intel.com>      2010-09-22 17:53:46 -0400
commit     767a9675c4a68ada55f0f30d629db627bd47f012
tree       00cdf68f481bc818962367b6fa3255bd8040843e  /drivers/dma/ste_dma40.c
parent     aa182ae2621877e0c111922696c84c538b82ad14
DMAENGINE: ste_dma40: code clean-up
This patch includes non-functional code clean-up changes, file header updates, and named defines for a few magic numbers.

Signed-off-by: Jonas Aaberg <jonas.aberg@stericsson.com>
Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
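The "magic numbers" part of the clean-up is the usual pattern of replacing bare literals with named defines. A minimal sketch of that pattern, with made-up names rather than the defines actually introduced by the patch:

	#define D40_CHAN_REG_CFG	0x004	/* illustrative name only */
	#define D40_CFG_RESET		0x3	/* illustrative name only */

	/* Before: writel(0x3, chan_base + 0x004); -- opaque at the call site.
	 * After: the intent is visible where the write happens.
	 */
	writel(D40_CFG_RESET, chan_base + D40_CHAN_REG_CFG);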
Diffstat (limited to 'drivers/dma/ste_dma40.c')
-rw-r--r--  drivers/dma/ste_dma40.c  |  72
1 file changed, 29 insertions(+), 43 deletions(-)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index b8987e791055..7a4919bf1e92 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1,11 +1,8 @@
 /*
- * driver/dma/ste_dma40.c
- *
- * Copyright (C) ST-Ericsson 2007-2010
+ * Copyright (C) ST-Ericsson SA 2007-2010
+ * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
  * License terms: GNU General Public License (GPL) version 2
- * Author: Per Friden <per.friden@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
- *
  */

 #include <linux/kernel.h>
@@ -90,7 +87,6 @@ struct d40_lli_pool {
  * @txd: DMA engine struct. Used for among other things for communication
  * during a transfer.
  * @node: List entry.
- * @dir: The transfer direction of this job.
  * @is_in_client_list: true if the client owns this descriptor.
  * @is_hw_linked: true if this job will automatically be continued for
  * the previous one.
@@ -112,7 +108,6 @@ struct d40_desc {
 	struct dma_async_tx_descriptor	 txd;
 	struct list_head		 node;

-	enum dma_data_direction		 dir;
 	bool				 is_in_client_list;
 	bool				 is_hw_linked;
 };
@@ -149,9 +144,7 @@ struct d40_lcla_pool {
  * this physical channel. Can also be free or physically allocated.
  * @allocated_dst: Same as for src but is dst.
  * allocated_dst and allocated_src uses the D40_ALLOC* defines as well as
- * event line number. Both allocated_src and allocated_dst can not be
- * allocated to a physical channel, since the interrupt handler has then
- * no way of figure out which one the interrupt belongs to.
+ * event line number.
  */
 struct d40_phy_res {
 	spinlock_t lock;
@@ -237,7 +230,6 @@ struct d40_chan {
  * @dma_both: dma_device channels that can do both memcpy and slave transfers.
  * @dma_slave: dma_device channels that can do only do slave transfers.
  * @dma_memcpy: dma_device channels that can do only do memcpy transfers.
- * @phy_chans: Room for all possible physical channels in system.
  * @log_chans: Room for all possible logical channels in system.
  * @lookup_log_chans: Used to map interrupt number to logical channel. Points
  * to log_chans entries.
@@ -500,7 +492,8 @@ err:
 static int d40_channel_execute_command(struct d40_chan *d40c,
 				       enum d40_command command)
 {
-	int status, i;
+	u32 status;
+	int i;
 	void __iomem *active_reg;
 	int ret = 0;
 	unsigned long flags;
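Splitting "int status, i" also fixes the type of the status word: the value comes from reading a hardware register, and readl() returns a u32, so an unsigned 32-bit type keeps bit tests free of sign-extension surprises. A minimal sketch of the pattern (the helper and its arguments are hypothetical, not the driver's code):

	#include <linux/io.h>

	/* readl() returns a u32; keep the raw register value unsigned
	 * so masking the high bits behaves predictably.
	 */
	static bool d40_chan_bit_set(void __iomem *active_reg, u32 chan_bit)
	{
		u32 status = readl(active_reg);

		return (status & chan_bit) != 0;
	}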
@@ -568,16 +561,12 @@ static void d40_term_all(struct d40_chan *d40c)
 	/* Release active descriptors */
 	while ((d40d = d40_first_active_get(d40c))) {
 		d40_desc_remove(d40d);
-
-		/* Return desc to free-list */
 		d40_desc_free(d40c, d40d);
 	}

 	/* Release queued descriptors waiting for transfer */
 	while ((d40d = d40_first_queued(d40c))) {
 		d40_desc_remove(d40d);
-
-		/* Return desc to free-list */
 		d40_desc_free(d40c, d40d);
 	}

@@ -973,9 +962,6 @@ static void dma_tc_handle(struct d40_chan *d40c)
 {
 	struct d40_desc *d40d;

-	if (!d40c->phy_chan)
-		return;
-
 	/* Get first active entry from list */
 	d40d = d40_first_active_get(d40c);

@@ -1001,7 +987,7 @@ static void dma_tc_handle(struct d40_chan *d40c)
 static void dma_tasklet(unsigned long data)
 {
 	struct d40_chan *d40c = (struct d40_chan *) data;
-	struct d40_desc *d40d_fin;
+	struct d40_desc *d40d;
 	unsigned long flags;
 	dma_async_tx_callback callback;
 	void *callback_param;
@@ -1009,12 +995,12 @@ static void dma_tasklet(unsigned long data)
 	spin_lock_irqsave(&d40c->lock, flags);

 	/* Get first active entry from list */
-	d40d_fin = d40_first_active_get(d40c);
+	d40d = d40_first_active_get(d40c);

-	if (d40d_fin == NULL)
+	if (d40d == NULL)
 		goto err;

-	d40c->completed = d40d_fin->txd.cookie;
+	d40c->completed = d40d->txd.cookie;

 	/*
 	 * If terminating a channel pending_tx is set to zero.
@@ -1026,19 +1012,18 @@ static void dma_tasklet(unsigned long data)
 	}

 	/* Callback to client */
-	callback = d40d_fin->txd.callback;
-	callback_param = d40d_fin->txd.callback_param;
+	callback = d40d->txd.callback;
+	callback_param = d40d->txd.callback_param;

-	if (async_tx_test_ack(&d40d_fin->txd)) {
-		d40_pool_lli_free(d40d_fin);
-		d40_desc_remove(d40d_fin);
-		/* Return desc to free-list */
-		d40_desc_free(d40c, d40d_fin);
+	if (async_tx_test_ack(&d40d->txd)) {
+		d40_pool_lli_free(d40d);
+		d40_desc_remove(d40d);
+		d40_desc_free(d40c, d40d);
 	} else {
-		if (!d40d_fin->is_in_client_list) {
-			d40_desc_remove(d40d_fin);
-			list_add_tail(&d40d_fin->node, &d40c->client);
-			d40d_fin->is_in_client_list = true;
+		if (!d40d->is_in_client_list) {
+			d40_desc_remove(d40d);
+			list_add_tail(&d40d->node, &d40c->client);
+			d40d->is_in_client_list = true;
 		}
 	}

@@ -1049,7 +1034,7 @@ static void dma_tasklet(unsigned long data)

 	spin_unlock_irqrestore(&d40c->lock, flags);

-	if (callback && (d40d_fin->txd.flags & DMA_PREP_INTERRUPT))
+	if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
 		callback(callback_param);

 	return;
@@ -1127,7 +1112,6 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
 	return IRQ_HANDLED;
 }

-
 static int d40_validate_conf(struct d40_chan *d40c,
 			     struct stedma40_chan_cfg *conf)
 {
@@ -1432,7 +1416,6 @@ static int d40_free_dma(struct d40_chan *d40c)
 	list_for_each_entry_safe(d, _d, &d40c->client, node) {
 		d40_pool_lli_free(d);
 		d40_desc_remove(d);
-		/* Return desc to free-list */
 		d40_desc_free(d40c, d);
 	}

@@ -2793,8 +2776,10 @@ static int __init d40_lcla_allocate(struct d40_base *base)
 	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
 		base->lcla_pool.base = (void *)page_list[i];
 	} else {
-		/* After many attempts, no succees with finding the correct
-		 * alignment try with allocating a big buffer */
+		/*
+		 * After many attempts and no succees with finding the correct
+		 * alignment, try with allocating a big buffer.
+		 */
 		dev_warn(base->dev,
 			 "[%s] Failed to get %d pages @ 18 bit align.\n",
 			 __func__, base->lcla_pool.pages);
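The comment rewrite above adopts the kernel's standard multi-line comment layout, as described in Documentation/CodingStyle: opening and closing markers on their own lines, with a leading asterisk on every line in between.

	/*
	 * Preferred kernel style for multi-line comments: the text
	 * starts on the second line, and the closing marker sits
	 * on its own line.
	 */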
@@ -2916,8 +2901,9 @@ failure:
 	if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
 		free_pages((unsigned long)base->lcla_pool.base,
 			   base->lcla_pool.pages);
-	if (base->lcla_pool.base_unaligned)
-		kfree(base->lcla_pool.base_unaligned);
+
+	kfree(base->lcla_pool.base_unaligned);
+
 	if (base->phy_lcpa)
 		release_mem_region(base->phy_lcpa,
 				   base->lcpa_size);
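The last hunk drops the NULL check before kfree(): kfree(NULL) is defined to be a no-op, so the guard was redundant and the failure path can free unconditionally. A minimal sketch of the idiom (struct d40_example and its cleanup helper are hypothetical, not from the driver):

	#include <linux/slab.h>

	struct d40_example {
		void *base_unaligned;	/* may still be NULL on the error path */
	};

	/* kfree(NULL) is a no-op, so cleanup code can free unconditionally
	 * instead of wrapping every kfree() in an if.
	 */
	static void d40_example_cleanup(struct d40_example *e)
	{
		kfree(e->base_unaligned);	/* safe even when never allocated */
		e->base_unaligned = NULL;	/* avoid a double free on re-entry */
	}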