diff options
author | Rabin Vincent <rabin.vincent@stericsson.com> | 2011-01-25 05:18:06 -0500 |
---|---|---|
committer | Dan Williams <dan.j.williams@intel.com> | 2011-01-31 01:27:15 -0500 |
commit | 7d83a854a1a44a8f6a699503441403a36c42f66c (patch) | |
tree | 7c83ad7793154f469ccbdd0f9045c748960623b2 /drivers/dma | |
parent | 262d2915d4f11e5e78e432ab68f0ee034ef3f75f (diff) |
dma40: remove "hardware link with previous jobs" code
This "hardware link with previous jobs" code is:
- unused, no clients using or requiring this feature
- incomplete, being implemented only for physical channels
- broken, only working to perform one link
Remove it. This also allows us to get rid of the channel pause in the
submit_tx() routine.
Acked-by: Per Forlin <per.forlin@stericsson.com>
Acked-by: Jonas Aaberg <jonas.aberg@stericsson.com>
Signed-off-by: Rabin Vincent <rabin.vincent@stericsson.com>
Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r-- | drivers/dma/ste_dma40.c | 113 |
1 file changed, 6 insertions, 107 deletions
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 42c88fc28815..ed2a3ebcd86c 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
@@ -94,7 +94,6 @@ struct d40_lli_pool { | |||
94 | * during a transfer. | 94 | * during a transfer. |
95 | * @node: List entry. | 95 | * @node: List entry. |
96 | * @is_in_client_list: true if the client owns this descriptor. | 96 | * @is_in_client_list: true if the client owns this descriptor. |
97 | * @is_hw_linked: true if this job will automatically be continued for | ||
98 | * the previous one. | 97 | * the previous one. |
99 | * | 98 | * |
100 | * This descriptor is used for both logical and physical transfers. | 99 | * This descriptor is used for both logical and physical transfers. |
@@ -114,7 +113,6 @@ struct d40_desc { | |||
114 | struct list_head node; | 113 | struct list_head node; |
115 | 114 | ||
116 | bool is_in_client_list; | 115 | bool is_in_client_list; |
117 | bool is_hw_linked; | ||
118 | }; | 116 | }; |
119 | 117 | ||
120 | /** | 118 | /** |
@@ -548,18 +546,6 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c) | |||
548 | return d; | 546 | return d; |
549 | } | 547 | } |
550 | 548 | ||
551 | static struct d40_desc *d40_last_queued(struct d40_chan *d40c) | ||
552 | { | ||
553 | struct d40_desc *d; | ||
554 | |||
555 | if (list_empty(&d40c->queue)) | ||
556 | return NULL; | ||
557 | list_for_each_entry(d, &d40c->queue, node) | ||
558 | if (list_is_last(&d->node, &d40c->queue)) | ||
559 | break; | ||
560 | return d; | ||
561 | } | ||
562 | |||
563 | static int d40_psize_2_burst_size(bool is_log, int psize) | 549 | static int d40_psize_2_burst_size(bool is_log, int psize) |
564 | { | 550 | { |
565 | if (is_log) { | 551 | if (is_log) { |
@@ -940,77 +926,6 @@ no_suspend: | |||
940 | return res; | 926 | return res; |
941 | } | 927 | } |
942 | 928 | ||
943 | static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d) | ||
944 | { | ||
945 | /* TODO: Write */ | ||
946 | } | ||
947 | |||
948 | static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d) | ||
949 | { | ||
950 | struct d40_desc *d40d_prev = NULL; | ||
951 | int i; | ||
952 | u32 val; | ||
953 | |||
954 | if (!list_empty(&d40c->queue)) | ||
955 | d40d_prev = d40_last_queued(d40c); | ||
956 | else if (!list_empty(&d40c->active)) | ||
957 | d40d_prev = d40_first_active_get(d40c); | ||
958 | |||
959 | if (!d40d_prev) | ||
960 | return; | ||
961 | |||
962 | /* Here we try to join this job with previous jobs */ | ||
963 | val = readl(d40c->base->virtbase + D40_DREG_PCBASE + | ||
964 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
965 | D40_CHAN_REG_SSLNK); | ||
966 | |||
967 | /* Figure out which link we're currently transmitting */ | ||
968 | for (i = 0; i < d40d_prev->lli_len; i++) | ||
969 | if (val == d40d_prev->lli_phy.src[i].reg_lnk) | ||
970 | break; | ||
971 | |||
972 | val = readl(d40c->base->virtbase + D40_DREG_PCBASE + | ||
973 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
974 | D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS; | ||
975 | |||
976 | if (i == (d40d_prev->lli_len - 1) && val > 0) { | ||
977 | /* Change the current one */ | ||
978 | writel(virt_to_phys(d40d->lli_phy.src), | ||
979 | d40c->base->virtbase + D40_DREG_PCBASE + | ||
980 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
981 | D40_CHAN_REG_SSLNK); | ||
982 | writel(virt_to_phys(d40d->lli_phy.dst), | ||
983 | d40c->base->virtbase + D40_DREG_PCBASE + | ||
984 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
985 | D40_CHAN_REG_SDLNK); | ||
986 | |||
987 | d40d->is_hw_linked = true; | ||
988 | |||
989 | } else if (i < d40d_prev->lli_len) { | ||
990 | (void) dma_unmap_single(d40c->base->dev, | ||
991 | virt_to_phys(d40d_prev->lli_phy.src), | ||
992 | d40d_prev->lli_pool.size, | ||
993 | DMA_TO_DEVICE); | ||
994 | |||
995 | /* Keep the settings */ | ||
996 | val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk & | ||
997 | ~D40_SREG_LNK_PHYS_LNK_MASK; | ||
998 | d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk = | ||
999 | val | virt_to_phys(d40d->lli_phy.src); | ||
1000 | |||
1001 | val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk & | ||
1002 | ~D40_SREG_LNK_PHYS_LNK_MASK; | ||
1003 | d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk = | ||
1004 | val | virt_to_phys(d40d->lli_phy.dst); | ||
1005 | |||
1006 | (void) dma_map_single(d40c->base->dev, | ||
1007 | d40d_prev->lli_phy.src, | ||
1008 | d40d_prev->lli_pool.size, | ||
1009 | DMA_TO_DEVICE); | ||
1010 | d40d->is_hw_linked = true; | ||
1011 | } | ||
1012 | } | ||
1013 | |||
1014 | static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) | 929 | static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) |
1015 | { | 930 | { |
1016 | struct d40_chan *d40c = container_of(tx->chan, | 931 | struct d40_chan *d40c = container_of(tx->chan, |
@@ -1019,8 +934,6 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) | |||
1019 | struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); | 934 | struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); |
1020 | unsigned long flags; | 935 | unsigned long flags; |
1021 | 936 | ||
1022 | (void) d40_pause(&d40c->chan); | ||
1023 | |||
1024 | spin_lock_irqsave(&d40c->lock, flags); | 937 | spin_lock_irqsave(&d40c->lock, flags); |
1025 | 938 | ||
1026 | d40c->chan.cookie++; | 939 | d40c->chan.cookie++; |
@@ -1030,17 +943,10 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) | |||
1030 | 943 | ||
1031 | d40d->txd.cookie = d40c->chan.cookie; | 944 | d40d->txd.cookie = d40c->chan.cookie; |
1032 | 945 | ||
1033 | if (d40c->log_num == D40_PHY_CHAN) | ||
1034 | d40_tx_submit_phy(d40c, d40d); | ||
1035 | else | ||
1036 | d40_tx_submit_log(d40c, d40d); | ||
1037 | |||
1038 | d40_desc_queue(d40c, d40d); | 946 | d40_desc_queue(d40c, d40d); |
1039 | 947 | ||
1040 | spin_unlock_irqrestore(&d40c->lock, flags); | 948 | spin_unlock_irqrestore(&d40c->lock, flags); |
1041 | 949 | ||
1042 | (void) d40_resume(&d40c->chan); | ||
1043 | |||
1044 | return tx->cookie; | 950 | return tx->cookie; |
1045 | } | 951 | } |
1046 | 952 | ||
@@ -1080,21 +986,14 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c) | |||
1080 | /* Add to active queue */ | 986 | /* Add to active queue */ |
1081 | d40_desc_submit(d40c, d40d); | 987 | d40_desc_submit(d40c, d40d); |
1082 | 988 | ||
1083 | /* | 989 | /* Initiate DMA job */ |
1084 | * If this job is already linked in hw, | 990 | d40_desc_load(d40c, d40d); |
1085 | * do not submit it. | ||
1086 | */ | ||
1087 | |||
1088 | if (!d40d->is_hw_linked) { | ||
1089 | /* Initiate DMA job */ | ||
1090 | d40_desc_load(d40c, d40d); | ||
1091 | 991 | ||
1092 | /* Start dma job */ | 992 | /* Start dma job */ |
1093 | err = d40_start(d40c); | 993 | err = d40_start(d40c); |
1094 | 994 | ||
1095 | if (err) | 995 | if (err) |
1096 | return NULL; | 996 | return NULL; |
1097 | } | ||
1098 | } | 997 | } |
1099 | 998 | ||
1100 | return d40d; | 999 | return d40d; |