author     Jonathan Herman <hermanjl@cs.unc.edu>   2013-01-17 16:15:55 -0500
committer  Jonathan Herman <hermanjl@cs.unc.edu>   2013-01-17 16:15:55 -0500
commit     8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree       a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /drivers/dma/ste_dma40.c
parent     406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'drivers/dma/ste_dma40.c')

 -rw-r--r--  drivers/dma/ste_dma40.c | 804
 1 file changed, 197 insertions(+), 607 deletions(-)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 23c5573e62d..467e4dcb20a 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -9,19 +9,15 @@
 #include <linux/dma-mapping.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
-#include <linux/export.h>
 #include <linux/dmaengine.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
-#include <linux/pm.h>
-#include <linux/pm_runtime.h>
 #include <linux/err.h>
 #include <linux/amba/bus.h>
-#include <linux/regulator/consumer.h>
-#include <linux/platform_data/dma-ste-dma40.h>
 
-#include "dmaengine.h"
+#include <plat/ste_dma40.h>
+
 #include "ste_dma40_ll.h"
 
 #define D40_NAME "dma40"
@@ -35,9 +31,6 @@
 /* Maximum iterations taken before giving up suspending a channel */
 #define D40_SUSPEND_MAX_IT 500
 
-/* Milliseconds */
-#define DMA40_AUTOSUSPEND_DELAY 100
-
 /* Hardware requirement on LCLA alignment */
 #define LCLA_ALIGNMENT 0x40000
 
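The DMA40_AUTOSUSPEND_DELAY constant removed above is the idle grace period the newer driver hands to the runtime-PM core before the controller is powered down. A minimal sketch of how such a delay is typically installed, using the stock pm_runtime API (the probe-time placement is an assumption, not shown in this diff):

	/* In probe(), before the first pm_runtime_put_autosuspend(): */
	pm_runtime_set_autosuspend_delay(&pdev->dev, DMA40_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);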
@@ -68,71 +61,6 @@ enum d40_command {
 	D40_DMA_SUSPENDED = 3
 };
 
-/*
- * enum d40_events - The different Event Enables for the event lines.
- *
- * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
- * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
- * @D40_SUSPEND_REQ_EVENTLINE: Requesting for suspending a event line.
- * @D40_ROUND_EVENTLINE: Status check for event line.
- */
-
-enum d40_events {
-	D40_DEACTIVATE_EVENTLINE = 0,
-	D40_ACTIVATE_EVENTLINE = 1,
-	D40_SUSPEND_REQ_EVENTLINE = 2,
-	D40_ROUND_EVENTLINE = 3
-};
-
-/*
- * These are the registers that has to be saved and later restored
- * when the DMA hw is powered off.
- * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
- */
-static u32 d40_backup_regs[] = {
-	D40_DREG_LCPA,
-	D40_DREG_LCLA,
-	D40_DREG_PRMSE,
-	D40_DREG_PRMSO,
-	D40_DREG_PRMOE,
-	D40_DREG_PRMOO,
-};
-
-#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
-
-/* TODO: Check if all these registers have to be saved/restored on dma40 v3 */
-static u32 d40_backup_regs_v3[] = {
-	D40_DREG_PSEG1,
-	D40_DREG_PSEG2,
-	D40_DREG_PSEG3,
-	D40_DREG_PSEG4,
-	D40_DREG_PCEG1,
-	D40_DREG_PCEG2,
-	D40_DREG_PCEG3,
-	D40_DREG_PCEG4,
-	D40_DREG_RSEG1,
-	D40_DREG_RSEG2,
-	D40_DREG_RSEG3,
-	D40_DREG_RSEG4,
-	D40_DREG_RCEG1,
-	D40_DREG_RCEG2,
-	D40_DREG_RCEG3,
-	D40_DREG_RCEG4,
-};
-
-#define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3)
-
-static u32 d40_backup_regs_chan[] = {
-	D40_CHAN_REG_SSCFG,
-	D40_CHAN_REG_SSELT,
-	D40_CHAN_REG_SSPTR,
-	D40_CHAN_REG_SSLNK,
-	D40_CHAN_REG_SDCFG,
-	D40_CHAN_REG_SDELT,
-	D40_CHAN_REG_SDPTR,
-	D40_CHAN_REG_SDLNK,
-};
-
 /**
  * struct d40_lli_pool - Structure for keeping LLIs in memory
  *
@@ -167,7 +95,7 @@ struct d40_lli_pool {
  * during a transfer.
  * @node: List entry.
  * @is_in_client_list: true if the client owns this descriptor.
- * @cyclic: true if this is a cyclic job
+ * the previous one.
  *
  * This descriptor is used for both logical and physical transfers.
  */
@@ -214,7 +142,6 @@ struct d40_lcla_pool {
  * channels.
  *
  * @lock: A lock protection this entity.
- * @reserved: True if used by secure world or otherwise.
  * @num: The physical channel number of this entity.
  * @allocated_src: Bit mapped to show which src event line's are mapped to
  * this physical channel. Can also be free or physically allocated.
@@ -224,7 +151,6 @@ struct d40_lcla_pool {
  */
 struct d40_phy_res {
 	spinlock_t lock;
-	bool reserved;
 	int num;
 	u32 allocated_src;
 	u32 allocated_dst;
@@ -237,6 +163,8 @@ struct d40_base;
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any of this channel.
+ * @completed: Starts with 1, after first interrupt it is set to dma engine's
+ * current cookie.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
@@ -256,6 +184,7 @@ struct d40_base;
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
+ * @lcla: Space for one dst src pair for logical channel transfers.
 * @lcpa: Pointer to dst and src lcpa settings.
 * @runtime_addr: runtime configured address.
 * @runtime_direction: runtime configured direction.
@@ -265,6 +194,8 @@ struct d40_base;
 struct d40_chan {
 	spinlock_t lock;
 	int log_num;
+	/* ID of the most recent completed transfer */
+	int completed;
 	int pending_tx;
 	bool busy;
 	struct d40_phy_res *phy_chan;
@@ -285,7 +216,7 @@ struct d40_chan {
 	struct d40_log_lli_full *lcpa;
 	/* Runtime reconfiguration */
 	dma_addr_t runtime_addr;
-	enum dma_transfer_direction runtime_direction;
+	enum dma_data_direction runtime_direction;
 };
 
 /**
@@ -309,7 +240,6 @@ struct d40_chan {
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can do only do slave transfers.
 * @dma_memcpy: dma_device channels that can do only do memcpy transfers.
- * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
@@ -317,20 +247,12 @@
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
- * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
- * @reg_val_backup: Here the values of some hardware registers are stored
- * before the DMA is powered off. They are restored when the power is back on.
- * @reg_val_backup_v3: Backup of registers that only exits on dma40 v3 and
- * later.
- * @reg_val_backup_chan: Backup data for standard channel parameter registers.
- * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
- * @initialized: true if the dma has been initialized
 */
 struct d40_base {
 	spinlock_t interrupt_lock;
@@ -352,7 +274,6 @@ struct d40_base {
 	struct d40_chan **lookup_log_chans;
 	struct d40_chan **lookup_phy_chans;
 	struct stedma40_platform_data *plat_data;
-	struct regulator *lcpa_regulator;
 	/* Physical half channels */
 	struct d40_phy_res *phy_res;
 	struct d40_lcla_pool lcla_pool;
@@ -360,11 +281,6 @@ struct d40_base {
 	dma_addr_t phy_lcpa;
 	resource_size_t lcpa_size;
 	struct kmem_cache *desc_slab;
-	u32 reg_val_backup[BACKUP_REGS_SZ];
-	u32 reg_val_backup_v3[BACKUP_REGS_SZ_V3];
-	u32 *reg_val_backup_chan;
-	u16 gcc_pwr_off_mask;
-	bool initialized;
 };
 
 /**
@@ -562,14 +478,13 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
 		struct d40_desc *d;
 		struct d40_desc *_d;
 
-		list_for_each_entry_safe(d, _d, &d40c->client, node) {
+		list_for_each_entry_safe(d, _d, &d40c->client, node)
 			if (async_tx_test_ack(&d->txd)) {
 				d40_desc_remove(d);
 				desc = d;
 				memset(desc, 0, sizeof(*desc));
 				break;
 			}
-		}
 	}
 
 	if (!desc)
@@ -620,7 +535,6 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
 	bool cyclic = desc->cyclic;
 	int curr_lcla = -EINVAL;
 	int first_lcla = 0;
-	bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
 	bool linkback;
 
 	/*
@@ -693,16 +607,11 @@
 					       &lli->src[lli_current],
 					       next_lcla, flags);
 
-			/*
-			 * Cache maintenance is not needed if lcla is
-			 * mapped in esram
-			 */
-			if (!use_esram_lcla) {
-				dma_sync_single_range_for_device(chan->base->dev,
-							pool->dma_addr, lcla_offset,
-							2 * sizeof(struct d40_log_lli),
-							DMA_TO_DEVICE);
-			}
+			dma_sync_single_range_for_device(chan->base->dev,
+						pool->dma_addr, lcla_offset,
+						2 * sizeof(struct d40_log_lli),
+						DMA_TO_DEVICE);
+
 			curr_lcla = next_lcla;
 
 			if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
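The restored code always flushes the two-descriptor window of the LCLA pool that the CPU just wrote, instead of skipping cache maintenance when the LCLA lives in esram. A minimal sketch of the streaming-DMA pattern in play, with dev/buf/len/off/chunk as stand-in names:

	/* Map the pool once at setup time. */
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* ... CPU fills bytes [off, off + chunk) of the buffer ... */

	/* Flush only that window out to the device before starting DMA. */
	dma_sync_single_range_for_device(dev, handle, off, chunk,
					 DMA_TO_DEVICE);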
@@ -830,64 +739,10 @@ static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
 	return len;
 }
 
+/* Support functions for logical channels */
 
-#ifdef CONFIG_PM
-static void dma40_backup(void __iomem *baseaddr, u32 *backup,
-			 u32 *regaddr, int num, bool save)
-{
-	int i;
-
-	for (i = 0; i < num; i++) {
-		void __iomem *addr = baseaddr + regaddr[i];
-
-		if (save)
-			backup[i] = readl_relaxed(addr);
-		else
-			writel_relaxed(backup[i], addr);
-	}
-}
-
-static void d40_save_restore_registers(struct d40_base *base, bool save)
-{
-	int i;
-
-	/* Save/Restore channel specific registers */
-	for (i = 0; i < base->num_phy_chans; i++) {
-		void __iomem *addr;
-		int idx;
-
-		if (base->phy_res[i].reserved)
-			continue;
-
-		addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
-		idx = i * ARRAY_SIZE(d40_backup_regs_chan);
-
-		dma40_backup(addr, &base->reg_val_backup_chan[idx],
-			     d40_backup_regs_chan,
-			     ARRAY_SIZE(d40_backup_regs_chan),
-			     save);
-	}
-
-	/* Save/Restore global registers */
-	dma40_backup(base->virtbase, base->reg_val_backup,
-		     d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
-		     save);
-
-	/* Save/Restore registers only existing on dma40 v3 and later */
-	if (base->rev >= 3)
-		dma40_backup(base->virtbase, base->reg_val_backup_v3,
-			     d40_backup_regs_v3,
-			     ARRAY_SIZE(d40_backup_regs_v3),
-			     save);
-}
-#else
-static void d40_save_restore_registers(struct d40_base *base, bool save)
-{
-}
-#endif
-
-static int __d40_execute_command_phy(struct d40_chan *d40c,
-				     enum d40_command command)
+static int d40_channel_execute_command(struct d40_chan *d40c,
+				       enum d40_command command)
 {
 	u32 status;
 	int i;
@@ -896,12 +751,6 @@ static int __d40_execute_command_phy(struct d40_chan *d40c,
 	unsigned long flags;
 	u32 wmask;
 
-	if (command == D40_DMA_STOP) {
-		ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
-		if (ret)
-			return ret;
-	}
-
 	spin_lock_irqsave(&d40c->base->execmd_lock, flags);
 
 	if (d40c->phy_chan->num % 2 == 0)
@@ -995,109 +844,67 @@ static void d40_term_all(struct d40_chan *d40c)
 	}
 
 	d40c->pending_tx = 0;
+	d40c->busy = false;
 }
 
-static void __d40_config_set_event(struct d40_chan *d40c,
-				   enum d40_events event_type, u32 event,
-				   int reg)
+static void __d40_config_set_event(struct d40_chan *d40c, bool enable,
+				   u32 event, int reg)
 {
 	void __iomem *addr = chan_base(d40c) + reg;
 	int tries;
-	u32 status;
-
-	switch (event_type) {
-
-	case D40_DEACTIVATE_EVENTLINE:
 
+	if (!enable) {
 		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
 		       | ~D40_EVENTLINE_MASK(event), addr);
-		break;
-
-	case D40_SUSPEND_REQ_EVENTLINE:
-		status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
-			 D40_EVENTLINE_POS(event);
-
-		if (status == D40_DEACTIVATE_EVENTLINE ||
-		    status == D40_SUSPEND_REQ_EVENTLINE)
-			break;
-
-		writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
-		       | ~D40_EVENTLINE_MASK(event), addr);
-
-		for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {
-
-			status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
-				 D40_EVENTLINE_POS(event);
-
-			cpu_relax();
-			/*
-			 * Reduce the number of bus accesses while
-			 * waiting for the DMA to suspend.
-			 */
-			udelay(3);
-
-			if (status == D40_DEACTIVATE_EVENTLINE)
-				break;
-		}
-
-		if (tries == D40_SUSPEND_MAX_IT) {
-			chan_err(d40c,
-				 "unable to stop the event_line chl %d (log: %d)"
-				 "status %x\n", d40c->phy_chan->num,
-				 d40c->log_num, status);
-		}
-		break;
+		return;
+	}
 
-	case D40_ACTIVATE_EVENTLINE:
 	/*
 	 * The hardware sometimes doesn't register the enable when src and dst
 	 * event lines are active on the same logical channel. Retry to ensure
 	 * it does. Usually only one retry is sufficient.
 	 */
 	tries = 100;
 	while (--tries) {
-		writel((D40_ACTIVATE_EVENTLINE <<
-			D40_EVENTLINE_POS(event)) |
-		       ~D40_EVENTLINE_MASK(event), addr);
-
-		if (readl(addr) & D40_EVENTLINE_MASK(event))
-			break;
-	}
-
-	if (tries != 99)
-		dev_dbg(chan2dev(d40c),
-			"[%s] workaround enable S%cLNK (%d tries)\n",
-			__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
-			100 - tries);
+		writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
+		       | ~D40_EVENTLINE_MASK(event), addr);
 
-	WARN_ON(!tries);
-	break;
+		if (readl(addr) & D40_EVENTLINE_MASK(event))
+			break;
+	}
 
-	case D40_ROUND_EVENTLINE:
-		BUG();
-		break;
+	if (tries != 99)
+		dev_dbg(chan2dev(d40c),
+			"[%s] workaround enable S%cLNK (%d tries)\n",
+			__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
+			100 - tries);
 
-	}
+	WARN_ON(!tries);
 }
 
-static void d40_config_set_event(struct d40_chan *d40c,
-				 enum d40_events event_type)
+static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&d40c->phy_chan->lock, flags);
+
 	/* Enable event line connected to device (or memcpy) */
 	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
 	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
 		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
 
-		__d40_config_set_event(d40c, event_type, event,
+		__d40_config_set_event(d40c, do_enable, event,
 				       D40_CHAN_REG_SSLNK);
 	}
 
 	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
 		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
 
-		__d40_config_set_event(d40c, event_type, event,
+		__d40_config_set_event(d40c, do_enable, event,
 				       D40_CHAN_REG_SDLNK);
 	}
+
+	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
 }
 
 static u32 d40_chan_has_events(struct d40_chan *d40c)
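Both versions rely on the write pattern (state << D40_EVENTLINE_POS(event)) | ~D40_EVENTLINE_MASK(event): each event line owns a two-bit field in the SxLNK register, and setting every other field to all-ones (the value of the removed D40_ROUND_EVENTLINE status code) apparently tells the hardware to leave those lines alone, avoiding a read-modify-write. A sketch of the same idea with hypothetical EV_POS/EV_MASK helpers (the "all-ones is a no-op" behaviour is an assumption inferred from the driver, not stated in this diff):

	/* Hypothetical helpers mirroring D40_EVENTLINE_POS()/_MASK():
	 * each event line owns a 2-bit field in the link register. */
	#define EV_POS(ev)	(2 * (ev))
	#define EV_MASK(ev)	(0x3 << EV_POS(ev))

	static void set_eventline(void __iomem *addr, unsigned int ev, u32 state)
	{
		/* Writing 0b11 to all other fields is assumed to leave
		 * them unchanged, so no read-modify-write is needed. */
		writel((state << EV_POS(ev)) | ~EV_MASK(ev), addr);
	}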
@@ -1111,64 +918,6 @@ static u32 d40_chan_has_events(struct d40_chan *d40c)
 	return val;
 }
 
-static int
-__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
-{
-	unsigned long flags;
-	int ret = 0;
-	u32 active_status;
-	void __iomem *active_reg;
-
-	if (d40c->phy_chan->num % 2 == 0)
-		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
-	else
-		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
-
-
-	spin_lock_irqsave(&d40c->phy_chan->lock, flags);
-
-	switch (command) {
-	case D40_DMA_STOP:
-	case D40_DMA_SUSPEND_REQ:
-
-		active_status = (readl(active_reg) &
-				 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
-				 D40_CHAN_POS(d40c->phy_chan->num);
-
-		if (active_status == D40_DMA_RUN)
-			d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
-		else
-			d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);
-
-		if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
-			ret = __d40_execute_command_phy(d40c, command);
-
-		break;
-
-	case D40_DMA_RUN:
-
-		d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
-		ret = __d40_execute_command_phy(d40c, command);
-		break;
-
-	case D40_DMA_SUSPENDED:
-		BUG();
-		break;
-	}
-
-	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
-	return ret;
-}
-
-static int d40_channel_execute_command(struct d40_chan *d40c,
-				       enum d40_command command)
-{
-	if (chan_is_logical(d40c))
-		return __d40_execute_command_log(d40c, command);
-	else
-		return __d40_execute_command_phy(d40c, command);
-}
-
 static u32 d40_get_prmo(struct d40_chan *d40c)
 {
 	static const unsigned int phy_map[] = {
@@ -1223,10 +972,6 @@
 		/* Set LIDX for lcla */
 		writel(lidx, chanbase + D40_CHAN_REG_SSELT);
 		writel(lidx, chanbase + D40_CHAN_REG_SDELT);
-
-		/* Clear LNK which will be used by d40_chan_has_events() */
-		writel(0, chanbase + D40_CHAN_REG_SSLNK);
-		writel(0, chanbase + D40_CHAN_REG_SDLNK);
 	}
 }
 
@@ -1267,13 +1012,19 @@ static int d40_pause(struct d40_chan *d40c)
 	if (!d40c->busy)
 		return 0;
 
-	pm_runtime_get_sync(d40c->base->dev);
 	spin_lock_irqsave(&d40c->lock, flags);
 
 	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+	if (res == 0) {
+		if (chan_is_logical(d40c)) {
+			d40_config_set_event(d40c, false);
+			/* Resume the other logical channels if any */
+			if (d40_chan_has_events(d40c))
+				res = d40_channel_execute_command(d40c,
+								  D40_DMA_RUN);
+		}
+	}
 
-	pm_runtime_mark_last_busy(d40c->base->dev);
-	pm_runtime_put_autosuspend(d40c->base->dev);
 	spin_unlock_irqrestore(&d40c->lock, flags);
 	return res;
 }
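The pm_runtime calls stripped from d40_pause() follow the standard reference-counted runtime-PM idiom; a sketch of that pattern around any hardware access (dev stands for the controller's struct device):

	pm_runtime_get_sync(dev);	/* wake the controller, may sleep */

	/* ... poke channel registers ... */

	pm_runtime_mark_last_busy(dev);	 /* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev); /* drop ref; suspend after delay */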
@@ -1287,18 +1038,44 @@ static int d40_resume(struct d40_chan *d40c)
 		return 0;
 
 	spin_lock_irqsave(&d40c->lock, flags);
-	pm_runtime_get_sync(d40c->base->dev);
+
+	if (d40c->base->rev == 0)
+		if (chan_is_logical(d40c)) {
+			res = d40_channel_execute_command(d40c,
+							  D40_DMA_SUSPEND_REQ);
+			goto no_suspend;
+		}
 
 	/* If bytes left to transfer or linked tx resume job */
-	if (d40_residue(d40c) || d40_tx_is_linked(d40c))
+	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
+
+		if (chan_is_logical(d40c))
+			d40_config_set_event(d40c, true);
+
 		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
+	}
 
-	pm_runtime_mark_last_busy(d40c->base->dev);
-	pm_runtime_put_autosuspend(d40c->base->dev);
+no_suspend:
 	spin_unlock_irqrestore(&d40c->lock, flags);
 	return res;
 }
 
+static int d40_terminate_all(struct d40_chan *chan)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	ret = d40_pause(chan);
+	if (!ret && chan_is_physical(chan))
+		ret = d40_channel_execute_command(chan, D40_DMA_STOP);
+
+	spin_lock_irqsave(&chan->lock, flags);
+	d40_term_all(chan);
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	return ret;
+}
+
 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct d40_chan *d40c = container_of(tx->chan,
@@ -1306,18 +1083,39 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
 					     chan);
 	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
 	unsigned long flags;
-	dma_cookie_t cookie;
 
 	spin_lock_irqsave(&d40c->lock, flags);
-	cookie = dma_cookie_assign(tx);
+
+	d40c->chan.cookie++;
+
+	if (d40c->chan.cookie < 0)
+		d40c->chan.cookie = 1;
+
+	d40d->txd.cookie = d40c->chan.cookie;
+
 	d40_desc_queue(d40c, d40d);
+
 	spin_unlock_irqrestore(&d40c->lock, flags);
 
-	return cookie;
+	return tx->cookie;
 }
 
 static int d40_start(struct d40_chan *d40c)
 {
+	if (d40c->base->rev == 0) {
+		int err;
+
+		if (chan_is_logical(d40c)) {
+			err = d40_channel_execute_command(d40c,
+							  D40_DMA_SUSPEND_REQ);
+			if (err)
+				return err;
+		}
+	}
+
+	if (chan_is_logical(d40c))
+		d40_config_set_event(d40c, true);
+
 	return d40_channel_execute_command(d40c, D40_DMA_RUN);
 }
 
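The open-coded cookie increment that replaces dma_cookie_assign() is the pre-consolidation equivalent of that helper. Roughly what the dmaengine core's version does (paraphrased from drivers/dma/dmaengine.h; not part of this diff):

	static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
	{
		struct dma_chan *chan = tx->chan;
		dma_cookie_t cookie;

		cookie = chan->cookie + 1;
		if (cookie < DMA_MIN_COOKIE)	/* skip the reserved values */
			cookie = DMA_MIN_COOKIE;
		tx->cookie = chan->cookie = cookie;

		return cookie;
	}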
@@ -1330,10 +1128,7 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
 	d40d = d40_first_queued(d40c);
 
 	if (d40d != NULL) {
-		if (!d40c->busy) {
-			d40c->busy = true;
-			pm_runtime_get_sync(d40c->base->dev);
-		}
+		d40c->busy = true;
 
 		/* Remove from queue */
 		d40_desc_remove(d40d);
@@ -1394,8 +1189,6 @@ static void dma_tc_handle(struct d40_chan *d40c)
 
 		if (d40_queue_start(d40c) == NULL)
 			d40c->busy = false;
-		pm_runtime_mark_last_busy(d40c->base->dev);
-		pm_runtime_put_autosuspend(d40c->base->dev);
 	}
 
 	d40c->pending_tx++;
@@ -1419,7 +1212,7 @@ static void dma_tasklet(unsigned long data)
 		goto err;
 
 	if (!d40d->cyclic)
-		dma_cookie_complete(&d40d->txd);
+		d40c->completed = d40d->txd.cookie;
 
 	/*
 	 * If terminating a channel pending_tx is set to zero.
@@ -1460,8 +1253,8 @@
 
 	return;
 
 err:
-	/* Rescue manouver if receiving double interrupts */
+	/* Rescue manoeuvre if receiving double interrupts */
 	if (d40c->pending_tx > 0)
 		d40c->pending_tx--;
 	spin_unlock_irqrestore(&d40c->lock, flags);
@@ -1611,16 +1404,11 @@ static int d40_validate_conf(struct d40_chan *d40c,
 	return res;
 }
 
-static bool d40_alloc_mask_set(struct d40_phy_res *phy,
-			       bool is_src, int log_event_line, bool is_log,
-			       bool *first_user)
+static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
+			       int log_event_line, bool is_log)
 {
 	unsigned long flags;
 	spin_lock_irqsave(&phy->lock, flags);
-
-	*first_user = ((phy->allocated_src | phy->allocated_dst)
-			== D40_ALLOC_FREE);
-
 	if (!is_log) {
 		/* Physical interrupts are masked per physical full channel */
 		if (phy->allocated_src == D40_ALLOC_FREE &&
@@ -1701,7 +1489,7 @@ out:
 	return is_free;
 }
 
-static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
+static int d40_allocate_channel(struct d40_chan *d40c)
 {
 	int dev_type;
 	int event_group;
@@ -1737,8 +1525,7 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
 		for (i = 0; i < d40c->base->num_phy_chans; i++) {
 
 			if (d40_alloc_mask_set(&phys[i], is_src,
-					       0, is_log,
-					       first_phy_user))
+					       0, is_log))
 				goto found_phy;
 		}
 	} else
@@ -1748,8 +1535,7 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
 			if (d40_alloc_mask_set(&phys[i],
 					       is_src,
 					       0,
-					       is_log,
-					       first_phy_user))
+					       is_log))
 				goto found_phy;
 		}
 	}
@@ -1765,25 +1551,6 @@ found_phy:
 	/* Find logical channel */
 	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
 		int phy_num = j + event_group * 2;
-
-		if (d40c->dma_cfg.use_fixed_channel) {
-			i = d40c->dma_cfg.phy_channel;
-
-			if ((i != phy_num) && (i != phy_num + 1)) {
-				dev_err(chan2dev(d40c),
-					"invalid fixed phy channel %d\n", i);
-				return -EINVAL;
-			}
-
-			if (d40_alloc_mask_set(&phys[i], is_src, event_line,
-					       is_log, first_phy_user))
-				goto found_log;
-
-			dev_err(chan2dev(d40c),
-				"could not allocate fixed phy channel %d\n", i);
-			return -EINVAL;
-		}
-
 		/*
 		 * Spread logical channels across all available physical rather
 		 * than pack every logical channel at the first available phy
@@ -1792,15 +1559,13 @@ found_phy:
 		if (is_src) {
 			for (i = phy_num; i < phy_num + 2; i++) {
 				if (d40_alloc_mask_set(&phys[i], is_src,
-						       event_line, is_log,
-						       first_phy_user))
+						       event_line, is_log))
 					goto found_log;
 			}
 		} else {
 			for (i = phy_num + 1; i >= phy_num; i--) {
 				if (d40_alloc_mask_set(&phys[i], is_src,
-						       event_line, is_log,
-						       first_phy_user))
+						       event_line, is_log))
 					goto found_log;
 			}
 		}
@@ -1842,6 +1607,7 @@ static int d40_config_memcpy(struct d40_chan *d40c)
 	return 0;
 }
 
+
 static int d40_free_dma(struct d40_chan *d40c)
 {
 
@@ -1876,33 +1642,50 @@ static int d40_free_dma(struct d40_chan *d40c)
 		return -EINVAL;
 	}
 
-	pm_runtime_get_sync(d40c->base->dev);
-	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
+	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
 	if (res) {
-		chan_err(d40c, "stop failed\n");
-		goto out;
+		chan_err(d40c, "suspend failed\n");
+		return res;
 	}
 
-	d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
+	if (chan_is_logical(d40c)) {
+		/* Release logical channel, deactivate the event line */
 
-	if (chan_is_logical(d40c))
+		d40_config_set_event(d40c, false);
 		d40c->base->lookup_log_chans[d40c->log_num] = NULL;
-	else
-		d40c->base->lookup_phy_chans[phy->num] = NULL;
 
-	if (d40c->busy) {
-		pm_runtime_mark_last_busy(d40c->base->dev);
-		pm_runtime_put_autosuspend(d40c->base->dev);
+		/*
+		 * Check if there are more logical allocation
+		 * on this phy channel.
+		 */
+		if (!d40_alloc_mask_free(phy, is_src, event)) {
+			/* Resume the other logical channels if any */
+			if (d40_chan_has_events(d40c)) {
+				res = d40_channel_execute_command(d40c,
+								  D40_DMA_RUN);
+				if (res) {
+					chan_err(d40c,
+						 "Executing RUN command\n");
+					return res;
+				}
+			}
+			return 0;
+		}
+	} else {
+		(void) d40_alloc_mask_free(phy, is_src, 0);
 	}
 
-	d40c->busy = false;
+	/* Release physical channel */
+	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
+	if (res) {
+		chan_err(d40c, "Failed to stop channel\n");
+		return res;
+	}
 	d40c->phy_chan = NULL;
 	d40c->configured = false;
-out:
+	d40c->base->lookup_phy_chans[phy->num] = NULL;
 
-	pm_runtime_mark_last_busy(d40c->base->dev);
-	pm_runtime_put_autosuspend(d40c->base->dev);
-	return res;
+	return 0;
 }
 
 static bool d40_is_paused(struct d40_chan *d40c)
@@ -2071,7 +1854,7 @@ err:
 }
 
 static dma_addr_t
-d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
+d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
 {
 	struct stedma40_platform_data *plat = chan->base->plat_data;
 	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
@@ -2080,9 +1863,9 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
 	if (chan->runtime_addr)
 		return chan->runtime_addr;
 
-	if (direction == DMA_DEV_TO_MEM)
+	if (direction == DMA_FROM_DEVICE)
 		addr = plat->dev_rx[cfg->src_dev_type];
-	else if (direction == DMA_MEM_TO_DEV)
+	else if (direction == DMA_TO_DEVICE)
 		addr = plat->dev_tx[cfg->dst_dev_type];
 
 	return addr;
@@ -2091,7 +1874,7 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
 static struct dma_async_tx_descriptor *
 d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
 	    struct scatterlist *sg_dst, unsigned int sg_len,
-	    enum dma_transfer_direction direction, unsigned long dma_flags)
+	    enum dma_data_direction direction, unsigned long dma_flags)
 {
 	struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
 	dma_addr_t src_dev_addr = 0;
@@ -2115,12 +1898,12 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
 	if (sg_next(&sg_src[sg_len - 1]) == sg_src)
 		desc->cyclic = true;
 
-	if (direction != DMA_TRANS_NONE) {
+	if (direction != DMA_NONE) {
 		dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
 
-		if (direction == DMA_DEV_TO_MEM)
+		if (direction == DMA_FROM_DEVICE)
 			src_dev_addr = dev_addr;
-		else if (direction == DMA_MEM_TO_DEV)
+		else if (direction == DMA_TO_DEVICE)
 			dst_dev_addr = dev_addr;
 	}
 
@@ -2217,7 +2000,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
 	bool is_free_phy;
 	spin_lock_irqsave(&d40c->lock, flags);
 
-	dma_cookie_init(chan);
+	d40c->completed = chan->cookie = 1;
 
 	/* If no dma configuration is set use default configuration (memcpy) */
 	if (!d40c->configured) {
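The assignment d40c->completed = chan->cookie = 1 restored here is the manual form of the removed dma_cookie_init() helper, which does roughly this (paraphrased from the dmaengine core, where DMA_MIN_COOKIE is 1):

	static inline void dma_cookie_init(struct dma_chan *chan)
	{
		chan->cookie = DMA_MIN_COOKIE;
		chan->completed_cookie = DMA_MIN_COOKIE;
	}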
@@ -2227,15 +2010,14 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
 			goto fail;
 		}
 	}
+	is_free_phy = (d40c->phy_chan == NULL);
 
-	err = d40_allocate_channel(d40c, &is_free_phy);
+	err = d40_allocate_channel(d40c);
 	if (err) {
 		chan_err(d40c, "Failed to allocate channel\n");
-		d40c->configured = false;
 		goto fail;
 	}
 
-	pm_runtime_get_sync(d40c->base->dev);
 	/* Fill in basic CFG register values */
 	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
 		    &d40c->dst_def_cfg, chan_is_logical(d40c));
@@ -2255,12 +2037,6 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
 			D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
 	}
 
-	dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
-		 chan_is_logical(d40c) ? "logical" : "physical",
-		 d40c->phy_chan->num,
-		 d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
-
-
 	/*
 	 * Only write channel configuration to the DMA if the physical
 	 * resource is free. In case of multiple logical channels
@@ -2269,8 +2045,6 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
 	if (is_free_phy)
 		d40_config_write(d40c);
 fail:
-	pm_runtime_mark_last_busy(d40c->base->dev);
-	pm_runtime_put_autosuspend(d40c->base->dev);
 	spin_unlock_irqrestore(&d40c->lock, flags);
 	return err;
 }
@@ -2333,11 +2107,10 @@ d40_prep_memcpy_sg(struct dma_chan *chan,
 static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
 							 struct scatterlist *sgl,
 							 unsigned int sg_len,
-							 enum dma_transfer_direction direction,
-							 unsigned long dma_flags,
-							 void *context)
+							 enum dma_data_direction direction,
+							 unsigned long dma_flags)
 {
-	if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV)
+	if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE)
 		return NULL;
 
 	return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
@@ -2346,8 +2119,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
 static struct dma_async_tx_descriptor *
 dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
 		      size_t buf_len, size_t period_len,
-		      enum dma_transfer_direction direction, unsigned long flags,
-		      void *context)
+		      enum dma_data_direction direction)
 {
 	unsigned int periods = buf_len / period_len;
 	struct dma_async_tx_descriptor *txd;
@@ -2362,7 +2134,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
 	}
 
 	sg[periods].offset = 0;
-	sg_dma_len(&sg[periods]) = 0;
+	sg[periods].length = 0;
 	sg[periods].page_link =
 		((unsigned long)sg | 0x01) & ~0x02;
 
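The page_link line above hand-rolls a circular scatterlist: bit 0 marks the entry as a chain pointer and clearing bit 1 removes the end-of-list mark, so the extra sentinel entry links back to sg[0] and the cyclic transfer never terminates. The scatterlist API's sg_chain() encodes exactly this expression; a sketch of the equivalent call:

	/* Chain the sentinel entry of a (periods + 1)-entry table back to
	 * the head, producing a cyclic list for the hardware to follow. */
	sg_chain(sg, periods + 1, sg);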
@@ -2379,19 +2151,25 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
 				     struct dma_tx_state *txstate)
 {
 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
-	enum dma_status ret;
+	dma_cookie_t last_used;
+	dma_cookie_t last_complete;
+	int ret;
 
 	if (d40c->phy_chan == NULL) {
 		chan_err(d40c, "Cannot read status of unallocated channel\n");
 		return -EINVAL;
 	}
 
-	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret != DMA_SUCCESS)
-		dma_set_residue(txstate, stedma40_residue(chan));
+	last_complete = d40c->completed;
+	last_used = chan->cookie;
 
 	if (d40_is_paused(d40c))
 		ret = DMA_PAUSED;
+	else
+		ret = dma_async_is_complete(cookie, last_complete, last_used);
+
+	dma_set_tx_state(txstate, last_complete, last_used,
+			 stedma40_residue(chan));
 
 	return ret;
 }
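The restored d40_tx_status() classifies a cookie with dma_async_is_complete(), which handles wrap-around of the cookie counter; roughly (paraphrased from include/linux/dmaengine.h of this era, when the success value was still spelled DMA_SUCCESS):

	static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
	{
		if (last_complete <= last_used) {
			if ((cookie <= last_complete) || (cookie > last_used))
				return DMA_SUCCESS;
		} else {	/* the cookie counter has wrapped */
			if ((cookie <= last_complete) && (cookie > last_used))
				return DMA_SUCCESS;
		}
		return DMA_IN_PROGRESS;
	}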
@@ -2417,31 +2195,6 @@ static void d40_issue_pending(struct dma_chan *chan)
 	spin_unlock_irqrestore(&d40c->lock, flags);
 }
 
-static void d40_terminate_all(struct dma_chan *chan)
-{
-	unsigned long flags;
-	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
-	int ret;
-
-	spin_lock_irqsave(&d40c->lock, flags);
-
-	pm_runtime_get_sync(d40c->base->dev);
-	ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
-	if (ret)
-		chan_err(d40c, "Failed to stop channel\n");
-
-	d40_term_all(d40c);
-	pm_runtime_mark_last_busy(d40c->base->dev);
-	pm_runtime_put_autosuspend(d40c->base->dev);
-	if (d40c->busy) {
-		pm_runtime_mark_last_busy(d40c->base->dev);
-		pm_runtime_put_autosuspend(d40c->base->dev);
-	}
-	d40c->busy = false;
-
-	spin_unlock_irqrestore(&d40c->lock, flags);
-}
-
 static int
 dma40_config_to_halfchannel(struct d40_chan *d40c,
 			    struct stedma40_half_channel_info *info,
@@ -2515,7 +2268,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
 	dst_addr_width = config->dst_addr_width;
 	dst_maxburst = config->dst_maxburst;
 
-	if (config->direction == DMA_DEV_TO_MEM) {
+	if (config->direction == DMA_FROM_DEVICE) {
 		dma_addr_t dev_addr_rx =
 			d40c->base->plat_data->dev_rx[cfg->src_dev_type];
 
@@ -2538,7 +2291,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
 		if (dst_maxburst == 0)
 			dst_maxburst = src_maxburst;
 
-	} else if (config->direction == DMA_MEM_TO_DEV) {
+	} else if (config->direction == DMA_TO_DEVICE) {
 		dma_addr_t dev_addr_tx =
 			d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
 
@@ -2603,7 +2356,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
 		"configured channel %s for %s, data width %d/%d, "
 		"maxburst %d/%d elements, LE, no flow control\n",
 		dma_chan_name(chan),
-		(config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
+		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
 		src_addr_width, dst_addr_width,
 		src_maxburst, dst_maxburst);
 
@@ -2622,8 +2375,7 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 
 	switch (cmd) {
 	case DMA_TERMINATE_ALL:
-		d40_terminate_all(chan);
-		return 0;
+		return d40_terminate_all(d40c);
 	case DMA_PAUSE:
 		return d40_pause(d40c);
 	case DMA_RESUME:
@@ -2766,72 +2518,6 @@ failure1:
 	return err;
 }
 
-/* Suspend resume functionality */
-#ifdef CONFIG_PM
-static int dma40_pm_suspend(struct device *dev)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct d40_base *base = platform_get_drvdata(pdev);
-	int ret = 0;
-	if (!pm_runtime_suspended(dev))
-		return -EBUSY;
-
-	if (base->lcpa_regulator)
-		ret = regulator_disable(base->lcpa_regulator);
-	return ret;
-}
-
-static int dma40_runtime_suspend(struct device *dev)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct d40_base *base = platform_get_drvdata(pdev);
-
-	d40_save_restore_registers(base, true);
-
-	/* Don't disable/enable clocks for v1 due to HW bugs */
-	if (base->rev != 1)
-		writel_relaxed(base->gcc_pwr_off_mask,
-			       base->virtbase + D40_DREG_GCC);
-
-	return 0;
-}
-
-static int dma40_runtime_resume(struct device *dev)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct d40_base *base = platform_get_drvdata(pdev);
-
-	if (base->initialized)
-		d40_save_restore_registers(base, false);
-
-	writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
-		       base->virtbase + D40_DREG_GCC);
-	return 0;
-}
-
-static int dma40_resume(struct device *dev)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct d40_base *base = platform_get_drvdata(pdev);
-	int ret = 0;
-
-	if (base->lcpa_regulator)
-		ret = regulator_enable(base->lcpa_regulator);
-
-	return ret;
-}
-
-static const struct dev_pm_ops dma40_pm_ops = {
-	.suspend		= dma40_pm_suspend,
-	.runtime_suspend	= dma40_runtime_suspend,
-	.runtime_resume		= dma40_runtime_resume,
-	.resume			= dma40_resume,
-};
-#define DMA40_PM_OPS	(&dma40_pm_ops)
-#else
-#define DMA40_PM_OPS	NULL
-#endif
-
 /* Initialization functions. */
 
 static int __init d40_phy_res_init(struct d40_base *base)
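A dev_pm_ops table like the one deleted above is hooked up through the .pm field of the platform driver. A sketch of that wiring (the surrounding d40_driver definition is not part of this diff and is reconstructed here on the usual pattern):

	static struct platform_driver d40_driver = {
		.driver = {
			.owner	= THIS_MODULE,
			.name	= D40_NAME,
			.pm	= DMA40_PM_OPS,	/* NULL when !CONFIG_PM */
		},
	};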
@@ -2840,7 +2526,6 @@ static int __init d40_phy_res_init(struct d40_base *base)
 	int num_phy_chans_avail = 0;
 	u32 val[2];
 	int odd_even_bit = -2;
-	int gcc = D40_DREG_GCC_ENA;
 
 	val[0] = readl(base->virtbase + D40_DREG_PRSME);
 	val[1] = readl(base->virtbase + D40_DREG_PRSMO);
@@ -2852,17 +2537,9 @@ static int __init d40_phy_res_init(struct d40_base *base)
 			/* Mark security only channels as occupied */
 			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
 			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
-			base->phy_res[i].reserved = true;
-			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
-						       D40_DREG_GCC_SRC);
-			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
-						       D40_DREG_GCC_DST);
-
-
 		} else {
 			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
 			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
-			base->phy_res[i].reserved = false;
 			num_phy_chans_avail++;
 		}
 		spin_lock_init(&base->phy_res[i].lock);
@@ -2874,11 +2551,6 @@ static int __init d40_phy_res_init(struct d40_base *base)
 
 		base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
 		base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
-		base->phy_res[chan].reserved = true;
-		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
-					       D40_DREG_GCC_SRC);
-		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
-					       D40_DREG_GCC_DST);
 		num_phy_chans_avail--;
 	}
 
@@ -2899,15 +2571,6 @@ static int __init d40_phy_res_init(struct d40_base *base) | |||
2899 | val[0] = val[0] >> 2; | 2571 | val[0] = val[0] >> 2; |
2900 | } | 2572 | } |
2901 | 2573 | ||
2902 | /* | ||
2903 | * To keep things simple, enable all clocks initially. | ||
2904 | * The clocks will be managed later, after channel allocation. | ||
2905 | * The clocks for the event lines on which reserved channels exist | ||
2906 | * are not managed here. | ||
2907 | */ | ||
2908 | writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC); | ||
2909 | base->gcc_pwr_off_mask = gcc; | ||
2910 | |||
2911 | return num_phy_chans_avail; | 2574 | return num_phy_chans_avail; |
2912 | } | 2575 | } |
2913 | 2576 | ||
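Taken together, the hunks above strip the per-event-group clock bookkeeping out of d40_phy_res_init(). Condensed, the removed old-side logic amounted to the following; every macro and field name below is taken from the removed lines themselves:

	int gcc = D40_DREG_GCC_ENA;

	/* For each reserved physical channel, keep its event group's src
	 * and dst clocks out of the mask written back on runtime suspend. */
	gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
				       D40_DREG_GCC_SRC);
	gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
				       D40_DREG_GCC_DST);

	/* Enable every clock at init; dma40_runtime_suspend() later writes
	 * the accumulated mask to D40_DREG_GCC. */
	writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
	base->gcc_pwr_off_mask = gcc;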
@@ -2920,23 +2583,19 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2920 | struct d40_base *base = NULL; | 2583 | struct d40_base *base = NULL; |
2921 | int num_log_chans = 0; | 2584 | int num_log_chans = 0; |
2922 | int num_phy_chans; | 2585 | int num_phy_chans; |
2923 | int clk_ret = -EINVAL; | ||
2924 | int i; | 2586 | int i; |
2925 | u32 pid; | 2587 | u32 pid; |
2926 | u32 cid; | 2588 | u32 cid; |
2927 | u8 rev; | 2589 | u8 rev; |
2928 | 2590 | ||
2929 | clk = clk_get(&pdev->dev, NULL); | 2591 | clk = clk_get(&pdev->dev, NULL); |
2592 | |||
2930 | if (IS_ERR(clk)) { | 2593 | if (IS_ERR(clk)) { |
2931 | d40_err(&pdev->dev, "No matching clock found\n"); | 2594 | d40_err(&pdev->dev, "No matching clock found\n"); |
2932 | goto failure; | 2595 | goto failure; |
2933 | } | 2596 | } |
2934 | 2597 | ||
2935 | clk_ret = clk_prepare_enable(clk); | 2598 | clk_enable(clk); |
2936 | if (clk_ret) { | ||
2937 | d40_err(&pdev->dev, "Failed to prepare/enable clock\n"); | ||
2938 | goto failure; | ||
2939 | } | ||
2940 | 2599 | ||
2941 | /* Get IO for DMAC base address */ | 2600 | /* Get IO for DMAC base address */ |
2942 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); | 2601 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); |
@@ -2984,12 +2643,6 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2984 | dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", | 2643 | dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", |
2985 | rev, res->start); | 2644 | rev, res->start); |
2986 | 2645 | ||
2987 | if (rev < 2) { | ||
2988 | d40_err(&pdev->dev, "hardware revision: %d is not supported\n", | ||
2989 | rev); | ||
2990 | goto failure; | ||
2991 | } | ||
2992 | |||
2993 | plat_data = pdev->dev.platform_data; | 2646 | plat_data = pdev->dev.platform_data; |
2994 | 2647 | ||
2995 | /* Count the number of logical channels in use */ | 2648 | /* Count the number of logical channels in use */ |
@@ -3045,15 +2698,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
3045 | goto failure; | 2698 | goto failure; |
3046 | } | 2699 | } |
3047 | 2700 | ||
3048 | base->reg_val_backup_chan = kmalloc(base->num_phy_chans * | 2701 | base->lcla_pool.alloc_map = kzalloc(num_phy_chans * |
3049 | sizeof(d40_backup_regs_chan), | 2702 | sizeof(struct d40_desc *) * |
2703 | D40_LCLA_LINK_PER_EVENT_GRP, | ||
3050 | GFP_KERNEL); | 2704 | GFP_KERNEL); |
3051 | if (!base->reg_val_backup_chan) | ||
3052 | goto failure; | ||
3053 | |||
3054 | base->lcla_pool.alloc_map = | ||
3055 | kzalloc(num_phy_chans * sizeof(struct d40_desc *) | ||
3056 | * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL); | ||
3057 | if (!base->lcla_pool.alloc_map) | 2705 | if (!base->lcla_pool.alloc_map) |
3058 | goto failure; | 2706 | goto failure; |
3059 | 2707 | ||
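Both sides of the hunk above size lcla_pool.alloc_map as num_phy_chans * D40_LCLA_LINK_PER_EVENT_GRP descriptor pointers, zero-initialized. As an aside, kcalloc() expresses the same zeroed allocation with an overflow check; a sketch, not what either side actually uses:

	/* kcalloc() checks the count * size multiplication for overflow;
	 * the count itself mirrors the open-coded product in the hunk. */
	base->lcla_pool.alloc_map =
		kcalloc(num_phy_chans * D40_LCLA_LINK_PER_EVENT_GRP,
			sizeof(struct d40_desc *), GFP_KERNEL);
	if (!base->lcla_pool.alloc_map)
		goto failure;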
@@ -3066,10 +2714,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
3066 | return base; | 2714 | return base; |
3067 | 2715 | ||
3068 | failure: | 2716 | failure: |
3069 | if (!clk_ret) | 2717 | if (!IS_ERR(clk)) { |
3070 | clk_disable_unprepare(clk); | 2718 | clk_disable(clk); |
3071 | if (!IS_ERR(clk)) | ||
3072 | clk_put(clk); | 2719 | clk_put(clk); |
2720 | } | ||
3073 | if (virtbase) | 2721 | if (virtbase) |
3074 | iounmap(virtbase); | 2722 | iounmap(virtbase); |
3075 | if (res) | 2723 | if (res) |
@@ -3080,7 +2728,6 @@ failure: | |||
3080 | 2728 | ||
3081 | if (base) { | 2729 | if (base) { |
3082 | kfree(base->lcla_pool.alloc_map); | 2730 | kfree(base->lcla_pool.alloc_map); |
3083 | kfree(base->reg_val_backup_chan); | ||
3084 | kfree(base->lookup_log_chans); | 2731 | kfree(base->lookup_log_chans); |
3085 | kfree(base->lookup_phy_chans); | 2732 | kfree(base->lookup_phy_chans); |
3086 | kfree(base->phy_res); | 2733 | kfree(base->phy_res); |
@@ -3093,9 +2740,9 @@ failure: | |||
3093 | static void __init d40_hw_init(struct d40_base *base) | 2740 | static void __init d40_hw_init(struct d40_base *base) |
3094 | { | 2741 | { |
3095 | 2742 | ||
3096 | static struct d40_reg_val dma_init_reg[] = { | 2743 | static const struct d40_reg_val dma_init_reg[] = { |
3097 | /* Clock every part of the DMA block from start */ | 2744 | /* Clock every part of the DMA block from start */ |
3098 | { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL}, | 2745 | { .reg = D40_DREG_GCC, .val = 0x0000ff01}, |
3099 | 2746 | ||
3100 | /* Interrupts on all logical channels */ | 2747 | /* Interrupts on all logical channels */ |
3101 | { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF}, | 2748 | { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF}, |
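The right-hand side of the hunk above replaces the named GCC constant with the raw literal 0x0000ff01. A named macro keeps the probe-time write self-documenting; the definition below is hypothetical and simply wraps the literal from the new side:

	/* Hypothetical named form of the "clock every part of the DMA
	 * block" value written to the GCC register at init. */
	#define FOO_DREG_GCC_ENABLE_ALL	0x0000ff01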
@@ -3295,31 +2942,11 @@ static int __init d40_probe(struct platform_device *pdev) | |||
3295 | d40_err(&pdev->dev, "Failed to ioremap LCPA region\n"); | 2942 | d40_err(&pdev->dev, "Failed to ioremap LCPA region\n"); |
3296 | goto failure; | 2943 | goto failure; |
3297 | } | 2944 | } |
3298 | /* If LCLA has to be located in ESRAM, we don't need to allocate it */ | ||
3299 | if (base->plat_data->use_esram_lcla) { | ||
3300 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, | ||
3301 | "lcla_esram"); | ||
3302 | if (!res) { | ||
3303 | ret = -ENOENT; | ||
3304 | d40_err(&pdev->dev, | ||
3305 | "No \"lcla_esram\" memory resource\n"); | ||
3306 | goto failure; | ||
3307 | } | ||
3308 | base->lcla_pool.base = ioremap(res->start, | ||
3309 | resource_size(res)); | ||
3310 | if (!base->lcla_pool.base) { | ||
3311 | ret = -ENOMEM; | ||
3312 | d40_err(&pdev->dev, "Failed to ioremap LCLA region\n"); | ||
3313 | goto failure; | ||
3314 | } | ||
3315 | writel(res->start, base->virtbase + D40_DREG_LCLA); | ||
3316 | 2945 | ||
3317 | } else { | 2946 | ret = d40_lcla_allocate(base); |
3318 | ret = d40_lcla_allocate(base); | 2947 | if (ret) { |
3319 | if (ret) { | 2948 | d40_err(&pdev->dev, "Failed to allocate LCLA area\n"); |
3320 | d40_err(&pdev->dev, "Failed to allocate LCLA area\n"); | 2949 | goto failure; |
3321 | goto failure; | ||
3322 | } | ||
3323 | } | 2950 | } |
3324 | 2951 | ||
3325 | spin_lock_init(&base->lcla_pool.lock); | 2952 | spin_lock_init(&base->lcla_pool.lock); |
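The branch removed above covered platforms where the LCLA area sits in ESRAM rather than in memory obtained through d40_lcla_allocate(). Its resource handling, reduced to a standalone sketch (error handling collapsed to plain returns; the driver used goto, and "lcla_esram" is the resource name from the removed lines):

	static int foo_map_lcla_esram(struct platform_device *pdev,
				      struct d40_base *base)
	{
		struct resource *res;

		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "lcla_esram");
		if (!res)
			return -ENOENT;

		base->lcla_pool.base = ioremap(res->start, resource_size(res));
		if (!base->lcla_pool.base)
			return -ENOMEM;

		/* Point the controller's LCLA base register at the ESRAM area */
		writel(res->start, base->virtbase + D40_DREG_LCLA);
		return 0;
	}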
@@ -3332,32 +2959,6 @@ static int __init d40_probe(struct platform_device *pdev) | |||
3332 | goto failure; | 2959 | goto failure; |
3333 | } | 2960 | } |
3334 | 2961 | ||
3335 | pm_runtime_irq_safe(base->dev); | ||
3336 | pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY); | ||
3337 | pm_runtime_use_autosuspend(base->dev); | ||
3338 | pm_runtime_enable(base->dev); | ||
3339 | pm_runtime_resume(base->dev); | ||
3340 | |||
3341 | if (base->plat_data->use_esram_lcla) { | ||
3342 | |||
3343 | base->lcpa_regulator = regulator_get(base->dev, "lcla_esram"); | ||
3344 | if (IS_ERR(base->lcpa_regulator)) { | ||
3345 | d40_err(&pdev->dev, "Failed to get lcpa_regulator\n"); | ||
3346 | base->lcpa_regulator = NULL; | ||
3347 | goto failure; | ||
3348 | } | ||
3349 | |||
3350 | ret = regulator_enable(base->lcpa_regulator); | ||
3351 | if (ret) { | ||
3352 | d40_err(&pdev->dev, | ||
3353 | "Failed to enable lcpa_regulator\n"); | ||
3354 | regulator_put(base->lcpa_regulator); | ||
3355 | base->lcpa_regulator = NULL; | ||
3356 | goto failure; | ||
3357 | } | ||
3358 | } | ||
3359 | |||
3360 | base->initialized = true; | ||
3361 | err = d40_dmaengine_init(base, num_reserved_chans); | 2962 | err = d40_dmaengine_init(base, num_reserved_chans); |
3362 | if (err) | 2963 | if (err) |
3363 | goto failure; | 2964 | goto failure; |
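The probe-time block removed above did two things: it brought up runtime PM with autosuspend, and, for the ESRAM-LCLA case, acquired and enabled the lcla_esram regulator. The runtime-PM half reduces to the standard sequence below (DMA40_AUTOSUSPEND_DELAY was 100 ms in the removed code):

	/* Callbacks must be safe to invoke from atomic context */
	pm_runtime_irq_safe(base->dev);
	/* Suspend only after the device has been idle for the delay */
	pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(base->dev);
	pm_runtime_enable(base->dev);
	pm_runtime_resume(base->dev);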
@@ -3374,11 +2975,6 @@ failure: | |||
3374 | if (base->virtbase) | 2975 | if (base->virtbase) |
3375 | iounmap(base->virtbase); | 2976 | iounmap(base->virtbase); |
3376 | 2977 | ||
3377 | if (base->lcla_pool.base && base->plat_data->use_esram_lcla) { | ||
3378 | iounmap(base->lcla_pool.base); | ||
3379 | base->lcla_pool.base = NULL; | ||
3380 | } | ||
3381 | |||
3382 | if (base->lcla_pool.dma_addr) | 2978 | if (base->lcla_pool.dma_addr) |
3383 | dma_unmap_single(base->dev, base->lcla_pool.dma_addr, | 2979 | dma_unmap_single(base->dev, base->lcla_pool.dma_addr, |
3384 | SZ_1K * base->num_phy_chans, | 2980 | SZ_1K * base->num_phy_chans, |
@@ -3401,11 +2997,6 @@ failure: | |||
3401 | clk_put(base->clk); | 2997 | clk_put(base->clk); |
3402 | } | 2998 | } |
3403 | 2999 | ||
3404 | if (base->lcpa_regulator) { | ||
3405 | regulator_disable(base->lcpa_regulator); | ||
3406 | regulator_put(base->lcpa_regulator); | ||
3407 | } | ||
3408 | |||
3409 | kfree(base->lcla_pool.alloc_map); | 3000 | kfree(base->lcla_pool.alloc_map); |
3410 | kfree(base->lookup_log_chans); | 3001 | kfree(base->lookup_log_chans); |
3411 | kfree(base->lookup_phy_chans); | 3002 | kfree(base->lookup_phy_chans); |
@@ -3421,7 +3012,6 @@ static struct platform_driver d40_driver = { | |||
3421 | .driver = { | 3012 | .driver = { |
3422 | .owner = THIS_MODULE, | 3013 | .owner = THIS_MODULE, |
3423 | .name = D40_NAME, | 3014 | .name = D40_NAME, |
3424 | .pm = DMA40_PM_OPS, | ||
3425 | }, | 3015 | }, |
3426 | }; | 3016 | }; |
3427 | 3017 | ||
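With dma40_pm_ops gone, the .pm hook disappears from the platform_driver above. For reference, the old-side wiring in generic form; foo_* are placeholders, and FOO_PM_OPS would fall back to NULL when CONFIG_PM is off, as in the removed #else branch:

	static struct platform_driver foo_driver = {
		.driver = {
			.owner	= THIS_MODULE,
			.name	= "foo",
			.pm	= FOO_PM_OPS,	/* &foo_pm_ops, or NULL without CONFIG_PM */
		},
	};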