Diffstat (limited to 'drivers/dma/ste_dma40.c')
 -rw-r--r--  drivers/dma/ste_dma40.c | 441
 1 file changed, 391 insertions(+), 50 deletions(-)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 13259cad0ceb..cc5ecbc067a3 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -14,6 +14,8 @@
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
 #include <linux/err.h>
 #include <linux/amba/bus.h>

@@ -32,6 +34,9 @@
 /* Maximum iterations taken before giving up suspending a channel */
 #define D40_SUSPEND_MAX_IT 500

+/* Milliseconds */
+#define DMA40_AUTOSUSPEND_DELAY	100
+
 /* Hardware requirement on LCLA alignment */
 #define LCLA_ALIGNMENT 0x40000

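The new constant feeds the runtime-PM autosuspend machinery that this patch wires up in d40_probe() further down. A minimal sketch of the probe-time setup, assuming dev is the already-registered struct device:

        pm_runtime_set_autosuspend_delay(dev, DMA40_AUTOSUSPEND_DELAY);
        pm_runtime_use_autosuspend(dev);
        pm_runtime_enable(dev);

With this in place, pm_runtime_put_autosuspend() does not power the hardware down immediately but starts a 100 ms idle timer, which absorbs back-to-back transfers cheaply.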
@@ -62,6 +67,55 @@ enum d40_command {
 	D40_DMA_SUSPENDED = 3
 };

+/*
+ * These are the registers that have to be saved and later restored
+ * when the DMA hw is powered off.
+ * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
+ */
+static u32 d40_backup_regs[] = {
+	D40_DREG_LCPA,
+	D40_DREG_LCLA,
+	D40_DREG_PRMSE,
+	D40_DREG_PRMSO,
+	D40_DREG_PRMOE,
+	D40_DREG_PRMOO,
+};
+
+#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
+
+/* TODO: Check if all these registers have to be saved/restored on dma40 v3 */
+static u32 d40_backup_regs_v3[] = {
+	D40_DREG_PSEG1,
+	D40_DREG_PSEG2,
+	D40_DREG_PSEG3,
+	D40_DREG_PSEG4,
+	D40_DREG_PCEG1,
+	D40_DREG_PCEG2,
+	D40_DREG_PCEG3,
+	D40_DREG_PCEG4,
+	D40_DREG_RSEG1,
+	D40_DREG_RSEG2,
+	D40_DREG_RSEG3,
+	D40_DREG_RSEG4,
+	D40_DREG_RCEG1,
+	D40_DREG_RCEG2,
+	D40_DREG_RCEG3,
+	D40_DREG_RCEG4,
+};
+
+#define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3)
+
+static u32 d40_backup_regs_chan[] = {
+	D40_CHAN_REG_SSCFG,
+	D40_CHAN_REG_SSELT,
+	D40_CHAN_REG_SSPTR,
+	D40_CHAN_REG_SSLNK,
+	D40_CHAN_REG_SDCFG,
+	D40_CHAN_REG_SDELT,
+	D40_CHAN_REG_SDPTR,
+	D40_CHAN_REG_SDLNK,
+};
+
 /**
  * struct d40_lli_pool - Structure for keeping LLIs in memory
  *
@@ -96,7 +150,7 @@ struct d40_lli_pool {
  * during a transfer.
  * @node: List entry.
  * @is_in_client_list: true if the client owns this descriptor.
- * the previous one.
+ * @cyclic: true if this is a cyclic job
  *
  * This descriptor is used for both logical and physical transfers.
  */
@@ -143,6 +197,7 @@ struct d40_lcla_pool {
  * channels.
  *
  * @lock: A lock protection this entity.
+ * @reserved: True if used by secure world or otherwise.
  * @num: The physical channel number of this entity.
  * @allocated_src: Bit mapped to show which src event line's are mapped to
  * this physical channel. Can also be free or physically allocated.
@@ -152,6 +207,7 @@ struct d40_lcla_pool {
  */
 struct d40_phy_res {
 	spinlock_t lock;
+	bool	   reserved;
 	int	   num;
 	u32	   allocated_src;
 	u32	   allocated_dst;
@@ -185,7 +241,6 @@ struct d40_base;
  * @src_def_cfg: Default cfg register setting for src.
  * @dst_def_cfg: Default cfg register setting for dst.
  * @log_def: Default logical channel settings.
- * @lcla: Space for one dst src pair for logical channel transfers.
  * @lcpa: Pointer to dst and src lcpa settings.
  * @runtime_addr: runtime configured address.
  * @runtime_direction: runtime configured direction.
@@ -217,7 +272,7 @@ struct d40_chan {
 	struct d40_log_lli_full *lcpa;
 	/* Runtime reconfiguration */
 	dma_addr_t runtime_addr;
-	enum dma_data_direction runtime_direction;
+	enum dma_transfer_direction runtime_direction;
 };

 /**
@@ -241,6 +296,7 @@ struct d40_chan {
  * @dma_both: dma_device channels that can do both memcpy and slave transfers.
  * @dma_slave: dma_device channels that can do only do slave transfers.
  * @dma_memcpy: dma_device channels that can do only do memcpy transfers.
+ * @phy_chans: Room for all possible physical channels in system.
  * @log_chans: Room for all possible logical channels in system.
  * @lookup_log_chans: Used to map interrupt number to logical channel. Points
  * to log_chans entries.
@@ -248,12 +304,20 @@ struct d40_chan {
  * to phy_chans entries.
  * @plat_data: Pointer to provided platform_data which is the driver
  * configuration.
+ * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
  * @phy_res: Vector containing all physical channels.
  * @lcla_pool: lcla pool settings and data.
  * @lcpa_base: The virtual mapped address of LCPA.
  * @phy_lcpa: The physical address of the LCPA.
  * @lcpa_size: The size of the LCPA area.
  * @desc_slab: cache for descriptors.
+ * @reg_val_backup: Here the values of some hardware registers are stored
+ * before the DMA is powered off. They are restored when the power is back on.
+ * @reg_val_backup_v3: Backup of registers that only exist on dma40 v3 and
+ * later.
+ * @reg_val_backup_chan: Backup data for standard channel parameter registers.
+ * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
+ * @initialized: true if the dma has been initialized
  */
 struct d40_base {
 	spinlock_t interrupt_lock;
@@ -275,6 +339,7 @@ struct d40_base {
 	struct d40_chan **lookup_log_chans;
 	struct d40_chan **lookup_phy_chans;
 	struct stedma40_platform_data *plat_data;
+	struct regulator *lcpa_regulator;
 	/* Physical half channels */
 	struct d40_phy_res *phy_res;
 	struct d40_lcla_pool lcla_pool;
@@ -282,6 +347,11 @@ struct d40_base {
 	dma_addr_t phy_lcpa;
 	resource_size_t lcpa_size;
 	struct kmem_cache *desc_slab;
+	u32 reg_val_backup[BACKUP_REGS_SZ];
+	u32 reg_val_backup_v3[BACKUP_REGS_SZ_V3];
+	u32 *reg_val_backup_chan;
+	u16 gcc_pwr_off_mask;
+	bool initialized;
 };

 /**
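Only the global and v3 backup buffers are statically sized by the tables above; the per-channel buffer is allocated at probe time, one u32 slot per channel register for every physical channel. A sketch of the sizing arithmetic, mirroring the kmalloc() added to d40_hw_detect_init() later in this patch:

        /* num_phy_chans copies of the 8-entry d40_backup_regs_chan table */
        base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
                                            sizeof(d40_backup_regs_chan),
                                            GFP_KERNEL);
        /* channel i's slice starts at i * ARRAY_SIZE(d40_backup_regs_chan) */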
@@ -479,13 +549,14 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
 		struct d40_desc *d;
 		struct d40_desc *_d;

-		list_for_each_entry_safe(d, _d, &d40c->client, node)
+		list_for_each_entry_safe(d, _d, &d40c->client, node) {
 			if (async_tx_test_ack(&d->txd)) {
 				d40_desc_remove(d);
 				desc = d;
 				memset(desc, 0, sizeof(*desc));
 				break;
 			}
+		}
 	}

 	if (!desc)
@@ -536,6 +607,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
 	bool cyclic = desc->cyclic;
 	int curr_lcla = -EINVAL;
 	int first_lcla = 0;
+	bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
 	bool linkback;

 	/*
@@ -608,11 +680,16 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
 					   &lli->src[lli_current],
 					   next_lcla, flags);

-			dma_sync_single_range_for_device(chan->base->dev,
-						pool->dma_addr, lcla_offset,
-						2 * sizeof(struct d40_log_lli),
-						DMA_TO_DEVICE);
-
+			/*
+			 * Cache maintenance is not needed if lcla is
+			 * mapped in esram
+			 */
+			if (!use_esram_lcla) {
+				dma_sync_single_range_for_device(chan->base->dev,
+							pool->dma_addr, lcla_offset,
+							2 * sizeof(struct d40_log_lli),
+							DMA_TO_DEVICE);
+			}
 			curr_lcla = next_lcla;

 			if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
@@ -740,7 +817,61 @@ static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
 	return len;
 }

-/* Support functions for logical channels */
+
+#ifdef CONFIG_PM
+static void dma40_backup(void __iomem *baseaddr, u32 *backup,
+			 u32 *regaddr, int num, bool save)
+{
+	int i;
+
+	for (i = 0; i < num; i++) {
+		void __iomem *addr = baseaddr + regaddr[i];
+
+		if (save)
+			backup[i] = readl_relaxed(addr);
+		else
+			writel_relaxed(backup[i], addr);
+	}
+}
+
+static void d40_save_restore_registers(struct d40_base *base, bool save)
+{
+	int i;
+
+	/* Save/Restore channel specific registers */
+	for (i = 0; i < base->num_phy_chans; i++) {
+		void __iomem *addr;
+		int idx;
+
+		if (base->phy_res[i].reserved)
+			continue;
+
+		addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
+		idx = i * ARRAY_SIZE(d40_backup_regs_chan);
+
+		dma40_backup(addr, &base->reg_val_backup_chan[idx],
+			     d40_backup_regs_chan,
+			     ARRAY_SIZE(d40_backup_regs_chan),
+			     save);
+	}
+
+	/* Save/Restore global registers */
+	dma40_backup(base->virtbase, base->reg_val_backup,
+		     d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
+		     save);
+
+	/* Save/Restore registers only existing on dma40 v3 and later */
+	if (base->rev >= 3)
+		dma40_backup(base->virtbase, base->reg_val_backup_v3,
+			     d40_backup_regs_v3,
+			     ARRAY_SIZE(d40_backup_regs_v3),
+			     save);
+}
+#else
+static void d40_save_restore_registers(struct d40_base *base, bool save)
+{
+}
+#endif

 static int d40_channel_execute_command(struct d40_chan *d40c,
 				       enum d40_command command)
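One helper serves both directions of the power transition; only the save flag differs. A usage sketch (the actual calls appear in the runtime-PM callbacks added near the end of this patch):

        d40_save_restore_registers(base, true);   /* runtime suspend: save */
        /* ... the DMA power domain may be gated here ... */
        d40_save_restore_registers(base, false);  /* runtime resume: restore */

The relaxed accessors avoid a full barrier per register in this loop, which is fine here since nothing else depends on the ordering of these individual accesses.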
@@ -973,6 +1104,10 @@ static void d40_config_write(struct d40_chan *d40c)
 		/* Set LIDX for lcla */
 		writel(lidx, chanbase + D40_CHAN_REG_SSELT);
 		writel(lidx, chanbase + D40_CHAN_REG_SDELT);
+
+		/* Clear LNK which will be used by d40_chan_has_events() */
+		writel(0, chanbase + D40_CHAN_REG_SSLNK);
+		writel(0, chanbase + D40_CHAN_REG_SDLNK);
 	}
 }

@@ -1013,6 +1148,7 @@ static int d40_pause(struct d40_chan *d40c)
 	if (!d40c->busy)
 		return 0;

+	pm_runtime_get_sync(d40c->base->dev);
 	spin_lock_irqsave(&d40c->lock, flags);

 	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
@@ -1025,7 +1161,8 @@ static int d40_pause(struct d40_chan *d40c)
 						  D40_DMA_RUN);
 		}
 	}
-
+	pm_runtime_mark_last_busy(d40c->base->dev);
+	pm_runtime_put_autosuspend(d40c->base->dev);
 	spin_unlock_irqrestore(&d40c->lock, flags);
 	return res;
 }
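This is the pattern the whole patch follows: every pm_runtime_get_sync() is balanced by a pm_runtime_mark_last_busy()/pm_runtime_put_autosuspend() pair on every exit path, so the usage count only drops to zero once all activity is done, and the autosuspend timer then decides when the hardware actually powers down:

        pm_runtime_get_sync(d40c->base->dev);      /* count > 0: hw stays up */
        /* ... issue SUSPEND_REQ / RUN commands ... */
        pm_runtime_mark_last_busy(d40c->base->dev);
        pm_runtime_put_autosuspend(d40c->base->dev); /* count 0: idle timer starts */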
@@ -1039,7 +1176,7 @@ static int d40_resume(struct d40_chan *d40c)
 		return 0;

 	spin_lock_irqsave(&d40c->lock, flags);
-
+	pm_runtime_get_sync(d40c->base->dev);
 	if (d40c->base->rev == 0)
 		if (chan_is_logical(d40c)) {
 			res = d40_channel_execute_command(d40c,
@@ -1057,6 +1194,8 @@ static int d40_resume(struct d40_chan *d40c)
 	}

 no_suspend:
+	pm_runtime_mark_last_busy(d40c->base->dev);
+	pm_runtime_put_autosuspend(d40c->base->dev);
 	spin_unlock_irqrestore(&d40c->lock, flags);
 	return res;
 }
@@ -1129,7 +1268,10 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
 	d40d = d40_first_queued(d40c);

 	if (d40d != NULL) {
-		d40c->busy = true;
+		if (!d40c->busy)
+			d40c->busy = true;
+
+		pm_runtime_get_sync(d40c->base->dev);

 		/* Remove from queue */
 		d40_desc_remove(d40d);
@@ -1190,6 +1332,8 @@ static void dma_tc_handle(struct d40_chan *d40c)

 		if (d40_queue_start(d40c) == NULL)
 			d40c->busy = false;
+		pm_runtime_mark_last_busy(d40c->base->dev);
+		pm_runtime_put_autosuspend(d40c->base->dev);
 	}

 	d40c->pending_tx++;
@@ -1405,11 +1549,16 @@ static int d40_validate_conf(struct d40_chan *d40c,
 	return res;
 }

-static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
-			       int log_event_line, bool is_log)
+static bool d40_alloc_mask_set(struct d40_phy_res *phy,
+			       bool is_src, int log_event_line, bool is_log,
+			       bool *first_user)
 {
 	unsigned long flags;
 	spin_lock_irqsave(&phy->lock, flags);
+
+	*first_user = ((phy->allocated_src | phy->allocated_dst)
+			== D40_ALLOC_FREE);
+
 	if (!is_log) {
 		/* Physical interrupts are masked per physical full channel */
 		if (phy->allocated_src == D40_ALLOC_FREE &&
@@ -1490,7 +1639,7 @@ out:
 	return is_free;
 }

-static int d40_allocate_channel(struct d40_chan *d40c)
+static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
 {
 	int dev_type;
 	int event_group;
@@ -1526,7 +1675,8 @@ static int d40_allocate_channel(struct d40_chan *d40c)
 		for (i = 0; i < d40c->base->num_phy_chans; i++) {

 			if (d40_alloc_mask_set(&phys[i], is_src,
-					       0, is_log))
+					       0, is_log,
+					       first_phy_user))
 				goto found_phy;
 		}
 	} else
@@ -1536,7 +1686,8 @@
 				if (d40_alloc_mask_set(&phys[i],
 						       is_src,
 						       0,
-						       is_log))
+						       is_log,
+						       first_phy_user))
 					goto found_phy;
 			}
 		}
@@ -1552,6 +1703,25 @@ found_phy:
 	/* Find logical channel */
 	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
 		int phy_num = j + event_group * 2;
+
+		if (d40c->dma_cfg.use_fixed_channel) {
+			i = d40c->dma_cfg.phy_channel;
+
+			if ((i != phy_num) && (i != phy_num + 1)) {
+				dev_err(chan2dev(d40c),
+					"invalid fixed phy channel %d\n", i);
+				return -EINVAL;
+			}
+
+			if (d40_alloc_mask_set(&phys[i], is_src, event_line,
+					       is_log, first_phy_user))
+				goto found_log;
+
+			dev_err(chan2dev(d40c),
+				"could not allocate fixed phy channel %d\n", i);
+			return -EINVAL;
+		}
+
 		/*
 		 * Spread logical channels across all available physical rather
 		 * than pack every logical channel at the first available phy
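use_fixed_channel and phy_channel come from the client's stedma40_chan_cfg, which lives in the ste_dma40 platform header; the field names here follow this patch series and are not visible in this file, so take the sketch as illustrative. A client pinning a logical transfer to physical channel 2 might pass:

        struct stedma40_chan_cfg cfg = {
                .dir               = STEDMA40_PERIPH_TO_MEM,
                .use_fixed_channel = true,
                .phy_channel       = 2,
        };

The validation above enforces the one hard constraint: the fixed channel must be phy_num or phy_num + 1 of the device's event group.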
@@ -1560,13 +1730,15 @@ found_phy:
 		if (is_src) {
 			for (i = phy_num; i < phy_num + 2; i++) {
 				if (d40_alloc_mask_set(&phys[i], is_src,
-						       event_line, is_log))
+						       event_line, is_log,
+						       first_phy_user))
 					goto found_log;
 			}
 		} else {
 			for (i = phy_num + 1; i >= phy_num; i--) {
 				if (d40_alloc_mask_set(&phys[i], is_src,
-						       event_line, is_log))
+						       event_line, is_log,
+						       first_phy_user))
 					goto found_log;
 			}
 		}
@@ -1643,10 +1815,11 @@ static int d40_free_dma(struct d40_chan *d40c)
 		return -EINVAL;
 	}

+	pm_runtime_get_sync(d40c->base->dev);
 	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
 	if (res) {
 		chan_err(d40c, "suspend failed\n");
-		return res;
+		goto out;
 	}

 	if (chan_is_logical(d40c)) {
@@ -1664,13 +1837,11 @@ static int d40_free_dma(struct d40_chan *d40c)
 			if (d40_chan_has_events(d40c)) {
 				res = d40_channel_execute_command(d40c,
 								  D40_DMA_RUN);
-				if (res) {
+				if (res)
 					chan_err(d40c,
 						"Executing RUN command\n");
-					return res;
-				}
 			}
-			return 0;
+			goto out;
 		}
 	} else {
 		(void) d40_alloc_mask_free(phy, is_src, 0);
@@ -1680,13 +1851,23 @@ static int d40_free_dma(struct d40_chan *d40c)
 	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
 	if (res) {
 		chan_err(d40c, "Failed to stop channel\n");
-		return res;
+		goto out;
 	}
+
+	if (d40c->busy) {
+		pm_runtime_mark_last_busy(d40c->base->dev);
+		pm_runtime_put_autosuspend(d40c->base->dev);
+	}
+
+	d40c->busy = false;
 	d40c->phy_chan = NULL;
 	d40c->configured = false;
 	d40c->base->lookup_phy_chans[phy->num] = NULL;
+out:

-	return 0;
+	pm_runtime_mark_last_busy(d40c->base->dev);
+	pm_runtime_put_autosuspend(d40c->base->dev);
+	return res;
 }

 static bool d40_is_paused(struct d40_chan *d40c)
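Note the restructured error handling in d40_free_dma(): the early returns become goto out, so the runtime-PM reference taken at the top is dropped on every path, success or failure. Schematically:

        pm_runtime_get_sync(d40c->base->dev);
        res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
        if (res)
                goto out;       /* error path still drops the reference */
        /* ... */
out:
        pm_runtime_mark_last_busy(d40c->base->dev);
        pm_runtime_put_autosuspend(d40c->base->dev);
        return res;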
@@ -1855,7 +2036,7 @@ err:
 }

 static dma_addr_t
-d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
+d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
 {
 	struct stedma40_platform_data *plat = chan->base->plat_data;
 	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
@@ -1864,9 +2045,9 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
 	if (chan->runtime_addr)
 		return chan->runtime_addr;

-	if (direction == DMA_FROM_DEVICE)
+	if (direction == DMA_DEV_TO_MEM)
 		addr = plat->dev_rx[cfg->src_dev_type];
-	else if (direction == DMA_TO_DEVICE)
+	else if (direction == DMA_MEM_TO_DEV)
 		addr = plat->dev_tx[cfg->dst_dev_type];

 	return addr;
@@ -1875,7 +2056,7 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
 static struct dma_async_tx_descriptor *
 d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
 	    struct scatterlist *sg_dst, unsigned int sg_len,
-	    enum dma_data_direction direction, unsigned long dma_flags)
+	    enum dma_transfer_direction direction, unsigned long dma_flags)
 {
 	struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
 	dma_addr_t src_dev_addr = 0;
@@ -1902,9 +2083,9 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
 	if (direction != DMA_NONE) {
 		dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);

-		if (direction == DMA_FROM_DEVICE)
+		if (direction == DMA_DEV_TO_MEM)
 			src_dev_addr = dev_addr;
-		else if (direction == DMA_TO_DEVICE)
+		else if (direction == DMA_MEM_TO_DEV)
 			dst_dev_addr = dev_addr;
 	}

@@ -2011,14 +2192,15 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
 			goto fail;
 		}
 	}
-	is_free_phy = (d40c->phy_chan == NULL);

-	err = d40_allocate_channel(d40c);
+	err = d40_allocate_channel(d40c, &is_free_phy);
 	if (err) {
 		chan_err(d40c, "Failed to allocate channel\n");
+		d40c->configured = false;
 		goto fail;
 	}

+	pm_runtime_get_sync(d40c->base->dev);
 	/* Fill in basic CFG register values */
 	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
 		    &d40c->dst_def_cfg, chan_is_logical(d40c));
@@ -2038,6 +2220,12 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
 			D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
 	}

+	dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
+		 chan_is_logical(d40c) ? "logical" : "physical",
+		 d40c->phy_chan->num,
+		 d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
+
+
 	/*
 	 * Only write channel configuration to the DMA if the physical
 	 * resource is free. In case of multiple logical channels
@@ -2046,6 +2234,8 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
 	if (is_free_phy)
 		d40_config_write(d40c);
 fail:
+	pm_runtime_mark_last_busy(d40c->base->dev);
+	pm_runtime_put_autosuspend(d40c->base->dev);
 	spin_unlock_irqrestore(&d40c->lock, flags);
 	return err;
 }
@@ -2108,10 +2298,10 @@ d40_prep_memcpy_sg(struct dma_chan *chan,
 static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
 							 struct scatterlist *sgl,
 							 unsigned int sg_len,
-							 enum dma_data_direction direction,
+							 enum dma_transfer_direction direction,
 							 unsigned long dma_flags)
 {
-	if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE)
+	if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV)
 		return NULL;

 	return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
@@ -2120,7 +2310,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
 static struct dma_async_tx_descriptor *
 dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
 		     size_t buf_len, size_t period_len,
-		     enum dma_data_direction direction)
+		     enum dma_transfer_direction direction)
 {
 	unsigned int periods = buf_len / period_len;
 	struct dma_async_tx_descriptor *txd;
@@ -2269,7 +2459,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
 	dst_addr_width = config->dst_addr_width;
 	dst_maxburst = config->dst_maxburst;

-	if (config->direction == DMA_FROM_DEVICE) {
+	if (config->direction == DMA_DEV_TO_MEM) {
 		dma_addr_t dev_addr_rx =
 			d40c->base->plat_data->dev_rx[cfg->src_dev_type];

@@ -2292,7 +2482,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
 		if (dst_maxburst == 0)
 			dst_maxburst = src_maxburst;

-	} else if (config->direction == DMA_TO_DEVICE) {
+	} else if (config->direction == DMA_MEM_TO_DEV) {
 		dma_addr_t dev_addr_tx =
 			d40c->base->plat_data->dev_tx[cfg->dst_dev_type];

@@ -2357,7 +2547,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
 		"configured channel %s for %s, data width %d/%d, "
 		"maxburst %d/%d elements, LE, no flow control\n",
 		dma_chan_name(chan),
-		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
+		(config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
 		src_addr_width, dst_addr_width,
 		src_maxburst, dst_maxburst);

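The enum churn in the hunks above tracks a dmaengine-wide API change: slave transfer directions moved from the generic enum dma_data_direction (DMA_TO_DEVICE, DMA_FROM_DEVICE) to a dedicated enum dma_transfer_direction (DMA_MEM_TO_DEV, DMA_DEV_TO_MEM). A client-side sketch under the new names, where the FIFO address is a placeholder:

        struct dma_slave_config cfg = {
                .direction      = DMA_DEV_TO_MEM,       /* was DMA_FROM_DEVICE */
                .src_addr       = dev_fifo_addr,        /* hypothetical device FIFO */
                .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .src_maxburst   = 8,
        };

        dmaengine_slave_config(chan, &cfg);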
@@ -2519,6 +2709,72 @@ failure1:
 	return err;
 }

+/* Suspend resume functionality */
+#ifdef CONFIG_PM
+static int dma40_pm_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct d40_base *base = platform_get_drvdata(pdev);
+	int ret = 0;
+	if (!pm_runtime_suspended(dev))
+		return -EBUSY;
+
+	if (base->lcpa_regulator)
+		ret = regulator_disable(base->lcpa_regulator);
+	return ret;
+}
+
+static int dma40_runtime_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct d40_base *base = platform_get_drvdata(pdev);
+
+	d40_save_restore_registers(base, true);
+
+	/* Don't disable/enable clocks for v1 due to HW bugs */
+	if (base->rev != 1)
+		writel_relaxed(base->gcc_pwr_off_mask,
+			       base->virtbase + D40_DREG_GCC);
+
+	return 0;
+}
+
+static int dma40_runtime_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct d40_base *base = platform_get_drvdata(pdev);
+
+	if (base->initialized)
+		d40_save_restore_registers(base, false);
+
+	writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
+		       base->virtbase + D40_DREG_GCC);
+	return 0;
+}
+
+static int dma40_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct d40_base *base = platform_get_drvdata(pdev);
+	int ret = 0;
+
+	if (base->lcpa_regulator)
+		ret = regulator_enable(base->lcpa_regulator);
+
+	return ret;
+}
+
+static const struct dev_pm_ops dma40_pm_ops = {
+	.suspend		= dma40_pm_suspend,
+	.runtime_suspend	= dma40_runtime_suspend,
+	.runtime_resume		= dma40_runtime_resume,
+	.resume			= dma40_resume,
+};
+#define DMA40_PM_OPS	(&dma40_pm_ops)
+#else
+#define DMA40_PM_OPS	NULL
+#endif
+
 /* Initialization functions. */

 static int __init d40_phy_res_init(struct d40_base *base)
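The dev_pm_ops table above is filled in by hand. A more compact sketch using the helper macros from <linux/pm.h> would look like this; note it is not a strict drop-in, since SET_SYSTEM_SLEEP_PM_OPS also wires the hibernation callbacks to the same functions:

        static const struct dev_pm_ops dma40_pm_ops = {
                SET_SYSTEM_SLEEP_PM_OPS(dma40_pm_suspend, dma40_resume)
                SET_RUNTIME_PM_OPS(dma40_runtime_suspend,
                                   dma40_runtime_resume, NULL)
        };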
@@ -2527,6 +2783,7 @@ static int __init d40_phy_res_init(struct d40_base *base)
 	int num_phy_chans_avail = 0;
 	u32 val[2];
 	int odd_even_bit = -2;
+	int gcc = D40_DREG_GCC_ENA;

 	val[0] = readl(base->virtbase + D40_DREG_PRSME);
 	val[1] = readl(base->virtbase + D40_DREG_PRSMO);
@@ -2538,9 +2795,17 @@ static int __init d40_phy_res_init(struct d40_base *base)
 			/* Mark security only channels as occupied */
 			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
 			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
+			base->phy_res[i].reserved = true;
+			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
+						       D40_DREG_GCC_SRC);
+			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
+						       D40_DREG_GCC_DST);
+
+
 		} else {
 			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
 			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
+			base->phy_res[i].reserved = false;
 			num_phy_chans_avail++;
 		}
 		spin_lock_init(&base->phy_res[i].lock);
@@ -2552,6 +2817,11 @@ static int __init d40_phy_res_init(struct d40_base *base)

 		base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
 		base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
+		base->phy_res[chan].reserved = true;
+		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
+					       D40_DREG_GCC_SRC);
+		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
+					       D40_DREG_GCC_DST);
 		num_phy_chans_avail--;
 	}

@@ -2572,6 +2842,15 @@ static int __init d40_phy_res_init(struct d40_base *base)
 		val[0] = val[0] >> 2;
 	}

+	/*
+	 * To keep things simple, enable all clocks initially.
+	 * The clocks will get managed later, post channel allocation.
+	 * The clocks for the event lines on which reserved channels exist
+	 * are not managed here.
+	 */
+	writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
+	base->gcc_pwr_off_mask = gcc;
+
 	return num_phy_chans_avail;
 }

@@ -2699,10 +2978,15 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 		goto failure;
 	}

-	base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
-					    sizeof(struct d40_desc *) *
-					    D40_LCLA_LINK_PER_EVENT_GRP,
+	base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
+					    sizeof(d40_backup_regs_chan),
 					    GFP_KERNEL);
+	if (!base->reg_val_backup_chan)
+		goto failure;
+
+	base->lcla_pool.alloc_map =
+		kzalloc(num_phy_chans * sizeof(struct d40_desc *)
+			* D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
 	if (!base->lcla_pool.alloc_map)
 		goto failure;

@@ -2741,9 +3025,9 @@ failure:
 static void __init d40_hw_init(struct d40_base *base)
 {

-	static const struct d40_reg_val dma_init_reg[] = {
+	static struct d40_reg_val dma_init_reg[] = {
 		/* Clock every part of the DMA block from start */
-		{ .reg = D40_DREG_GCC, .val = 0x0000ff01},
+		{ .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},

 		/* Interrupts on all logical channels */
 		{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
@@ -2943,11 +3227,31 @@ static int __init d40_probe(struct platform_device *pdev)
 		d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
 		goto failure;
 	}
+	/* If lcla has to be located in ESRAM we don't need to allocate */
+	if (base->plat_data->use_esram_lcla) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						   "lcla_esram");
+		if (!res) {
+			ret = -ENOENT;
+			d40_err(&pdev->dev,
+				"No \"lcla_esram\" memory resource\n");
+			goto failure;
+		}
+		base->lcla_pool.base = ioremap(res->start,
+					       resource_size(res));
+		if (!base->lcla_pool.base) {
+			ret = -ENOMEM;
+			d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
+			goto failure;
+		}
+		writel(res->start, base->virtbase + D40_DREG_LCLA);

-	ret = d40_lcla_allocate(base);
-	if (ret) {
-		d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
-		goto failure;
+	} else {
+		ret = d40_lcla_allocate(base);
+		if (ret) {
+			d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
+			goto failure;
+		}
 	}

 	spin_lock_init(&base->lcla_pool.lock);
@@ -2960,6 +3264,32 @@ static int __init d40_probe(struct platform_device *pdev)
 		goto failure;
 	}

+	pm_runtime_irq_safe(base->dev);
+	pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
+	pm_runtime_use_autosuspend(base->dev);
+	pm_runtime_enable(base->dev);
+	pm_runtime_resume(base->dev);
+
+	if (base->plat_data->use_esram_lcla) {
+
+		base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
+		if (IS_ERR(base->lcpa_regulator)) {
+			d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
+			base->lcpa_regulator = NULL;
+			goto failure;
+		}
+
+		ret = regulator_enable(base->lcpa_regulator);
+		if (ret) {
+			d40_err(&pdev->dev,
+				"Failed to enable lcpa_regulator\n");
+			regulator_put(base->lcpa_regulator);
+			base->lcpa_regulator = NULL;
+			goto failure;
+		}
+	}
+
+	base->initialized = true;
 	err = d40_dmaengine_init(base, num_reserved_chans);
 	if (err)
 		goto failure;
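pm_runtime_irq_safe() is what allows the pm_runtime_get_sync()/pm_runtime_put_autosuspend() calls earlier in this patch to sit inside spinlocked, interrupt-off sections such as d40_resume() and dma_tc_handle(): the PM core will then invoke the runtime callbacks with interrupts disabled, so dma40_runtime_suspend()/dma40_runtime_resume() must never sleep, hence the relaxed register accessors and the absence of any clk or mutex operations there. The resulting shape, as a sketch:

        spin_lock_irqsave(&d40c->lock, flags);
        pm_runtime_get_sync(d40c->base->dev);   /* legal in atomic context only
                                                   after pm_runtime_irq_safe() */
        /* ... program channel registers ... */
        pm_runtime_mark_last_busy(d40c->base->dev);
        pm_runtime_put_autosuspend(d40c->base->dev);
        spin_unlock_irqrestore(&d40c->lock, flags);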
@@ -2976,6 +3306,11 @@ failure:
 	if (base->virtbase)
 		iounmap(base->virtbase);

+	if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
+		iounmap(base->lcla_pool.base);
+		base->lcla_pool.base = NULL;
+	}
+
 	if (base->lcla_pool.dma_addr)
 		dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
 				 SZ_1K * base->num_phy_chans,
@@ -2998,6 +3333,11 @@ failure:
 		clk_put(base->clk);
 	}

+	if (base->lcpa_regulator) {
+		regulator_disable(base->lcpa_regulator);
+		regulator_put(base->lcpa_regulator);
+	}
+
 	kfree(base->lcla_pool.alloc_map);
 	kfree(base->lookup_log_chans);
 	kfree(base->lookup_phy_chans);
@@ -3013,6 +3353,7 @@ static struct platform_driver d40_driver = {
 	.driver = {
 		.owner = THIS_MODULE,
 		.name  = D40_NAME,
+		.pm = DMA40_PM_OPS,
 	},
 };
