author		Narayanan G <narayanan.gopalakrishnan@stericsson.com>	2011-11-17 06:56:41 -0500
committer	Vinod Koul <vinod.koul@linux.intel.com>			2011-11-21 23:16:06 -0500
commit		7fb3e75e1833743d5faf3adbae46b63f503c6fdf (patch)
tree		32ae4d6b39e5a552e9727ff15a891e4ba2b980fb /drivers/dma
parent		ca21a146a45a179a2a7bc86d938a2fbf571a7510 (diff)
dmaengine/ste_dma40: support pm in dma40
This patch adds power management support to the dma40
driver. The DMA registers are backed up and restored
during suspend/resume. Flags to track DMA usage have been
introduced to facilitate this. The patch also includes a
few other minor changes related to formatting and comments.
Signed-off-by: Narayanan G <narayanan.gopalakrishnan@stericsson.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/ste_dma40.c		267
-rw-r--r--	drivers/dma/ste_dma40_ll.h	 11
2 files changed, 259 insertions, 19 deletions
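For reference, the runtime PM pattern the patch wraps around channel activity looks roughly like the sketch below: take a reference before touching the hardware, and drop it with autosuspend once the channel goes idle, so the block can power down after DMA40_AUTOSUSPEND_DELAY milliseconds of inactivity. This is a minimal illustration of the generic pm_runtime calls the patch uses, not code lifted from the driver; the example function names and the bare device pointer are placeholders.

	#include <linux/device.h>
	#include <linux/pm_runtime.h>

	/* Illustrative only: bracket hardware activity with a runtime PM reference. */
	static void example_channel_start(struct device *dev)
	{
		pm_runtime_get_sync(dev);	/* resume the DMA block if it was suspended */
		/* ... program the channel registers and start the transfer ... */
	}

	static void example_channel_done(struct device *dev)
	{
		/* ... terminal-count handling ... */
		pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
		pm_runtime_put_autosuspend(dev);	/* drop the reference; suspend later if idle */
	}

	/* At probe time the device opts in to autosuspend (delay in milliseconds). */
	static void example_runtime_pm_setup(struct device *dev)
	{
		pm_runtime_irq_safe(dev);
		pm_runtime_set_autosuspend_delay(dev, 100);
		pm_runtime_use_autosuspend(dev);
		pm_runtime_enable(dev);
	}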
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 15b311d54b74..c2cf8cfaf7d4 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -14,6 +14,8 @@
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
 #include <linux/err.h>
 #include <linux/amba/bus.h>
 
@@ -32,6 +34,9 @@
 /* Maximum iterations taken before giving up suspending a channel */
 #define D40_SUSPEND_MAX_IT 500
 
+/* Milliseconds */
+#define DMA40_AUTOSUSPEND_DELAY	100
+
 /* Hardware requirement on LCLA alignment */
 #define LCLA_ALIGNMENT 0x40000
 
@@ -62,6 +67,55 @@ enum d40_command {
 	D40_DMA_SUSPENDED = 3
 };
 
+/*
+ * These are the registers that has to be saved and later restored
+ * when the DMA hw is powered off.
+ * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
+ */
+static u32 d40_backup_regs[] = {
+	D40_DREG_LCPA,
+	D40_DREG_LCLA,
+	D40_DREG_PRMSE,
+	D40_DREG_PRMSO,
+	D40_DREG_PRMOE,
+	D40_DREG_PRMOO,
+};
+
+#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
+
+/* TODO: Check if all these registers have to be saved/restored on dma40 v3 */
+static u32 d40_backup_regs_v3[] = {
+	D40_DREG_PSEG1,
+	D40_DREG_PSEG2,
+	D40_DREG_PSEG3,
+	D40_DREG_PSEG4,
+	D40_DREG_PCEG1,
+	D40_DREG_PCEG2,
+	D40_DREG_PCEG3,
+	D40_DREG_PCEG4,
+	D40_DREG_RSEG1,
+	D40_DREG_RSEG2,
+	D40_DREG_RSEG3,
+	D40_DREG_RSEG4,
+	D40_DREG_RCEG1,
+	D40_DREG_RCEG2,
+	D40_DREG_RCEG3,
+	D40_DREG_RCEG4,
+};
+
+#define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3)
+
+static u32 d40_backup_regs_chan[] = {
+	D40_CHAN_REG_SSCFG,
+	D40_CHAN_REG_SSELT,
+	D40_CHAN_REG_SSPTR,
+	D40_CHAN_REG_SSLNK,
+	D40_CHAN_REG_SDCFG,
+	D40_CHAN_REG_SDELT,
+	D40_CHAN_REG_SDPTR,
+	D40_CHAN_REG_SDLNK,
+};
+
 /**
  * struct d40_lli_pool - Structure for keeping LLIs in memory
  *
@@ -96,7 +150,7 @@ struct d40_lli_pool {
  * during a transfer.
  * @node: List entry.
  * @is_in_client_list: true if the client owns this descriptor.
- * the previous one.
+ * @cyclic: true if this is a cyclic job
  *
  * This descriptor is used for both logical and physical transfers.
  */
@@ -143,6 +197,7 @@ struct d40_lcla_pool {
  * channels.
  *
  * @lock: A lock protection this entity.
+ * @reserved: True if used by secure world or otherwise.
  * @num: The physical channel number of this entity.
  * @allocated_src: Bit mapped to show which src event line's are mapped to
  * this physical channel. Can also be free or physically allocated.
@@ -152,6 +207,7 @@ struct d40_lcla_pool {
  */
 struct d40_phy_res {
 	spinlock_t lock;
+	bool	   reserved;
 	int	   num;
 	u32	   allocated_src;
 	u32	   allocated_dst;
@@ -185,7 +241,6 @@ struct d40_base;
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
-* @lcla: Space for one dst src pair for logical channel transfers.
 * @lcpa: Pointer to dst and src lcpa settings.
 * @runtime_addr: runtime configured address.
 * @runtime_direction: runtime configured direction.
@@ -241,6 +296,7 @@ struct d40_chan {
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can do only do slave transfers.
 * @dma_memcpy: dma_device channels that can do only do memcpy transfers.
+ * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
@@ -254,6 +310,13 @@ struct d40_chan {
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
+ * @reg_val_backup: Here the values of some hardware registers are stored
+ * before the DMA is powered off. They are restored when the power is back on.
+ * @reg_val_backup_v3: Backup of registers that only exits on dma40 v3 and
+ * later.
+ * @reg_val_backup_chan: Backup data for standard channel parameter registers.
+ * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
+ * @initialized: true if the dma has been initialized
 */
 struct d40_base {
 	spinlock_t			 interrupt_lock;
@@ -282,6 +345,11 @@ struct d40_base {
 	dma_addr_t			  phy_lcpa;
 	resource_size_t			  lcpa_size;
 	struct kmem_cache		 *desc_slab;
+	u32				  reg_val_backup[BACKUP_REGS_SZ];
+	u32				  reg_val_backup_v3[BACKUP_REGS_SZ_V3];
+	u32				 *reg_val_backup_chan;
+	u16				  gcc_pwr_off_mask;
+	bool				  initialized;
 };
 
 /**
@@ -479,13 +547,14 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
 		struct d40_desc *d;
 		struct d40_desc *_d;
 
-		list_for_each_entry_safe(d, _d, &d40c->client, node)
+		list_for_each_entry_safe(d, _d, &d40c->client, node) {
 			if (async_tx_test_ack(&d->txd)) {
 				d40_desc_remove(d);
 				desc = d;
 				memset(desc, 0, sizeof(*desc));
 				break;
 			}
+		}
 	}
 
 	if (!desc)
@@ -740,7 +809,61 @@ static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
 	return len;
 }
 
-/* Support functions for logical channels */
+
+#ifdef CONFIG_PM
+static void dma40_backup(void __iomem *baseaddr, u32 *backup,
+			 u32 *regaddr, int num, bool save)
+{
+	int i;
+
+	for (i = 0; i < num; i++) {
+		void __iomem *addr = baseaddr + regaddr[i];
+
+		if (save)
+			backup[i] = readl_relaxed(addr);
+		else
+			writel_relaxed(backup[i], addr);
+	}
+}
+
+static void d40_save_restore_registers(struct d40_base *base, bool save)
+{
+	int i;
+
+	/* Save/Restore channel specific registers */
+	for (i = 0; i < base->num_phy_chans; i++) {
+		void __iomem *addr;
+		int idx;
+
+		if (base->phy_res[i].reserved)
+			continue;
+
+		addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
+		idx = i * ARRAY_SIZE(d40_backup_regs_chan);
+
+		dma40_backup(addr, &base->reg_val_backup_chan[idx],
+			     d40_backup_regs_chan,
+			     ARRAY_SIZE(d40_backup_regs_chan),
+			     save);
+	}
+
+	/* Save/Restore global registers */
+	dma40_backup(base->virtbase, base->reg_val_backup,
+		     d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
+		     save);
+
+	/* Save/Restore registers only existing on dma40 v3 and later */
+	if (base->rev >= 3)
+		dma40_backup(base->virtbase, base->reg_val_backup_v3,
+			     d40_backup_regs_v3,
+			     ARRAY_SIZE(d40_backup_regs_v3),
+			     save);
+}
+#else
+static void d40_save_restore_registers(struct d40_base *base, bool save)
+{
+}
+#endif
 
 static int d40_channel_execute_command(struct d40_chan *d40c,
 				       enum d40_command command)
@@ -1013,6 +1136,7 @@ static int d40_pause(struct d40_chan *d40c)
 	if (!d40c->busy)
 		return 0;
 
+	pm_runtime_get_sync(d40c->base->dev);
 	spin_lock_irqsave(&d40c->lock, flags);
 
 	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
@@ -1025,7 +1149,8 @@ static int d40_pause(struct d40_chan *d40c)
 							  D40_DMA_RUN);
 		}
 	}
-
+	pm_runtime_mark_last_busy(d40c->base->dev);
+	pm_runtime_put_autosuspend(d40c->base->dev);
 	spin_unlock_irqrestore(&d40c->lock, flags);
 	return res;
 }
@@ -1039,7 +1164,7 @@ static int d40_resume(struct d40_chan *d40c)
 		return 0;
 
 	spin_lock_irqsave(&d40c->lock, flags);
-
+	pm_runtime_get_sync(d40c->base->dev);
 	if (d40c->base->rev == 0)
 		if (chan_is_logical(d40c)) {
 			res = d40_channel_execute_command(d40c,
@@ -1057,6 +1182,8 @@ static int d40_resume(struct d40_chan *d40c)
 	}
 
 no_suspend:
+	pm_runtime_mark_last_busy(d40c->base->dev);
+	pm_runtime_put_autosuspend(d40c->base->dev);
 	spin_unlock_irqrestore(&d40c->lock, flags);
 	return res;
 }
@@ -1129,7 +1256,10 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
 	d40d = d40_first_queued(d40c);
 
 	if (d40d != NULL) {
-		d40c->busy = true;
+		if (!d40c->busy)
+			d40c->busy = true;
+
+		pm_runtime_get_sync(d40c->base->dev);
 
 		/* Remove from queue */
 		d40_desc_remove(d40d);
@@ -1190,6 +1320,8 @@ static void dma_tc_handle(struct d40_chan *d40c)
 
 		if (d40_queue_start(d40c) == NULL)
 			d40c->busy = false;
+		pm_runtime_mark_last_busy(d40c->base->dev);
+		pm_runtime_put_autosuspend(d40c->base->dev);
 	}
 
 	d40c->pending_tx++;
@@ -1643,10 +1775,11 @@ static int d40_free_dma(struct d40_chan *d40c)
 		return -EINVAL;
 	}
 
+	pm_runtime_get_sync(d40c->base->dev);
 	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
 	if (res) {
 		chan_err(d40c, "suspend failed\n");
-		return res;
+		goto out;
 	}
 
 	if (chan_is_logical(d40c)) {
@@ -1664,13 +1797,11 @@ static int d40_free_dma(struct d40_chan *d40c)
 			if (d40_chan_has_events(d40c)) {
 				res = d40_channel_execute_command(d40c,
 								  D40_DMA_RUN);
-				if (res) {
+				if (res)
 					chan_err(d40c,
 						"Executing RUN command\n");
-					return res;
-				}
 			}
-			return 0;
+			goto out;
 		}
 	} else {
 		(void) d40_alloc_mask_free(phy, is_src, 0);
@@ -1680,13 +1811,23 @@ static int d40_free_dma(struct d40_chan *d40c)
 	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
 	if (res) {
 		chan_err(d40c, "Failed to stop channel\n");
-		return res;
+		goto out;
 	}
+
+	if (d40c->busy) {
+		pm_runtime_mark_last_busy(d40c->base->dev);
+		pm_runtime_put_autosuspend(d40c->base->dev);
+	}
+
+	d40c->busy = false;
 	d40c->phy_chan = NULL;
 	d40c->configured = false;
 	d40c->base->lookup_phy_chans[phy->num] = NULL;
+out:
 
-	return 0;
+	pm_runtime_mark_last_busy(d40c->base->dev);
+	pm_runtime_put_autosuspend(d40c->base->dev);
+	return res;
 }
 
 static bool d40_is_paused(struct d40_chan *d40c)
@@ -2016,9 +2157,11 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
 	err = d40_allocate_channel(d40c);
 	if (err) {
 		chan_err(d40c, "Failed to allocate channel\n");
+		d40c->configured = false;
 		goto fail;
 	}
 
+	pm_runtime_get_sync(d40c->base->dev);
 	/* Fill in basic CFG register values */
 	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
 		    &d40c->dst_def_cfg, chan_is_logical(d40c));
@@ -2046,6 +2189,8 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
 	if (is_free_phy)
 		d40_config_write(d40c);
 fail:
+	pm_runtime_mark_last_busy(d40c->base->dev);
+	pm_runtime_put_autosuspend(d40c->base->dev);
 	spin_unlock_irqrestore(&d40c->lock, flags);
 	return err;
 }
@@ -2519,6 +2664,55 @@ failure1:
 	return err;
 }
 
+/* Suspend resume functionality */
+#ifdef CONFIG_PM
+static int dma40_pm_suspend(struct device *dev)
+{
+	if (!pm_runtime_suspended(dev))
+		return -EBUSY;
+
+	return 0;
+}
+
+static int dma40_runtime_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct d40_base *base = platform_get_drvdata(pdev);
+
+	d40_save_restore_registers(base, true);
+
+	/* Don't disable/enable clocks for v1 due to HW bugs */
+	if (base->rev != 1)
+		writel_relaxed(base->gcc_pwr_off_mask,
+			       base->virtbase + D40_DREG_GCC);
+
+	return 0;
+}
+
+static int dma40_runtime_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct d40_base *base = platform_get_drvdata(pdev);
+
+	if (base->initialized)
+		d40_save_restore_registers(base, false);
+
+	writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
+		       base->virtbase + D40_DREG_GCC);
+	return 0;
+}
+
+
+static const struct dev_pm_ops dma40_pm_ops = {
+	.suspend		= dma40_pm_suspend,
+	.runtime_suspend	= dma40_runtime_suspend,
+	.runtime_resume		= dma40_runtime_resume,
+};
+#define DMA40_PM_OPS	(&dma40_pm_ops)
+#else
+#define DMA40_PM_OPS	NULL
+#endif
+
 /* Initialization functions. */
 
 static int __init d40_phy_res_init(struct d40_base *base)
@@ -2527,6 +2721,7 @@ static int __init d40_phy_res_init(struct d40_base *base)
 	int num_phy_chans_avail = 0;
 	u32 val[2];
 	int odd_even_bit = -2;
+	int gcc = D40_DREG_GCC_ENA;
 
 	val[0] = readl(base->virtbase + D40_DREG_PRSME);
 	val[1] = readl(base->virtbase + D40_DREG_PRSMO);
@@ -2538,9 +2733,17 @@ static int __init d40_phy_res_init(struct d40_base *base)
 			/* Mark security only channels as occupied */
 			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
 			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
+			base->phy_res[i].reserved = true;
+			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
+						       D40_DREG_GCC_SRC);
+			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
+						       D40_DREG_GCC_DST);
+
+
 		} else {
 			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
 			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
+			base->phy_res[i].reserved = false;
 			num_phy_chans_avail++;
 		}
 		spin_lock_init(&base->phy_res[i].lock);
@@ -2552,6 +2755,11 @@ static int __init d40_phy_res_init(struct d40_base *base)
 
 		base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
 		base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
+		base->phy_res[chan].reserved = true;
+		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
+					       D40_DREG_GCC_SRC);
+		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
+					       D40_DREG_GCC_DST);
 		num_phy_chans_avail--;
 	}
 
@@ -2572,6 +2780,15 @@ static int __init d40_phy_res_init(struct d40_base *base)
 		val[0] = val[0] >> 2;
 	}
 
+	/*
+	 * To keep things simple, Enable all clocks initially.
+	 * The clocks will get managed later post channel allocation.
+	 * The clocks for the event lines on which reserved channels exists
+	 * are not managed here.
+	 */
+	writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
+	base->gcc_pwr_off_mask = gcc;
+
 	return num_phy_chans_avail;
 }
 
@@ -2699,10 +2916,15 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 		goto failure;
 	}
 
-	base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
-					    sizeof(struct d40_desc *) *
-					    D40_LCLA_LINK_PER_EVENT_GRP,
+	base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
+					    sizeof(d40_backup_regs_chan),
 					    GFP_KERNEL);
+	if (!base->reg_val_backup_chan)
+		goto failure;
+
+	base->lcla_pool.alloc_map =
+		kzalloc(num_phy_chans * sizeof(struct d40_desc *)
+			* D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
 	if (!base->lcla_pool.alloc_map)
 		goto failure;
 
@@ -2741,9 +2963,9 @@ failure:
 static void __init d40_hw_init(struct d40_base *base)
 {
 
-	static const struct d40_reg_val dma_init_reg[] = {
+	static struct d40_reg_val dma_init_reg[] = {
 		/* Clock every part of the DMA block from start */
-		{ .reg = D40_DREG_GCC, .val = 0x0000ff01},
+		{ .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
 
 		/* Interrupts on all logical channels */
 		{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
@@ -2960,6 +3182,12 @@ static int __init d40_probe(struct platform_device *pdev)
 		goto failure;
 	}
 
+	pm_runtime_irq_safe(base->dev);
+	pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
+	pm_runtime_use_autosuspend(base->dev);
+	pm_runtime_enable(base->dev);
+	pm_runtime_resume(base->dev);
+	base->initialized = true;
 	err = d40_dmaengine_init(base, num_reserved_chans);
 	if (err)
 		goto failure;
@@ -3013,6 +3241,7 @@ static struct platform_driver d40_driver = {
 	.driver = {
 		.owner = THIS_MODULE,
 		.name  = D40_NAME,
+		.pm = DMA40_PM_OPS,
 	},
 };
 
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index b44c455158de..8d3d490968a3 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -16,6 +16,8 @@
 
 #define D40_TYPE_TO_GROUP(type) (type / 16)
 #define D40_TYPE_TO_EVENT(type) (type % 16)
+#define D40_GROUP_SIZE 8
+#define D40_PHYS_TO_GROUP(phys) ((phys & (D40_GROUP_SIZE - 1)) / 2)
 
 /* Most bits of the CFG register are the same in log as in phy mode */
 #define D40_SREG_CFG_MST_POS		15
@@ -123,6 +125,15 @@
 
 /* DMA Register Offsets */
 #define D40_DREG_GCC		0x000
+#define D40_DREG_GCC_ENA	0x1
+/* This assumes that there are only 4 event groups */
+#define D40_DREG_GCC_ENABLE_ALL	0xff01
+#define D40_DREG_GCC_EVTGRP_POS 8
+#define D40_DREG_GCC_SRC 0
+#define D40_DREG_GCC_DST 1
+#define D40_DREG_GCC_EVTGRP_ENA(x, y) \
+	(1 << (D40_DREG_GCC_EVTGRP_POS + 2 * x + y))
+
 #define D40_DREG_PRTYP		0x004
 #define D40_DREG_PRSME		0x008
 #define D40_DREG_PRSMO		0x00C
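To see what the new GCC helpers in this header compute, here is a small standalone check (plain C, buildable with any hosted compiler) that assembles the power-off mask the way d40_phy_res_init() does for reserved physical channels: bit 0 is the master clock enable, and each event group gets a source/destination clock-enable bit pair starting at bit 8. The macro definitions are copied from the hunk above; the sample channel numbers are arbitrary, chosen only for illustration.

	#include <stdio.h>

	/* Definitions copied from the ste_dma40_ll.h hunk above. */
	#define D40_GROUP_SIZE 8
	#define D40_PHYS_TO_GROUP(phys) ((phys & (D40_GROUP_SIZE - 1)) / 2)
	#define D40_DREG_GCC_ENA 0x1
	#define D40_DREG_GCC_EVTGRP_POS 8
	#define D40_DREG_GCC_SRC 0
	#define D40_DREG_GCC_DST 1
	#define D40_DREG_GCC_EVTGRP_ENA(x, y) \
		(1 << (D40_DREG_GCC_EVTGRP_POS + 2 * x + y))

	int main(void)
	{
		/* Arbitrary example: physical channels 0 and 5 are reserved. */
		int reserved[] = { 0, 5 };
		unsigned int gcc = D40_DREG_GCC_ENA;
		unsigned int i;

		for (i = 0; i < sizeof(reserved) / sizeof(reserved[0]); i++) {
			int grp = D40_PHYS_TO_GROUP(reserved[i]);

			/* Keep both the src and dst clocks of this group enabled. */
			gcc |= D40_DREG_GCC_EVTGRP_ENA(grp, D40_DREG_GCC_SRC);
			gcc |= D40_DREG_GCC_EVTGRP_ENA(grp, D40_DREG_GCC_DST);
		}

		/* This mask is what dma40_runtime_suspend() writes to D40_DREG_GCC. */
		printf("gcc_pwr_off_mask = 0x%04x\n", gcc);
		return 0;
	}

With channels 0 and 5 reserved this prints 0x3301: the master enable plus the src/dst bits for event groups 0 and 2, which stay clocked while the remaining groups are allowed to gate off.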