aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRussell King <rmk+kernel@arm.linux.org.uk>2015-07-15 14:59:36 -0400
committerGregory CLEMENT <gregory.clement@free-electrons.com>2015-08-05 12:36:49 -0400
commit44e259ac909f3b41786cf732a44b5cf8444e098a (patch)
tree64b295810cbc09821dc2717be1e0b60855cf3e63
parent482d638f98cc626bf01d4c9f6d6d35fc77d630c8 (diff)
ARM: dove: create a proper PMU driver for power domains, PMU IRQs and resets
The PMU device contains an interrupt controller, power control and resets. The interrupt controller is a little sub-standard in that there is no race free way to clear down pending interrupts, so we try to avoid problems by reducing the window as much as possible, and clearing as infrequently as possible. The interrupt support is implemented using an IRQ domain, and the parent interrupt referenced in the standard DT way. The power domains and reset support is closely related - there is a defined sequence for powering down a domain which is tightly coupled with asserting the reset. Hence, it makes sense to group these two together, and in order to avoid any locking contention disrupting this sequence, we avoid the use of syscon or regmap. This patch adds the core PMU driver: power domains must be defined in the DT file in order to make use of them. The reset controller can be referenced in the standard way for reset controllers. Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk> Signed-off-by: Andrew Lunn <andrew@lunn.ch> Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
-rw-r--r--arch/arm/mach-mvebu/Kconfig1
-rw-r--r--arch/arm/mach-mvebu/dove.c2
-rw-r--r--drivers/soc/Makefile1
-rw-r--r--drivers/soc/dove/Makefile1
-rw-r--r--drivers/soc/dove/pmu.c412
-rw-r--r--include/linux/soc/dove/pmu.h6
6 files changed, 423 insertions, 0 deletions
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index 97473168d6b6..c86a5a0aefac 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -96,6 +96,7 @@ config MACH_DOVE
96 select MACH_MVEBU_ANY 96 select MACH_MVEBU_ANY
97 select ORION_IRQCHIP 97 select ORION_IRQCHIP
98 select ORION_TIMER 98 select ORION_TIMER
99 select PM_GENERIC_DOMAINS if PM
99 select PINCTRL_DOVE 100 select PINCTRL_DOVE
100 help 101 help
101 Say 'Y' here if you want your kernel to support the 102 Say 'Y' here if you want your kernel to support the
diff --git a/arch/arm/mach-mvebu/dove.c b/arch/arm/mach-mvebu/dove.c
index 5a1741500a30..1aebb82e3d7b 100644
--- a/arch/arm/mach-mvebu/dove.c
+++ b/arch/arm/mach-mvebu/dove.c
@@ -12,6 +12,7 @@
12#include <linux/mbus.h> 12#include <linux/mbus.h>
13#include <linux/of.h> 13#include <linux/of.h>
14#include <linux/of_platform.h> 14#include <linux/of_platform.h>
15#include <linux/soc/dove/pmu.h>
15#include <asm/hardware/cache-tauros2.h> 16#include <asm/hardware/cache-tauros2.h>
16#include <asm/mach/arch.h> 17#include <asm/mach/arch.h>
17#include "common.h" 18#include "common.h"
@@ -24,6 +25,7 @@ static void __init dove_init(void)
24 tauros2_init(0); 25 tauros2_init(0);
25#endif 26#endif
26 BUG_ON(mvebu_mbus_dt_init(false)); 27 BUG_ON(mvebu_mbus_dt_init(false));
28 dove_init_pmu();
27 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); 29 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
28} 30}
29 31
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 7dc7c0d8a2c1..0b12d777d3c4 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -2,6 +2,7 @@
2# Makefile for the Linux Kernel SOC specific device drivers. 2# Makefile for the Linux Kernel SOC specific device drivers.
3# 3#
4 4
5obj-$(CONFIG_MACH_DOVE) += dove/
5obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/ 6obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
6obj-$(CONFIG_ARCH_QCOM) += qcom/ 7obj-$(CONFIG_ARCH_QCOM) += qcom/
7obj-$(CONFIG_ARCH_SUNXI) += sunxi/ 8obj-$(CONFIG_ARCH_SUNXI) += sunxi/
diff --git a/drivers/soc/dove/Makefile b/drivers/soc/dove/Makefile
new file mode 100644
index 000000000000..2db8e65513a3
--- /dev/null
+++ b/drivers/soc/dove/Makefile
@@ -0,0 +1 @@
obj-y += pmu.o
diff --git a/drivers/soc/dove/pmu.c b/drivers/soc/dove/pmu.c
new file mode 100644
index 000000000000..6792aae9e2e5
--- /dev/null
+++ b/drivers/soc/dove/pmu.c
@@ -0,0 +1,412 @@
1/*
2 * Marvell Dove PMU support
3 */
4#include <linux/io.h>
5#include <linux/irq.h>
6#include <linux/irqdomain.h>
7#include <linux/of.h>
8#include <linux/of_irq.h>
9#include <linux/of_address.h>
10#include <linux/platform_device.h>
11#include <linux/pm_domain.h>
12#include <linux/reset.h>
13#include <linux/reset-controller.h>
14#include <linux/sched.h>
15#include <linux/slab.h>
16#include <linux/soc/dove/pmu.h>
17#include <linux/spinlock.h>
18
19#define NR_PMU_IRQS 7
20
21#define PMC_SW_RST 0x30
22#define PMC_IRQ_CAUSE 0x50
23#define PMC_IRQ_MASK 0x54
24
25#define PMU_PWR 0x10
26#define PMU_ISO 0x58
27
/*
 * Per-instance driver state for the Dove PMU.  The single spinlock
 * serialises all accesses to the two register windows, so the power
 * sequencing, reset controller and IRQ-clearing code never interleave.
 */
struct pmu_data {
	spinlock_t lock;
	struct device_node *of_node;	/* PMU DT node; irq/genpd/reset provider */
	void __iomem *pmc_base;		/* window 0: PMC_SW_RST / PMC_IRQ_* regs */
	void __iomem *pmu_base;		/* window 1: PMU_PWR / PMU_ISO regs */
	struct irq_chip_generic *irq_gc;
	struct irq_domain *irq_domain;
#ifdef CONFIG_RESET_CONTROLLER
	struct reset_controller_dev reset;
#endif
};
39
40/*
41 * The PMU contains a register to reset various subsystems within the
42 * SoC. Export this as a reset controller.
43 */
44#ifdef CONFIG_RESET_CONTROLLER
45#define rcdev_to_pmu(rcdev) container_of(rcdev, struct pmu_data, reset)
46
47static int pmu_reset_reset(struct reset_controller_dev *rc, unsigned long id)
48{
49 struct pmu_data *pmu = rcdev_to_pmu(rc);
50 unsigned long flags;
51 u32 val;
52
53 spin_lock_irqsave(&pmu->lock, flags);
54 val = readl_relaxed(pmu->pmc_base + PMC_SW_RST);
55 writel_relaxed(val & ~BIT(id), pmu->pmc_base + PMC_SW_RST);
56 writel_relaxed(val | BIT(id), pmu->pmc_base + PMC_SW_RST);
57 spin_unlock_irqrestore(&pmu->lock, flags);
58
59 return 0;
60}
61
62static int pmu_reset_assert(struct reset_controller_dev *rc, unsigned long id)
63{
64 struct pmu_data *pmu = rcdev_to_pmu(rc);
65 unsigned long flags;
66 u32 val = ~BIT(id);
67
68 spin_lock_irqsave(&pmu->lock, flags);
69 val &= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
70 writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
71 spin_unlock_irqrestore(&pmu->lock, flags);
72
73 return 0;
74}
75
76static int pmu_reset_deassert(struct reset_controller_dev *rc, unsigned long id)
77{
78 struct pmu_data *pmu = rcdev_to_pmu(rc);
79 unsigned long flags;
80 u32 val = BIT(id);
81
82 spin_lock_irqsave(&pmu->lock, flags);
83 val |= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
84 writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
85 spin_unlock_irqrestore(&pmu->lock, flags);
86
87 return 0;
88}
89
static struct reset_control_ops pmu_reset_ops = {
	.reset = pmu_reset_reset,
	.assert = pmu_reset_assert,
	.deassert = pmu_reset_deassert,
};

/*
 * Template controller, copied into pmu_data by pmu_reset_init(); the
 * copy is what gets registered, so __initdata is safe to discard.
 */
static struct reset_controller_dev pmu_reset __initdata = {
	.ops = &pmu_reset_ops,
	.owner = THIS_MODULE,
	.nr_resets = 32,
};
101
/*
 * Register the PMU's reset controller, with the PMU DT node as the
 * provider.  Failure is logged but non-fatal: the power domains and
 * interrupt controller can still be used without resets.
 */
static void __init pmu_reset_init(struct pmu_data *pmu)
{
	int ret;

	/* Instantiate the __initdata template for this PMU instance */
	pmu->reset = pmu_reset;
	pmu->reset.of_node = pmu->of_node;

	ret = reset_controller_register(&pmu->reset);
	if (ret)
		pr_err("pmu: %s failed: %d\n", "reset_controller_register", ret);
}
#else
/* Stub when the reset controller framework is not built in. */
static void __init pmu_reset_init(struct pmu_data *pmu)
{
}
#endif
118
/*
 * One DT-described power domain.  Each mask selects the bit(s) for this
 * domain in the corresponding register; a zero mask means the domain
 * has no such control (checked before use in the power callbacks).
 */
struct pmu_domain {
	struct pmu_data *pmu;
	u32 pwr_mask;	/* PMU_PWR bit: set by power_off, cleared by power_on */
	u32 rst_mask;	/* PMC_SW_RST bit: cleared = held in reset */
	u32 iso_mask;	/* PMU_ISO bit: cleared = isolators enabled */
	struct generic_pm_domain base;
};
128
129/*
130 * This deals with the "old" Marvell sequence of bringing a power domain
131 * down/up, which is: apply power, release reset, disable isolators.
132 *
133 * Later devices apparently use a different sequence: power up, disable
134 * isolators, assert repair signal, enable SRMA clock, enable AXI clock,
135 * enable module clock, deassert reset.
136 *
137 * Note: reading the assembly, it seems that the IO accessors have an
138 * unfortunate side-effect - they cause memory already read into registers
139 * for the if () to be re-read for the bit-set or bit-clear operation.
140 * The code is written to avoid this.
141 */
/*
 * genpd .power_off callback: enable isolators, assert the reset, then
 * remove power (setting the pwr_mask bit in PMU_PWR powers the domain
 * DOWN).  The three writes follow the required "old" Marvell sequence
 * described above; their order, and the read-into-local shape of each
 * step (see the IO-accessor note above), must not be changed.
 */
static int pmu_domain_power_off(struct generic_pm_domain *domain)
{
	struct pmu_domain *pmu_dom = to_pmu_domain(domain);
	struct pmu_data *pmu = pmu_dom->pmu;
	unsigned long flags;
	unsigned int val;
	void __iomem *pmu_base = pmu->pmu_base;
	void __iomem *pmc_base = pmu->pmc_base;

	spin_lock_irqsave(&pmu->lock, flags);

	/* Enable isolators (clear the iso_mask bit) */
	if (pmu_dom->iso_mask) {
		val = ~pmu_dom->iso_mask;
		val &= readl_relaxed(pmu_base + PMU_ISO);
		writel_relaxed(val, pmu_base + PMU_ISO);
	}

	/* Reset unit (clear the rst_mask bit = assert reset) */
	if (pmu_dom->rst_mask) {
		val = ~pmu_dom->rst_mask;
		val &= readl_relaxed(pmc_base + PMC_SW_RST);
		writel_relaxed(val, pmc_base + PMC_SW_RST);
	}

	/* Power down (set the pwr_mask bit) */
	val = readl_relaxed(pmu_base + PMU_PWR) | pmu_dom->pwr_mask;
	writel_relaxed(val, pmu_base + PMU_PWR);

	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}
175
/*
 * genpd .power_on callback: apply power, release the reset, then
 * disable the isolators -- the mirror of pmu_domain_power_off().  As
 * with power_off, the write order is the hardware-required sequence and
 * must be preserved.
 */
static int pmu_domain_power_on(struct generic_pm_domain *domain)
{
	struct pmu_domain *pmu_dom = to_pmu_domain(domain);
	struct pmu_data *pmu = pmu_dom->pmu;
	unsigned long flags;
	unsigned int val;
	void __iomem *pmu_base = pmu->pmu_base;
	void __iomem *pmc_base = pmu->pmc_base;

	spin_lock_irqsave(&pmu->lock, flags);

	/* Power on (clear the pwr_mask bit) */
	val = ~pmu_dom->pwr_mask & readl_relaxed(pmu_base + PMU_PWR);
	writel_relaxed(val, pmu_base + PMU_PWR);

	/* Release reset (set the rst_mask bit) */
	if (pmu_dom->rst_mask) {
		val = pmu_dom->rst_mask;
		val |= readl_relaxed(pmc_base + PMC_SW_RST);
		writel_relaxed(val, pmc_base + PMC_SW_RST);
	}

	/* Disable isolators (set the iso_mask bit) */
	if (pmu_dom->iso_mask) {
		val = pmu_dom->iso_mask;
		val |= readl_relaxed(pmu_base + PMU_ISO);
		writel_relaxed(val, pmu_base + PMU_ISO);
	}

	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}
209
210static void __pmu_domain_register(struct pmu_domain *domain,
211 struct device_node *np)
212{
213 unsigned int val = readl_relaxed(domain->pmu->pmu_base + PMU_PWR);
214
215 domain->base.power_off = pmu_domain_power_off;
216 domain->base.power_on = pmu_domain_power_on;
217
218 pm_genpd_init(&domain->base, NULL, !(val & domain->pwr_mask));
219
220 if (np)
221 of_genpd_add_provider_simple(np, &domain->base);
222}
223
/* PMU IRQ controller */
/*
 * Chained handler for the PMU's parent interrupt.  Demultiplexes the
 * unmasked bits of PMC_IRQ_CAUSE into the per-source interrupts of
 * pmu->irq_domain, then clears only the cause bits actually serviced.
 */
static void pmu_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct pmu_data *pmu = irq_get_handler_data(irq);
	struct irq_chip_generic *gc = pmu->irq_gc;
	struct irq_domain *domain = pmu->irq_domain;
	void __iomem *base = gc->reg_base;
	u32 stat = readl_relaxed(base + PMC_IRQ_CAUSE) & gc->mask_cache;
	u32 done = ~0;	/* bits still set here were NOT handled below */

	if (stat == 0) {
		/* spurious: no unmasked cause bit set */
		handle_bad_irq(irq, desc);
		return;
	}

	while (stat) {
		u32 hwirq = fls(stat) - 1;	/* highest pending source first */

		stat &= ~(1 << hwirq);
		done &= ~(1 << hwirq);

		generic_handle_irq(irq_find_mapping(domain, hwirq));
	}

	/*
	 * The PMU mask register is not RW0C: it is RW. This means that
	 * the bits take whatever value is written to them; if you write
	 * a '1', you will set the interrupt.
	 *
	 * Unfortunately this means there is NO race free way to clear
	 * these interrupts.
	 *
	 * So, let's structure the code so that the window is as small as
	 * possible.
	 */
	irq_gc_lock(gc);
	/* re-read CAUSE just before the write to shrink the race window */
	done &= readl_relaxed(base + PMC_IRQ_CAUSE);
	writel_relaxed(done, base + PMC_IRQ_CAUSE);
	irq_gc_unlock(gc);
}
264
265static int __init dove_init_pmu_irq(struct pmu_data *pmu, int irq)
266{
267 const char *name = "pmu_irq";
268 struct irq_chip_generic *gc;
269 struct irq_domain *domain;
270 int ret;
271
272 /* mask and clear all interrupts */
273 writel(0, pmu->pmc_base + PMC_IRQ_MASK);
274 writel(0, pmu->pmc_base + PMC_IRQ_CAUSE);
275
276 domain = irq_domain_add_linear(pmu->of_node, NR_PMU_IRQS,
277 &irq_generic_chip_ops, NULL);
278 if (!domain) {
279 pr_err("%s: unable to add irq domain\n", name);
280 return -ENOMEM;
281 }
282
283 ret = irq_alloc_domain_generic_chips(domain, NR_PMU_IRQS, 1, name,
284 handle_level_irq,
285 IRQ_NOREQUEST | IRQ_NOPROBE, 0,
286 IRQ_GC_INIT_MASK_CACHE);
287 if (ret) {
288 pr_err("%s: unable to alloc irq domain gc: %d\n", name, ret);
289 irq_domain_remove(domain);
290 return ret;
291 }
292
293 gc = irq_get_domain_generic_chip(domain, 0);
294 gc->reg_base = pmu->pmc_base;
295 gc->chip_types[0].regs.mask = PMC_IRQ_MASK;
296 gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
297 gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
298
299 pmu->irq_domain = domain;
300 pmu->irq_gc = gc;
301
302 irq_set_handler_data(irq, pmu);
303 irq_set_chained_handler(irq, pmu_irq_handler);
304
305 return 0;
306}
307
308/*
309 * pmu: power-manager@d0000 {
310 * compatible = "marvell,dove-pmu";
311 * reg = <0xd0000 0x8000> <0xd8000 0x8000>;
312 * interrupts = <33>;
313 * interrupt-controller;
314 * #reset-cells = 1;
315 * vpu_domain: vpu-domain {
316 * #power-domain-cells = <0>;
317 * marvell,pmu_pwr_mask = <0x00000008>;
318 * marvell,pmu_iso_mask = <0x00000001>;
319 * resets = <&pmu 16>;
320 * };
321 * gpu_domain: gpu-domain {
322 * #power-domain-cells = <0>;
323 * marvell,pmu_pwr_mask = <0x00000004>;
324 * marvell,pmu_iso_mask = <0x00000002>;
325 * resets = <&pmu 18>;
326 * };
327 * };
328 */
329int __init dove_init_pmu(void)
330{
331 struct device_node *np_pmu, *domains_node, *np;
332 struct pmu_data *pmu;
333 int ret, parent_irq;
334
335 /* Lookup the PMU node */
336 np_pmu = of_find_compatible_node(NULL, NULL, "marvell,dove-pmu");
337 if (!np_pmu)
338 return 0;
339
340 domains_node = of_get_child_by_name(np_pmu, "domains");
341 if (!domains_node) {
342 pr_err("%s: failed to find domains sub-node\n", np_pmu->name);
343 return 0;
344 }
345
346 pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
347 if (!pmu)
348 return -ENOMEM;
349
350 spin_lock_init(&pmu->lock);
351 pmu->of_node = np_pmu;
352 pmu->pmc_base = of_iomap(pmu->of_node, 0);
353 pmu->pmu_base = of_iomap(pmu->of_node, 1);
354 if (!pmu->pmc_base || !pmu->pmu_base) {
355 pr_err("%s: failed to map PMU\n", np_pmu->name);
356 iounmap(pmu->pmu_base);
357 iounmap(pmu->pmc_base);
358 kfree(pmu);
359 return -ENOMEM;
360 }
361
362 pmu_reset_init(pmu);
363
364 for_each_available_child_of_node(domains_node, np) {
365 struct of_phandle_args args;
366 struct pmu_domain *domain;
367
368 domain = kzalloc(sizeof(*domain), GFP_KERNEL);
369 if (!domain)
370 break;
371
372 domain->pmu = pmu;
373 domain->base.name = kstrdup(np->name, GFP_KERNEL);
374 if (!domain->base.name) {
375 kfree(domain);
376 break;
377 }
378
379 of_property_read_u32(np, "marvell,pmu_pwr_mask",
380 &domain->pwr_mask);
381 of_property_read_u32(np, "marvell,pmu_iso_mask",
382 &domain->iso_mask);
383
384 /*
385 * We parse the reset controller property directly here
386 * to ensure that we can operate when the reset controller
387 * support is not configured into the kernel.
388 */
389 ret = of_parse_phandle_with_args(np, "resets", "#reset-cells",
390 0, &args);
391 if (ret == 0) {
392 if (args.np == pmu->of_node)
393 domain->rst_mask = BIT(args.args[0]);
394 of_node_put(args.np);
395 }
396
397 __pmu_domain_register(domain, np);
398 }
399 pm_genpd_poweroff_unused();
400
401 /* Loss of the interrupt controller is not a fatal error. */
402 parent_irq = irq_of_parse_and_map(pmu->of_node, 0);
403 if (!parent_irq) {
404 pr_err("%s: no interrupt specified\n", np_pmu->name);
405 } else {
406 ret = dove_init_pmu_irq(pmu, parent_irq);
407 if (ret)
408 pr_err("dove_init_pmu_irq() failed: %d\n", ret);
409 }
410
411 return 0;
412}
diff --git a/include/linux/soc/dove/pmu.h b/include/linux/soc/dove/pmu.h
new file mode 100644
index 000000000000..9c99f84bcc0e
--- /dev/null
+++ b/include/linux/soc/dove/pmu.h
@@ -0,0 +1,6 @@
#ifndef LINUX_SOC_DOVE_PMU_H
#define LINUX_SOC_DOVE_PMU_H

/*
 * Probe the DT for the Marvell Dove PMU and register its reset
 * controller, power domains and interrupt controller.  Returns 0 when
 * no PMU node is present or on success; negative errno otherwise.
 */
int dove_init_pmu(void);

#endif