author     Nicolas Pitre <nicolas.pitre@linaro.org>   2012-07-16 22:07:10 -0400
committer  Nicolas Pitre <nicolas.pitre@linaro.org>   2013-05-29 15:50:35 -0400
commit     13eae144ec754ab64521891d2cffc3156f6f083f
tree       384c5edf609e0ffb3ebf0cbadf2ec19d1007b037 /arch/arm/mach-vexpress/dcscb.c
parent     1e904e1bf6f1285cc2dd5696c44b7cf78cda643f
ARM: vexpress/dcscb: add CPU use counts to the power up/down API implementation
It is possible for a CPU to be told to power up before it has
managed to power itself down. Solve this race with a usage count,
as mandated by the MCPM API definition.
Signed-off-by: Nicolas Pitre <nico@linaro.org>
Reviewed-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Acked-by: Pawel Moll <pawel.moll@arm.com>
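
As a side note, the use-count scheme the message describes can be modelled
in a few lines of plain C. The sketch below is illustrative only and makes
several assumptions: model_power_up()/model_power_down() and the bare
use_count array are hypothetical stand-ins for dcscb_power_up(),
dcscb_power_down() and dcscb_use_count[][], and the spinlock, RST_HOLD
register accesses and WFI are elided.

/*
 * Hypothetical user-space model of the use-count race handling;
 * not the kernel code itself.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* per-CPU, per-cluster count: 0 = down, 1 = up,
 * 2 = powered up again before the power-down completed */
static int use_count[4][2];

static void model_power_up(unsigned int cpu, unsigned int cluster)
{
	use_count[cpu][cluster]++;
	if (use_count[cpu][cluster] == 1) {
		/* first user: actually release the CPU from reset */
		printf("cpu%u cluster%u: deassert reset\n", cpu, cluster);
	} else if (use_count[cpu][cluster] != 2) {
		assert(0);	/* only 1 and 2 are legal after ++ */
	}
	/* count == 2: the CPU never finished going down, nothing to do */
}

static bool model_power_down(unsigned int cpu, unsigned int cluster)
{
	bool skip_wfi = false;

	use_count[cpu][cluster]--;
	if (use_count[cpu][cluster] == 0) {
		/* last user: put the CPU back into reset */
		printf("cpu%u cluster%u: assert reset\n", cpu, cluster);
	} else if (use_count[cpu][cluster] == 1) {
		/* a power_up raced ahead of us: stay up, skip WFI */
		skip_wfi = true;
	} else {
		assert(0);
	}
	return skip_wfi;
}

int main(void)
{
	use_count[0][0] = 1;	/* boot CPU starts "up" */
	model_power_up(0, 0);	/* power-up request arrives early */
	printf("skip_wfi = %d\n", model_power_down(0, 0));	/* prints 1 */
	return 0;
}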
Diffstat (limited to 'arch/arm/mach-vexpress/dcscb.c')
 -rw-r--r--  arch/arm/mach-vexpress/dcscb.c | 74
 1 file changed, 59 insertions(+), 15 deletions(-)
diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c
index 2ca4bbce530c..65dee7818082 100644
--- a/arch/arm/mach-vexpress/dcscb.c
+++ b/arch/arm/mach-vexpress/dcscb.c
@@ -44,6 +44,7 @@
 static arch_spinlock_t dcscb_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
 static void __iomem *dcscb_base;
+static int dcscb_use_count[4][2];
 
 static int dcscb_power_up(unsigned int cpu, unsigned int cluster)
 {
@@ -60,14 +61,27 @@ static int dcscb_power_up(unsigned int cpu, unsigned int cluster)
 	local_irq_disable();
 	arch_spin_lock(&dcscb_lock);
 
-	rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
-	if (rst_hold & (1 << 8)) {
-		/* remove cluster reset and add individual CPU's reset */
-		rst_hold &= ~(1 << 8);
-		rst_hold |= 0xf;
+	dcscb_use_count[cpu][cluster]++;
+	if (dcscb_use_count[cpu][cluster] == 1) {
+		rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
+		if (rst_hold & (1 << 8)) {
+			/* remove cluster reset and add individual CPU's reset */
+			rst_hold &= ~(1 << 8);
+			rst_hold |= 0xf;
+		}
+		rst_hold &= ~(cpumask | (cpumask << 4));
+		writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
+	} else if (dcscb_use_count[cpu][cluster] != 2) {
+		/*
+		 * The only possible values are:
+		 * 0 = CPU down
+		 * 1 = CPU (still) up
+		 * 2 = CPU requested to be up before it had a chance
+		 *     to actually make itself down.
+		 * Any other value is a bug.
+		 */
+		BUG();
 	}
-	rst_hold &= ~(cpumask | (cpumask << 4));
-	writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
 
 	arch_spin_unlock(&dcscb_lock);
 	local_irq_enable();
@@ -77,7 +91,8 @@ static int dcscb_power_up(unsigned int cpu, unsigned int cluster)
 
 static void dcscb_power_down(void)
 {
-	unsigned int mpidr, cpu, cluster, rst_hold, cpumask, last_man;
+	unsigned int mpidr, cpu, cluster, rst_hold, cpumask;
+	bool last_man = false, skip_wfi = false;
 
 	mpidr = read_cpuid_mpidr();
 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
@@ -88,13 +103,26 @@ static void dcscb_power_down(void)
 	BUG_ON(cpu >= 4 || cluster >= 2);
 
 	arch_spin_lock(&dcscb_lock);
-	rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
-	rst_hold |= cpumask;
-	if (((rst_hold | (rst_hold >> 4)) & 0xf) == 0xf)
-		rst_hold |= (1 << 8);
-	writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
+	dcscb_use_count[cpu][cluster]--;
+	if (dcscb_use_count[cpu][cluster] == 0) {
+		rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
+		rst_hold |= cpumask;
+		if (((rst_hold | (rst_hold >> 4)) & 0xf) == 0xf) {
+			rst_hold |= (1 << 8);
+			last_man = true;
+		}
+		writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
+	} else if (dcscb_use_count[cpu][cluster] == 1) {
+		/*
+		 * A power_up request went ahead of us.
+		 * Even if we do not want to shut this CPU down,
+		 * the caller expects a certain state as if the WFI
+		 * was aborted.  So let's continue with cache cleaning.
+		 */
+		skip_wfi = true;
+	} else
+		BUG();
 	arch_spin_unlock(&dcscb_lock);
-	last_man = (rst_hold & (1 << 8));
 
 	/*
 	 * Now let's clean our L1 cache and shut ourself down.
@@ -122,7 +150,8 @@ static void dcscb_power_down(void)
 
 	/* Now we are prepared for power-down, do it: */
 	dsb();
-	wfi();
+	if (!skip_wfi)
+		wfi();
 
 	/* Not dead at this point? Let our caller cope. */
 }
@@ -132,6 +161,19 @@ static const struct mcpm_platform_ops dcscb_power_ops = {
 	.power_down	= dcscb_power_down,
 };
 
+static void __init dcscb_usage_count_init(void)
+{
+	unsigned int mpidr, cpu, cluster;
+
+	mpidr = read_cpuid_mpidr();
+	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+	BUG_ON(cpu >= 4 || cluster >= 2);
+	dcscb_use_count[cpu][cluster] = 1;
+}
+
 static int __init dcscb_init(void)
 {
 	struct device_node *node;
@@ -144,6 +186,8 @@ static int __init dcscb_init(void)
 	if (!dcscb_base)
 		return -EADDRNOTAVAIL;
 
+	dcscb_usage_count_init();
+
 	ret = mcpm_platform_register(&dcscb_power_ops);
 	if (ret) {
 		iounmap(dcscb_base);
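
One further illustrative aside on the dcscb_power_down() hunk above: the
last-man test folds the two per-CPU reset-hold nibbles of RST_HOLDx
together before comparing against 0xf. The standalone sketch below shows
that bit trick in isolation; it assumes the register layout the driver
code implies (per-CPU holds in bits [3:0] and [7:4], cluster-wide hold in
bit 8) and is not the kernel code itself.

#include <assert.h>
#include <stdbool.h>

/* Sketch of the last-man check, assuming RST_HOLDx keeps one per-CPU
 * reset-hold mask in bits [3:0], a second one in bits [7:4], and the
 * cluster-wide hold in bit 8. */
static bool all_cpus_held(unsigned int rst_hold)
{
	/* a CPU counts as held if either of its two hold bits is set */
	return ((rst_hold | (rst_hold >> 4)) & 0xf) == 0xf;
}

int main(void)
{
	assert(!all_cpus_held(0x07));	/* cpu3 still running */
	assert(all_cpus_held(0x0f));	/* all held via the low nibble */
	assert(all_cpus_held(0xe1));	/* cpu0 low nibble, cpus 1-3 high */
	return 0;
}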