author    Nicolas Pitre <nicolas.pitre@linaro.org>  2015-03-14 21:13:48 -0400
committer Olof Johansson <olof@lixom.net>           2015-04-03 15:52:48 -0400
commit    323ab95339b515330a4e0908e53b06498f2cafde (patch)
tree      6d111fbf2e41f9527de706a68f97fef10e43a548 /arch/arm/mach-vexpress
parent    03fd5db717918b36a4ac1dbfc197222041608eab (diff)
ARM: vexpress: migrate DCSCB to the new MCPM backend abstraction
Signed-off-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Olof Johansson <olof@lixom.net>
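What the migration buys is visible in the diff below: the per-CPU use counts, the DCSCB spinlock, and the first-man/last-man arbitration all leave the backend, which is reduced to raw RST_HOLD register accesses behind split per-CPU and per-cluster callbacks. As a minimal sketch only (not the kernel's actual code, which lives in arch/arm/common/mcpm_entry.c), this is roughly how an MCPM core would drive the new callbacks; mcpm_usage[], mcpm_core_power_up() and the cluster-size macros here are simplified stand-ins for the real state machine and its locking:

#include <stdbool.h>

#define MAX_NR_CLUSTERS		2	/* stand-in values matching DCSCB */
#define MAX_CPUS_PER_CLUSTER	4

/* Trimmed to the callbacks dcscb.c implements in this patch. */
struct mcpm_platform_ops {
	int (*cpu_powerup)(unsigned int cpu, unsigned int cluster);
	int (*cluster_powerup)(unsigned int cluster);
	void (*cpu_powerdown_prepare)(unsigned int cpu, unsigned int cluster);
	void (*cluster_powerdown_prepare)(unsigned int cluster);
	void (*cpu_cache_disable)(void);
	void (*cluster_cache_disable)(void);
};

static const struct mcpm_platform_ops *platform_ops;
static int mcpm_usage[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];

static int mcpm_core_power_up(unsigned int cpu, unsigned int cluster)
{
	int ret = 0;
	bool first_man = true;
	unsigned int i;

	/* Refcounting of the kind that used to live in dcscb_power_up():
	 * the first CPU asking for a cluster also powers the cluster up. */
	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++)
		if (mcpm_usage[cluster][i])
			first_man = false;

	if (first_man)
		ret = platform_ops->cluster_powerup(cluster);
	if (!ret)
		ret = platform_ops->cpu_powerup(cpu, cluster);
	if (!ret)
		mcpm_usage[cluster][cpu]++;
	return ret;
}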
Diffstat (limited to 'arch/arm/mach-vexpress')
 -rw-r--r--  arch/arm/mach-vexpress/dcscb.c | 195
 1 file changed, 66 insertions(+), 129 deletions(-)
diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c
index 12c74734cd70..5cedcf572104 100644
--- a/arch/arm/mach-vexpress/dcscb.c
+++ b/arch/arm/mach-vexpress/dcscb.c
@@ -12,7 +12,6 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/io.h>
-#include <linux/spinlock.h>
 #include <linux/errno.h>
 #include <linux/of_address.h>
 #include <linux/vexpress.h>
@@ -36,163 +35,102 @@
 #define KFC_CFG_W		0x2c
 #define DCS_CFG_R		0x30
 
-/*
- * We can't use regular spinlocks. In the switcher case, it is possible
- * for an outbound CPU to call power_down() while its inbound counterpart
- * is already live using the same logical CPU number which trips lockdep
- * debugging.
- */
-static arch_spinlock_t dcscb_lock = __ARCH_SPIN_LOCK_UNLOCKED;
-
 static void __iomem *dcscb_base;
-static int dcscb_use_count[4][2];
 static int dcscb_allcpus_mask[2];
 
-static int dcscb_power_up(unsigned int cpu, unsigned int cluster)
+static int dcscb_cpu_powerup(unsigned int cpu, unsigned int cluster)
 {
 	unsigned int rst_hold, cpumask = (1 << cpu);
-	unsigned int all_mask;
 
 	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
 	if (cluster >= 2 || !(cpumask & dcscb_allcpus_mask[cluster]))
 		return -EINVAL;
 
-	all_mask = dcscb_allcpus_mask[cluster];
+	rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
+	rst_hold &= ~(cpumask | (cpumask << 4));
+	writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
+	return 0;
+}
 
-	/*
-	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
-	 * variant exists, we need to disable IRQs manually here.
-	 */
-	local_irq_disable();
-	arch_spin_lock(&dcscb_lock);
-
-	dcscb_use_count[cpu][cluster]++;
-	if (dcscb_use_count[cpu][cluster] == 1) {
-		rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
-		if (rst_hold & (1 << 8)) {
-			/* remove cluster reset and add individual CPU's reset */
-			rst_hold &= ~(1 << 8);
-			rst_hold |= all_mask;
-		}
-		rst_hold &= ~(cpumask | (cpumask << 4));
-		writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
-	} else if (dcscb_use_count[cpu][cluster] != 2) {
-		/*
-		 * The only possible values are:
-		 * 0 = CPU down
-		 * 1 = CPU (still) up
-		 * 2 = CPU requested to be up before it had a chance
-		 *     to actually make itself down.
-		 * Any other value is a bug.
-		 */
-		BUG();
-	}
+static int dcscb_cluster_powerup(unsigned int cluster)
+{
+	unsigned int rst_hold;
 
-	arch_spin_unlock(&dcscb_lock);
-	local_irq_enable();
+	pr_debug("%s: cluster %u\n", __func__, cluster);
+	if (cluster >= 2)
+		return -EINVAL;
 
+	/* remove cluster reset and add individual CPU's reset */
+	rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
+	rst_hold &= ~(1 << 8);
+	rst_hold |= dcscb_allcpus_mask[cluster];
+	writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
 	return 0;
 }
 
-static void dcscb_power_down(void)
+static void dcscb_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
 {
-	unsigned int mpidr, cpu, cluster, rst_hold, cpumask, all_mask;
-	bool last_man = false, skip_wfi = false;
-
-	mpidr = read_cpuid_mpidr();
-	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-	cpumask = (1 << cpu);
+	unsigned int rst_hold;
 
 	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
-	BUG_ON(cluster >= 2 || !(cpumask & dcscb_allcpus_mask[cluster]));
-
-	all_mask = dcscb_allcpus_mask[cluster];
-
-	__mcpm_cpu_going_down(cpu, cluster);
-
-	arch_spin_lock(&dcscb_lock);
-	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
-	dcscb_use_count[cpu][cluster]--;
-	if (dcscb_use_count[cpu][cluster] == 0) {
-		rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
-		rst_hold |= cpumask;
-		if (((rst_hold | (rst_hold >> 4)) & all_mask) == all_mask) {
-			rst_hold |= (1 << 8);
-			last_man = true;
-		}
-		writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
-	} else if (dcscb_use_count[cpu][cluster] == 1) {
-		/*
-		 * A power_up request went ahead of us.
-		 * Even if we do not want to shut this CPU down,
-		 * the caller expects a certain state as if the WFI
-		 * was aborted. So let's continue with cache cleaning.
-		 */
-		skip_wfi = true;
-	} else
-		BUG();
-
-	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
-		arch_spin_unlock(&dcscb_lock);
-
-		/* Flush all cache levels for this cluster. */
-		v7_exit_coherency_flush(all);
-
-		/*
-		 * A full outer cache flush could be needed at this point
-		 * on platforms with such a cache, depending on where the
-		 * outer cache sits. In some cases the notion of a "last
-		 * cluster standing" would need to be implemented if the
-		 * outer cache is shared across clusters. In any case, when
-		 * the outer cache needs flushing, there is no concurrent
-		 * access to the cache controller to worry about and no
-		 * special locking besides what is already provided by the
-		 * MCPM state machinery is needed.
-		 */
-
-		/*
-		 * Disable cluster-level coherency by masking
-		 * incoming snoops and DVM messages:
-		 */
-		cci_disable_port_by_cpu(mpidr);
-
-		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
-	} else {
-		arch_spin_unlock(&dcscb_lock);
-
-		/* Disable and flush the local CPU cache. */
-		v7_exit_coherency_flush(louis);
-	}
+	BUG_ON(cluster >= 2 || !((1 << cpu) & dcscb_allcpus_mask[cluster]));
 
-	__mcpm_cpu_down(cpu, cluster);
+	rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
+	rst_hold |= (1 << cpu);
+	writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
+}
 
-	/* Now we are prepared for power-down, do it: */
-	dsb();
-	if (!skip_wfi)
-		wfi();
+static void dcscb_cluster_powerdown_prepare(unsigned int cluster)
+{
+	unsigned int rst_hold;
 
-	/* Not dead at this point? Let our caller cope. */
+	pr_debug("%s: cluster %u\n", __func__, cluster);
+	BUG_ON(cluster >= 2);
+
+	rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
+	rst_hold |= (1 << 8);
+	writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
 }
 
-static const struct mcpm_platform_ops dcscb_power_ops = {
-	.power_up	= dcscb_power_up,
-	.power_down	= dcscb_power_down,
-};
+static void dcscb_cpu_cache_disable(void)
+{
+	/* Disable and flush the local CPU cache. */
+	v7_exit_coherency_flush(louis);
+}
 
-static void __init dcscb_usage_count_init(void)
+static void dcscb_cluster_cache_disable(void)
 {
-	unsigned int mpidr, cpu, cluster;
+	/* Flush all cache levels for this cluster. */
+	v7_exit_coherency_flush(all);
 
-	mpidr = read_cpuid_mpidr();
-	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+	/*
+	 * A full outer cache flush could be needed at this point
+	 * on platforms with such a cache, depending on where the
+	 * outer cache sits. In some cases the notion of a "last
+	 * cluster standing" would need to be implemented if the
+	 * outer cache is shared across clusters. In any case, when
+	 * the outer cache needs flushing, there is no concurrent
+	 * access to the cache controller to worry about and no
+	 * special locking besides what is already provided by the
+	 * MCPM state machinery is needed.
+	 */
 
-	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
-	BUG_ON(cluster >= 2 || !((1 << cpu) & dcscb_allcpus_mask[cluster]));
-	dcscb_use_count[cpu][cluster] = 1;
+	/*
+	 * Disable cluster-level coherency by masking
+	 * incoming snoops and DVM messages:
+	 */
+	cci_disable_port_by_cpu(read_cpuid_mpidr());
 }
 
+static const struct mcpm_platform_ops dcscb_power_ops = {
+	.cpu_powerup		= dcscb_cpu_powerup,
+	.cluster_powerup	= dcscb_cluster_powerup,
+	.cpu_powerdown_prepare	= dcscb_cpu_powerdown_prepare,
+	.cluster_powerdown_prepare = dcscb_cluster_powerdown_prepare,
+	.cpu_cache_disable	= dcscb_cpu_cache_disable,
+	.cluster_cache_disable	= dcscb_cluster_cache_disable,
+};
+
 extern void dcscb_power_up_setup(unsigned int affinity_level);
 
 static int __init dcscb_init(void)
@@ -213,7 +151,6 @@ static int __init dcscb_init(void)
 	cfg = readl_relaxed(dcscb_base + DCS_CFG_R);
 	dcscb_allcpus_mask[0] = (1 << (((cfg >> 16) >> (0 << 2)) & 0xf)) - 1;
 	dcscb_allcpus_mask[1] = (1 << (((cfg >> 16) >> (1 << 2)) & 0xf)) - 1;
-	dcscb_usage_count_init();
 
 	ret = mcpm_platform_register(&dcscb_power_ops);
 	if (!ret)
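For reference when reading dcscb_init() above: DCS_CFG_R packs a per-cluster CPU count into nibbles starting at bit 16 (bits [19:16] for cluster 0, bits [23:20] for cluster 1, as the `(cluster << 2)` shift implies). A standalone worked example of the mask arithmetic, where the cfg value for a 4+4 CPU configuration is an assumption for illustration, not read from real hardware:

#include <stdio.h>

int main(void)
{
	unsigned int cfg = 0x00440000;	/* assumed: 4 CPUs in each cluster */
	unsigned int mask0 = (1 << (((cfg >> 16) >> (0 << 2)) & 0xf)) - 1;
	unsigned int mask1 = (1 << (((cfg >> 16) >> (1 << 2)) & 0xf)) - 1;

	/* Prints "f f": CPUs 0-3 are valid in each cluster, so
	 * dcscb_cpu_powerup() accepts cpumask values 0x1 through 0x8
	 * and clears both the CPU reset bit and its shifted copy
	 * (cpumask | cpumask << 4) in the cluster's RST_HOLD register. */
	printf("%x %x\n", mask0, mask1);
	return 0;
}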