aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSudeep Holla <Sudeep.Holla@arm.com>2018-01-10 11:44:14 -0500
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>2018-01-17 06:59:33 -0500
commitdff4113d5e3753c23e8b5bb6818f5829ccd0d06a (patch)
tree1e9dd6bfef82cb5476af042debdfd65cbb62e8dd
parent3fa4680b860bf48b437d6a2c039789c4abe202ae (diff)
drivers: psci: remove cluster terminology and dependency on physical_package_id
Since the definition of the term "cluster" is not well defined in the architecture, we should avoid using it. Also the physical package id is currently mapped to so-called "clusters" in ARM/ARM64 platforms, which is already arguable. Currently the PSCI checker uses the physical package id assuming that CPU power domains map to "clusters", and the physical package id in the code as it stands also maps to cluster boundaries. It does that trying to test "cluster" idle states as best it can. However, the CPU power domain often but not always maps directly to the processor topology. This patch removes the dependency on physical_package_id from the topology in this PSCI checker. It also replaces all the occurrences of "clusters" with cpu_groups, which is derived from core_sibling_mask and may not directly map to a physical "cluster". Acked-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> Signed-off-by: Sudeep Holla <sudeep.holla@arm.com> Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-rw-r--r--drivers/firmware/psci_checker.c46
1 files changed, 22 insertions, 24 deletions
diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
index f3f4f810e5df..bb1c068bff19 100644
--- a/drivers/firmware/psci_checker.c
+++ b/drivers/firmware/psci_checker.c
@@ -77,8 +77,8 @@ static int psci_ops_check(void)
77 return 0; 77 return 0;
78} 78}
79 79
80static int find_clusters(const struct cpumask *cpus, 80static int find_cpu_groups(const struct cpumask *cpus,
81 const struct cpumask **clusters) 81 const struct cpumask **cpu_groups)
82{ 82{
83 unsigned int nb = 0; 83 unsigned int nb = 0;
84 cpumask_var_t tmp; 84 cpumask_var_t tmp;
@@ -88,11 +88,11 @@ static int find_clusters(const struct cpumask *cpus,
88 cpumask_copy(tmp, cpus); 88 cpumask_copy(tmp, cpus);
89 89
90 while (!cpumask_empty(tmp)) { 90 while (!cpumask_empty(tmp)) {
91 const struct cpumask *cluster = 91 const struct cpumask *cpu_group =
92 topology_core_cpumask(cpumask_any(tmp)); 92 topology_core_cpumask(cpumask_any(tmp));
93 93
94 clusters[nb++] = cluster; 94 cpu_groups[nb++] = cpu_group;
95 cpumask_andnot(tmp, tmp, cluster); 95 cpumask_andnot(tmp, tmp, cpu_group);
96 } 96 }
97 97
98 free_cpumask_var(tmp); 98 free_cpumask_var(tmp);
@@ -170,24 +170,24 @@ static int hotplug_tests(void)
170{ 170{
171 int err; 171 int err;
172 cpumask_var_t offlined_cpus; 172 cpumask_var_t offlined_cpus;
173 int i, nb_cluster; 173 int i, nb_cpu_group;
174 const struct cpumask **clusters; 174 const struct cpumask **cpu_groups;
175 char *page_buf; 175 char *page_buf;
176 176
177 err = -ENOMEM; 177 err = -ENOMEM;
178 if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL)) 178 if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
179 return err; 179 return err;
180 /* We may have up to nb_available_cpus clusters. */ 180 /* We may have up to nb_available_cpus cpu_groups. */
181 clusters = kmalloc_array(nb_available_cpus, sizeof(*clusters), 181 cpu_groups = kmalloc_array(nb_available_cpus, sizeof(*cpu_groups),
182 GFP_KERNEL); 182 GFP_KERNEL);
183 if (!clusters) 183 if (!cpu_groups)
184 goto out_free_cpus; 184 goto out_free_cpus;
185 page_buf = (char *)__get_free_page(GFP_KERNEL); 185 page_buf = (char *)__get_free_page(GFP_KERNEL);
186 if (!page_buf) 186 if (!page_buf)
187 goto out_free_clusters; 187 goto out_free_cpu_groups;
188 188
189 err = 0; 189 err = 0;
190 nb_cluster = find_clusters(cpu_online_mask, clusters); 190 nb_cpu_group = find_cpu_groups(cpu_online_mask, cpu_groups);
191 191
192 /* 192 /*
193 * Of course the last CPU cannot be powered down and cpu_down() should 193 * Of course the last CPU cannot be powered down and cpu_down() should
@@ -197,24 +197,22 @@ static int hotplug_tests(void)
197 err += down_and_up_cpus(cpu_online_mask, offlined_cpus); 197 err += down_and_up_cpus(cpu_online_mask, offlined_cpus);
198 198
199 /* 199 /*
200 * Take down CPUs by cluster this time. When the last CPU is turned 200 * Take down CPUs by cpu group this time. When the last CPU is turned
201 * off, the cluster itself should shut down. 201 * off, the cpu group itself should shut down.
202 */ 202 */
203 for (i = 0; i < nb_cluster; ++i) { 203 for (i = 0; i < nb_cpu_group; ++i) {
204 int cluster_id =
205 topology_physical_package_id(cpumask_any(clusters[i]));
206 ssize_t len = cpumap_print_to_pagebuf(true, page_buf, 204 ssize_t len = cpumap_print_to_pagebuf(true, page_buf,
207 clusters[i]); 205 cpu_groups[i]);
208 /* Remove trailing newline. */ 206 /* Remove trailing newline. */
209 page_buf[len - 1] = '\0'; 207 page_buf[len - 1] = '\0';
210 pr_info("Trying to turn off and on again cluster %d " 208 pr_info("Trying to turn off and on again group %d (CPUs %s)\n",
211 "(CPUs %s)\n", cluster_id, page_buf); 209 i, page_buf);
212 err += down_and_up_cpus(clusters[i], offlined_cpus); 210 err += down_and_up_cpus(cpu_groups[i], offlined_cpus);
213 } 211 }
214 212
215 free_page((unsigned long)page_buf); 213 free_page((unsigned long)page_buf);
216out_free_clusters: 214out_free_cpu_groups:
217 kfree(clusters); 215 kfree(cpu_groups);
218out_free_cpus: 216out_free_cpus:
219 free_cpumask_var(offlined_cpus); 217 free_cpumask_var(offlined_cpus);
220 return err; 218 return err;