summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author    Russell King <rmk+kernel@arm.linux.org.uk>  2014-06-24 14:43:15 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>  2014-07-18 07:29:02 -0400
commit    af040ffc9ba1e079ee4c0748aff64fa3d4716fa5 (patch)
tree      03d58af8c3d16a777ee93e35d9f7aef15293a163
parent    ee2593ef5680a8646c6465ce998fea30b5af9b2b (diff)
ARM: make it easier to check the CPU part number correctly
Ensure that platform maintainers check the CPU part number in the right manner: the CPU part number is meaningless without also checking the CPU implement(e|o)r (choose your preferred spelling!) Provide an interface which returns both the implementer and part number together, and update the definitions to include the implementer.

Mark the old function as being deprecated... indeed, using the old function with the definitions will now always evaluate as false, so people must update their un-merged code to the new function. While this could be avoided by adding new definitions, we'd also have to create new names for them which would be awkward.

Acked-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-rw-r--r--arch/arm/include/asm/cputype.h37
-rw-r--r--arch/arm/include/asm/smp_scu.h2
-rw-r--r--arch/arm/kernel/perf_event_cpu.c55
-rw-r--r--arch/arm/kvm/guest.c8
-rw-r--r--arch/arm/mach-exynos/mcpm-exynos.c4
-rw-r--r--arch/arm/mach-exynos/platsmp.c4
-rw-r--r--arch/arm/mach-exynos/pm.c11
-rw-r--r--arch/arm/mach-vexpress/tc2_pm.c4
-rw-r--r--arch/arm/mm/cache-l2x0.c2
-rw-r--r--drivers/clocksource/arm_global_timer.c2
10 files changed, 66 insertions(+), 63 deletions(-)
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 8c2b7321a478..963a2515906d 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -62,17 +62,18 @@
62#define ARM_CPU_IMP_ARM 0x41 62#define ARM_CPU_IMP_ARM 0x41
63#define ARM_CPU_IMP_INTEL 0x69 63#define ARM_CPU_IMP_INTEL 0x69
64 64
65#define ARM_CPU_PART_ARM1136 0xB360 65/* ARM implemented processors */
66#define ARM_CPU_PART_ARM1156 0xB560 66#define ARM_CPU_PART_ARM1136 0x4100b360
67#define ARM_CPU_PART_ARM1176 0xB760 67#define ARM_CPU_PART_ARM1156 0x4100b560
68#define ARM_CPU_PART_ARM11MPCORE 0xB020 68#define ARM_CPU_PART_ARM1176 0x4100b760
69#define ARM_CPU_PART_CORTEX_A8 0xC080 69#define ARM_CPU_PART_ARM11MPCORE 0x4100b020
70#define ARM_CPU_PART_CORTEX_A9 0xC090 70#define ARM_CPU_PART_CORTEX_A8 0x4100c080
71#define ARM_CPU_PART_CORTEX_A5 0xC050 71#define ARM_CPU_PART_CORTEX_A9 0x4100c090
72#define ARM_CPU_PART_CORTEX_A15 0xC0F0 72#define ARM_CPU_PART_CORTEX_A5 0x4100c050
73#define ARM_CPU_PART_CORTEX_A7 0xC070 73#define ARM_CPU_PART_CORTEX_A7 0x4100c070
74#define ARM_CPU_PART_CORTEX_A12 0xC0D0 74#define ARM_CPU_PART_CORTEX_A12 0x4100c0d0
75#define ARM_CPU_PART_CORTEX_A17 0xC0E0 75#define ARM_CPU_PART_CORTEX_A17 0x4100c0e0
76#define ARM_CPU_PART_CORTEX_A15 0x4100c0f0
76 77
77#define ARM_CPU_XSCALE_ARCH_MASK 0xe000 78#define ARM_CPU_XSCALE_ARCH_MASK 0xe000
78#define ARM_CPU_XSCALE_ARCH_V1 0x2000 79#define ARM_CPU_XSCALE_ARCH_V1 0x2000
@@ -171,14 +172,24 @@ static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
171 return (read_cpuid_id() & 0xFF000000) >> 24; 172 return (read_cpuid_id() & 0xFF000000) >> 24;
172} 173}
173 174
174static inline unsigned int __attribute_const__ read_cpuid_part_number(void) 175/*
176 * The CPU part number is meaningless without referring to the CPU
177 * implementer: implementers are free to define their own part numbers
178 * which are permitted to clash with other implementer part numbers.
179 */
180static inline unsigned int __attribute_const__ read_cpuid_part(void)
181{
182 return read_cpuid_id() & 0xff00fff0;
183}
184
185static inline unsigned int __attribute_const__ __deprecated read_cpuid_part_number(void)
175{ 186{
176 return read_cpuid_id() & 0xFFF0; 187 return read_cpuid_id() & 0xFFF0;
177} 188}
178 189
179static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void) 190static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void)
180{ 191{
181 return read_cpuid_part_number() & ARM_CPU_XSCALE_ARCH_MASK; 192 return read_cpuid_id() & ARM_CPU_XSCALE_ARCH_MASK;
182} 193}
183 194
184static inline unsigned int __attribute_const__ read_cpuid_cachetype(void) 195static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h
index 0393fbab8dd5..bfe163c40024 100644
--- a/arch/arm/include/asm/smp_scu.h
+++ b/arch/arm/include/asm/smp_scu.h
@@ -11,7 +11,7 @@
11 11
12static inline bool scu_a9_has_base(void) 12static inline bool scu_a9_has_base(void)
13{ 13{
14 return read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9; 14 return read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
15} 15}
16 16
17static inline unsigned long scu_a9_get_base(void) 17static inline unsigned long scu_a9_get_base(void)
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index af9e35e8836f..c02c2e8c877d 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -250,40 +250,39 @@ static struct platform_device_id cpu_pmu_plat_device_ids[] = {
250static int probe_current_pmu(struct arm_pmu *pmu) 250static int probe_current_pmu(struct arm_pmu *pmu)
251{ 251{
252 int cpu = get_cpu(); 252 int cpu = get_cpu();
253 unsigned long implementor = read_cpuid_implementor();
254 unsigned long part_number = read_cpuid_part_number();
255 int ret = -ENODEV; 253 int ret = -ENODEV;
256 254
257 pr_info("probing PMU on CPU %d\n", cpu); 255 pr_info("probing PMU on CPU %d\n", cpu);
258 256
257 switch (read_cpuid_part()) {
259 /* ARM Ltd CPUs. */ 258 /* ARM Ltd CPUs. */
260 if (implementor == ARM_CPU_IMP_ARM) { 259 case ARM_CPU_PART_ARM1136:
261 switch (part_number) { 260 case ARM_CPU_PART_ARM1156:
262 case ARM_CPU_PART_ARM1136: 261 case ARM_CPU_PART_ARM1176:
263 case ARM_CPU_PART_ARM1156: 262 ret = armv6pmu_init(pmu);
264 case ARM_CPU_PART_ARM1176: 263 break;
265 ret = armv6pmu_init(pmu); 264 case ARM_CPU_PART_ARM11MPCORE:
266 break; 265 ret = armv6mpcore_pmu_init(pmu);
267 case ARM_CPU_PART_ARM11MPCORE: 266 break;
268 ret = armv6mpcore_pmu_init(pmu); 267 case ARM_CPU_PART_CORTEX_A8:
269 break; 268 ret = armv7_a8_pmu_init(pmu);
270 case ARM_CPU_PART_CORTEX_A8: 269 break;
271 ret = armv7_a8_pmu_init(pmu); 270 case ARM_CPU_PART_CORTEX_A9:
272 break; 271 ret = armv7_a9_pmu_init(pmu);
273 case ARM_CPU_PART_CORTEX_A9: 272 break;
274 ret = armv7_a9_pmu_init(pmu); 273
275 break; 274 default:
276 } 275 if (read_cpuid_implementor() == ARM_CPU_IMP_INTEL) {
277 /* Intel CPUs [xscale]. */ 276 switch (xscale_cpu_arch_version()) {
278 } else if (implementor == ARM_CPU_IMP_INTEL) { 277 case ARM_CPU_XSCALE_ARCH_V1:
279 switch (xscale_cpu_arch_version()) { 278 ret = xscale1pmu_init(pmu);
280 case ARM_CPU_XSCALE_ARCH_V1: 279 break;
281 ret = xscale1pmu_init(pmu); 280 case ARM_CPU_XSCALE_ARCH_V2:
282 break; 281 ret = xscale2pmu_init(pmu);
283 case ARM_CPU_XSCALE_ARCH_V2: 282 break;
284 ret = xscale2pmu_init(pmu); 283 }
285 break;
286 } 284 }
285 break;
287 } 286 }
288 287
289 put_cpu(); 288 put_cpu();
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index b23a59c1c522..70bf49b8b244 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -274,13 +274,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
274 274
275int __attribute_const__ kvm_target_cpu(void) 275int __attribute_const__ kvm_target_cpu(void)
276{ 276{
277 unsigned long implementor = read_cpuid_implementor(); 277 switch (read_cpuid_part()) {
278 unsigned long part_number = read_cpuid_part_number();
279
280 if (implementor != ARM_CPU_IMP_ARM)
281 return -EINVAL;
282
283 switch (part_number) {
284 case ARM_CPU_PART_CORTEX_A7: 278 case ARM_CPU_PART_CORTEX_A7:
285 return KVM_ARM_TARGET_CORTEX_A7; 279 return KVM_ARM_TARGET_CORTEX_A7;
286 case ARM_CPU_PART_CORTEX_A15: 280 case ARM_CPU_PART_CORTEX_A15:
diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c
index 0d95bc8e49d8..a96b78f93f2b 100644
--- a/arch/arm/mach-exynos/mcpm-exynos.c
+++ b/arch/arm/mach-exynos/mcpm-exynos.c
@@ -196,7 +196,7 @@ static void exynos_power_down(void)
196 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) { 196 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
197 arch_spin_unlock(&exynos_mcpm_lock); 197 arch_spin_unlock(&exynos_mcpm_lock);
198 198
199 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) { 199 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
200 /* 200 /*
201 * On the Cortex-A15 we need to disable 201 * On the Cortex-A15 we need to disable
202 * L2 prefetching before flushing the cache. 202 * L2 prefetching before flushing the cache.
@@ -291,7 +291,7 @@ static void __naked exynos_pm_power_up_setup(unsigned int affinity_level)
291 291
292static void __init exynos_cache_off(void) 292static void __init exynos_cache_off(void)
293{ 293{
294 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) { 294 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
295 /* disable L2 prefetching on the Cortex-A15 */ 295 /* disable L2 prefetching on the Cortex-A15 */
296 asm volatile( 296 asm volatile(
297 "mcr p15, 1, %0, c15, c0, 3\n\t" 297 "mcr p15, 1, %0, c15, c0, 3\n\t"
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index 1c8d31e39520..8dc1d3a3a8bf 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -188,7 +188,7 @@ static void __init exynos_smp_init_cpus(void)
188 void __iomem *scu_base = scu_base_addr(); 188 void __iomem *scu_base = scu_base_addr();
189 unsigned int i, ncores; 189 unsigned int i, ncores;
190 190
191 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9) 191 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
192 ncores = scu_base ? scu_get_core_count(scu_base) : 1; 192 ncores = scu_base ? scu_get_core_count(scu_base) : 1;
193 else 193 else
194 /* 194 /*
@@ -214,7 +214,7 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
214 214
215 exynos_sysram_init(); 215 exynos_sysram_init();
216 216
217 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9) 217 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
218 scu_enable(scu_base_addr()); 218 scu_enable(scu_base_addr());
219 219
220 /* 220 /*
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
index 202ca73e49c4..67d383de614f 100644
--- a/arch/arm/mach-exynos/pm.c
+++ b/arch/arm/mach-exynos/pm.c
@@ -300,7 +300,7 @@ static int exynos_pm_suspend(void)
300 tmp = (S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0); 300 tmp = (S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0);
301 __raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION); 301 __raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION);
302 302
303 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9) 303 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
304 exynos_cpu_save_register(); 304 exynos_cpu_save_register();
305 305
306 return 0; 306 return 0;
@@ -334,7 +334,7 @@ static void exynos_pm_resume(void)
334 if (exynos_pm_central_resume()) 334 if (exynos_pm_central_resume())
335 goto early_wakeup; 335 goto early_wakeup;
336 336
337 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9) 337 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
338 exynos_cpu_restore_register(); 338 exynos_cpu_restore_register();
339 339
340 /* For release retention */ 340 /* For release retention */
@@ -353,7 +353,7 @@ static void exynos_pm_resume(void)
353 353
354 s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save)); 354 s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));
355 355
356 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9) 356 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
357 scu_enable(S5P_VA_SCU); 357 scu_enable(S5P_VA_SCU);
358 358
359early_wakeup: 359early_wakeup:
@@ -440,15 +440,14 @@ static int exynos_cpu_pm_notifier(struct notifier_block *self,
440 case CPU_PM_ENTER: 440 case CPU_PM_ENTER:
441 if (cpu == 0) { 441 if (cpu == 0) {
442 exynos_pm_central_suspend(); 442 exynos_pm_central_suspend();
443 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9) 443 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
444 exynos_cpu_save_register(); 444 exynos_cpu_save_register();
445 } 445 }
446 break; 446 break;
447 447
448 case CPU_PM_EXIT: 448 case CPU_PM_EXIT:
449 if (cpu == 0) { 449 if (cpu == 0) {
450 if (read_cpuid_part_number() == 450 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) {
451 ARM_CPU_PART_CORTEX_A9) {
452 scu_enable(S5P_VA_SCU); 451 scu_enable(S5P_VA_SCU);
453 exynos_cpu_restore_register(); 452 exynos_cpu_restore_register();
454 } 453 }
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
index 54a9fff77c7d..2fb78b4648cb 100644
--- a/arch/arm/mach-vexpress/tc2_pm.c
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -152,7 +152,7 @@ static void tc2_pm_down(u64 residency)
152 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) { 152 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
153 arch_spin_unlock(&tc2_pm_lock); 153 arch_spin_unlock(&tc2_pm_lock);
154 154
155 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) { 155 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
156 /* 156 /*
157 * On the Cortex-A15 we need to disable 157 * On the Cortex-A15 we need to disable
158 * L2 prefetching before flushing the cache. 158 * L2 prefetching before flushing the cache.
@@ -326,7 +326,7 @@ static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
326static void __init tc2_cache_off(void) 326static void __init tc2_cache_off(void)
327{ 327{
328 pr_info("TC2: disabling cache during MCPM loopback test\n"); 328 pr_info("TC2: disabling cache during MCPM loopback test\n");
329 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) { 329 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
330 /* disable L2 prefetching on the Cortex-A15 */ 330 /* disable L2 prefetching on the Cortex-A15 */
331 asm volatile( 331 asm volatile(
332 "mcr p15, 1, %0, c15, c0, 3 \n\t" 332 "mcr p15, 1, %0, c15, c0, 3 \n\t"
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 076172b69422..556df22e89bd 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -665,7 +665,7 @@ static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, v
665static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock) 665static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
666{ 666{
667 unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_PART_MASK; 667 unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_PART_MASK;
668 bool cortex_a9 = read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9; 668 bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
669 669
670 if (rev >= L310_CACHE_ID_RTL_R2P0) { 670 if (rev >= L310_CACHE_ID_RTL_R2P0) {
671 if (cortex_a9) { 671 if (cortex_a9) {
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index 60e5a170c4d2..e6833771a716 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -250,7 +250,7 @@ static void __init global_timer_of_register(struct device_node *np)
250 * fire when the timer value is greater than or equal to. In previous 250 * fire when the timer value is greater than or equal to. In previous
251 * revisions the comparators fired when the timer value was equal to. 251 * revisions the comparators fired when the timer value was equal to.
252 */ 252 */
253 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9 253 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9
254 && (read_cpuid_id() & 0xf0000f) < 0x200000) { 254 && (read_cpuid_id() & 0xf0000f) < 0x200000) {
255 pr_warn("global-timer: non support for this cpu version.\n"); 255 pr_warn("global-timer: non support for this cpu version.\n");
256 return; 256 return;