 arch/powerpc/include/asm/cputhreads.h |   15 +++++++++------
 arch/powerpc/kernel/smp.c             |   19 ++++++++++++++++---
 arch/powerpc/mm/mmu_context_nohash.c  |   12 ++++++------
 3 files changed, 31 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index a8e18447c62b..f71bb4c118b4 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -61,22 +61,25 @@ static inline cpumask_t cpu_online_cores_map(void)
 	return cpu_thread_mask_to_cores(cpu_online_map);
 }
 
-static inline int cpu_thread_to_core(int cpu)
-{
-	return cpu >> threads_shift;
-}
+#ifdef CONFIG_SMP
+int cpu_core_index_of_thread(int cpu);
+int cpu_first_thread_of_core(int core);
+#else
+static inline int cpu_core_index_of_thread(int cpu) { return cpu; }
+static inline int cpu_first_thread_of_core(int core) { return core; }
+#endif
 
 static inline int cpu_thread_in_core(int cpu)
 {
 	return cpu & (threads_per_core - 1);
 }
 
-static inline int cpu_first_thread_in_core(int cpu)
+static inline int cpu_first_thread_sibling(int cpu)
 {
 	return cpu & ~(threads_per_core - 1);
 }
 
-static inline int cpu_last_thread_in_core(int cpu)
+static inline int cpu_last_thread_sibling(int cpu)
 {
 	return cpu | (threads_per_core - 1);
 }
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 68034bbf2e4f..981360509172 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -466,7 +466,20 @@ out:
 	return id;
 }
 
-/* Must be called when no change can occur to cpu_present_mask,
+/* Helper routines for cpu to core mapping */
+int cpu_core_index_of_thread(int cpu)
+{
+	return cpu >> threads_shift;
+}
+EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
+
+int cpu_first_thread_of_core(int core)
+{
+	return core << threads_shift;
+}
+EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
+
+/* Must be called when no change can occur to cpu_present_map,
  * i.e. during cpu online or offline.
  */
 static struct device_node *cpu_to_l2cache(int cpu)
@@ -514,7 +527,7 @@ int __devinit start_secondary(void *unused)
 	notify_cpu_starting(cpu);
 	set_cpu_online(cpu, true);
 	/* Update sibling maps */
-	base = cpu_first_thread_in_core(cpu);
+	base = cpu_first_thread_sibling(cpu);
 	for (i = 0; i < threads_per_core; i++) {
 		if (cpu_is_offline(base + i))
 			continue;
@@ -600,7 +613,7 @@ int __cpu_disable(void)
 		return err;
 
 	/* Update sibling maps */
-	base = cpu_first_thread_in_core(cpu);
+	base = cpu_first_thread_sibling(cpu);
 	for (i = 0; i < threads_per_core; i++) {
 		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
 		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 5ce99848d91e..c0aab52da3a5 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -111,8 +111,8 @@ static unsigned int steal_context_smp(unsigned int id)
 	 * a core map instead but this will do for now.
 	 */
 	for_each_cpu(cpu, mm_cpumask(mm)) {
-		for (i = cpu_first_thread_in_core(cpu);
-		     i <= cpu_last_thread_in_core(cpu); i++)
+		for (i = cpu_first_thread_sibling(cpu);
+		     i <= cpu_last_thread_sibling(cpu); i++)
 			__set_bit(id, stale_map[i]);
 		cpu = i - 1;
 	}
@@ -264,14 +264,14 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	 */
 	if (test_bit(id, stale_map[cpu])) {
 		pr_hardcont(" | stale flush %d [%d..%d]",
-			    id, cpu_first_thread_in_core(cpu),
-			    cpu_last_thread_in_core(cpu));
+			    id, cpu_first_thread_sibling(cpu),
+			    cpu_last_thread_sibling(cpu));
 
 		local_flush_tlb_mm(next);
 
 		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
-		for (i = cpu_first_thread_in_core(cpu);
-		     i <= cpu_last_thread_in_core(cpu); i++) {
+		for (i = cpu_first_thread_sibling(cpu);
+		     i <= cpu_last_thread_sibling(cpu); i++) {
 			__clear_bit(id, stale_map[i]);
 		}
 	}
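
For reference, every helper touched by this patch reduces to shift-and-mask
arithmetic on the logical CPU number, which works because sibling hardware
threads of a core are numbered contiguously and threads_per_core is a power
of two. The standalone user-space sketch below (not part of the patch; the
SMT4 values threads_per_core = 4 and threads_shift = 2 are hypothetical)
mirrors the renamed helpers:

/*
 * Standalone illustration (not from the patch) of the powerpc
 * thread/core mapping helpers.  Assumes sibling hardware threads are
 * numbered contiguously and threads_per_core is a power of two; the
 * SMT4 values below are hypothetical.
 */
#include <stdio.h>

static const int threads_per_core = 4;	/* hypothetical SMT4 core */
static const int threads_shift = 2;	/* log2(threads_per_core) */

static int cpu_core_index_of_thread(int cpu)  { return cpu >> threads_shift; }
static int cpu_first_thread_of_core(int core) { return core << threads_shift; }
static int cpu_thread_in_core(int cpu)        { return cpu & (threads_per_core - 1); }
static int cpu_first_thread_sibling(int cpu)  { return cpu & ~(threads_per_core - 1); }
static int cpu_last_thread_sibling(int cpu)   { return cpu | (threads_per_core - 1); }

int main(void)
{
	int cpu = 6;	/* logical CPU 6 is thread 2 of core 1 under SMT4 */

	printf("core index of thread %d : %d\n", cpu, cpu_core_index_of_thread(cpu));
	printf("thread in core         : %d\n", cpu_thread_in_core(cpu));
	printf("first sibling          : %d\n", cpu_first_thread_sibling(cpu));
	printf("last sibling           : %d\n", cpu_last_thread_sibling(cpu));
	printf("first cpu of core 1    : %d\n", cpu_first_thread_of_core(1));
	return 0;
}

For cpu = 6 this prints core index 1, thread 2, and the sibling range
[4..7], the same [first..last] window that steal_context_smp() and
switch_mmu_context() walk when setting or clearing bits in stale_map[].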