Diffstat (limited to 'arch/mips/netlogic/common/smp.c')
 arch/mips/netlogic/common/smp.c | 89 +++++++++++++++++++++++++---------------
 1 file changed, 51 insertions(+), 38 deletions(-)
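
Note: at a glance, this patch replaces the single global nlm_pic_base with a per-node PIC base looked up from the target CPU, so IPIs and the NMI wakeup reach CPUs on any node of a multi-chip system. Below is a minimal standalone sketch of that cpu -> node -> picbase mapping, not part of the patch: the struct layout and picbase values are mock data, and only the NLM_CPUS_PER_NODE division mirrors the kernel code in the diff.

/* Standalone model of the per-node PIC lookup introduced by this patch.
 * NLM_CPUS_PER_NODE, nlm_get_node() and the picbase field mimic the
 * netlogic definitions; the addresses here are illustrative only. */
#include <stdint.h>
#include <stdio.h>

#define NLM_CPUS_PER_NODE	32	/* 8 cores x 4 threads per node */
#define NLM_NR_NODES		4

struct nlm_soc_info {
	uint64_t picbase;	/* per-node PIC base (mock values below) */
};

static struct nlm_soc_info nlm_nodes[NLM_NR_NODES] = {
	{ 0x18000000ULL }, { 0x19000000ULL },
	{ 0x1a000000ULL }, { 0x1b000000ULL },
};

static struct nlm_soc_info *nlm_get_node(int node)
{
	return &nlm_nodes[node];
}

int main(void)
{
	int cpu = 40;				/* hardware CPU id */
	int node = cpu / NLM_CPUS_PER_NODE;	/* CPU 40 -> node 1 */

	printf("cpu %d: node %d, picbase %#llx\n", cpu, node,
	       (unsigned long long)nlm_get_node(node)->picbase);
	return 0;
}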
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index fab316de57e9..a080d9ee3cd7 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -59,12 +59,17 @@
 
 void nlm_send_ipi_single(int logical_cpu, unsigned int action)
 {
-	int cpu = cpu_logical_map(logical_cpu);
+	int cpu, node;
+	uint64_t picbase;
+
+	cpu = cpu_logical_map(logical_cpu);
+	node = cpu / NLM_CPUS_PER_NODE;
+	picbase = nlm_get_node(node)->picbase;
 
 	if (action & SMP_CALL_FUNCTION)
-		nlm_pic_send_ipi(nlm_pic_base, cpu, IRQ_IPI_SMP_FUNCTION, 0);
+		nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_FUNCTION, 0);
 	if (action & SMP_RESCHEDULE_YOURSELF)
-		nlm_pic_send_ipi(nlm_pic_base, cpu, IRQ_IPI_SMP_RESCHEDULE, 0);
+		nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_RESCHEDULE, 0);
 }
 
 void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action)
@@ -96,11 +101,12 @@ void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc)
 void nlm_early_init_secondary(int cpu)
 {
 	change_c0_config(CONF_CM_CMASK, 0x3);
-	write_c0_ebase((uint32_t)nlm_common_ebase);
 #ifdef CONFIG_CPU_XLP
-	if (hard_smp_processor_id() % 4 == 0)
+	/* mmu init, once per core */
+	if (cpu % NLM_THREADS_PER_CORE == 0)
 		xlp_mmu_init();
 #endif
+	write_c0_ebase(nlm_current_node()->ebase);
 }
 
 /*
@@ -108,8 +114,12 @@ void nlm_early_init_secondary(int cpu)
  */
 static void __cpuinit nlm_init_secondary(void)
 {
-	current_cpu_data.core = hard_smp_processor_id() / 4;
-	nlm_smp_irq_init();
+	int hwtid;
+
+	hwtid = hard_smp_processor_id();
+	current_cpu_data.core = hwtid / NLM_THREADS_PER_CORE;
+	nlm_percpu_init(hwtid);
+	nlm_smp_irq_init(hwtid);
 }
 
 void nlm_prepare_cpus(unsigned int max_cpus)
@@ -120,9 +130,6 @@ void nlm_prepare_cpus(unsigned int max_cpus)
 
 void nlm_smp_finish(void)
 {
-#ifdef notyet
-	nlm_common_msgring_cpu_init();
-#endif
 	local_irq_enable();
 }
 
@@ -142,27 +149,27 @@ cpumask_t phys_cpu_present_map;
 
 void nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
 {
-	unsigned long gp = (unsigned long)task_thread_info(idle);
-	unsigned long sp = (unsigned long)__KSTK_TOS(idle);
-	int cpu = cpu_logical_map(logical_cpu);
+	int cpu, node;
 
-	nlm_next_sp = sp;
-	nlm_next_gp = gp;
+	cpu = cpu_logical_map(logical_cpu);
+	node = cpu / NLM_CPUS_PER_NODE;
+	nlm_next_sp = (unsigned long)__KSTK_TOS(idle);
+	nlm_next_gp = (unsigned long)task_thread_info(idle);
 
-	/* barrier */
+	/* barrier for sp/gp store above */
 	__sync();
-	nlm_pic_send_ipi(nlm_pic_base, cpu, 1, 1);
+	nlm_pic_send_ipi(nlm_get_node(node)->picbase, cpu, 1, 1);	/* NMI */
 }
 
 void __init nlm_smp_setup(void)
 {
 	unsigned int boot_cpu;
-	int num_cpus, i;
+	int num_cpus, i, ncore;
 
 	boot_cpu = hard_smp_processor_id();
-	cpus_clear(phys_cpu_present_map);
+	cpumask_clear(&phys_cpu_present_map);
 
-	cpu_set(boot_cpu, phys_cpu_present_map);
+	cpumask_set_cpu(boot_cpu, &phys_cpu_present_map);
 	__cpu_number_map[boot_cpu] = 0;
 	__cpu_logical_map[0] = boot_cpu;
 	set_cpu_possible(0, true);
@@ -174,7 +181,7 @@ void __init nlm_smp_setup(void)
 		 * it is only set for ASPs (see smpboot.S)
 		 */
 		if (nlm_cpu_ready[i]) {
-			cpu_set(i, phys_cpu_present_map);
+			cpumask_set_cpu(i, &phys_cpu_present_map);
 			__cpu_number_map[i] = num_cpus;
 			__cpu_logical_map[num_cpus] = i;
 			set_cpu_possible(num_cpus, true);
@@ -182,20 +189,28 @@ void __init nlm_smp_setup(void)
 		}
 	}
 
+	/* check with the cores we have woken up */
+	for (ncore = 0, i = 0; i < NLM_NR_NODES; i++)
+		ncore += hweight32(nlm_get_node(i)->coremask);
+
 	pr_info("Phys CPU present map: %lx, possible map %lx\n",
-		(unsigned long)phys_cpu_present_map.bits[0],
+		(unsigned long)cpumask_bits(&phys_cpu_present_map)[0],
 		(unsigned long)cpumask_bits(cpu_possible_mask)[0]);
 
-	pr_info("Detected %i Slave CPU(s)\n", num_cpus);
+	pr_info("Detected (%dc%dt) %d Slave CPU(s)\n", ncore,
+		nlm_threads_per_core, num_cpus);
 	nlm_set_nmi_handler(nlm_boot_secondary_cpus);
 }
 
-static int nlm_parse_cpumask(u32 cpu_mask)
+static int nlm_parse_cpumask(cpumask_t *wakeup_mask)
 {
 	uint32_t core0_thr_mask, core_thr_mask;
-	int threadmode, i;
+	int threadmode, i, j;
 
-	core0_thr_mask = cpu_mask & 0xf;
+	core0_thr_mask = 0;
+	for (i = 0; i < NLM_THREADS_PER_CORE; i++)
+		if (cpumask_test_cpu(i, wakeup_mask))
+			core0_thr_mask |= (1 << i);
 	switch (core0_thr_mask) {
 	case 1:
 		nlm_threads_per_core = 1;
@@ -214,25 +229,23 @@ static int nlm_parse_cpumask(u32 cpu_mask)
 	}
 
 	/* Verify other cores CPU masks */
-	nlm_coremask = 1;
-	nlm_cpumask = core0_thr_mask;
-	for (i = 1; i < 8; i++) {
-		core_thr_mask = (cpu_mask >> (i * 4)) & 0xf;
-		if (core_thr_mask) {
-			if (core_thr_mask != core0_thr_mask)
+	for (i = 0; i < NR_CPUS; i += NLM_THREADS_PER_CORE) {
+		core_thr_mask = 0;
+		for (j = 0; j < NLM_THREADS_PER_CORE; j++)
+			if (cpumask_test_cpu(i + j, wakeup_mask))
+				core_thr_mask |= (1 << j);
+		if (core_thr_mask != 0 && core_thr_mask != core0_thr_mask)
 				goto unsupp;
-			nlm_coremask |= 1 << i;
-			nlm_cpumask |= core0_thr_mask << (4 * i);
-		}
 	}
 	return threadmode;
 
 unsupp:
-	panic("Unsupported CPU mask %x\n", cpu_mask);
+	panic("Unsupported CPU mask %lx\n",
+		(unsigned long)cpumask_bits(wakeup_mask)[0]);
 	return 0;
 }
 
-int __cpuinit nlm_wakeup_secondary_cpus(u32 wakeup_mask)
+int __cpuinit nlm_wakeup_secondary_cpus(void)
 {
 	unsigned long reset_vec;
 	char *reset_data;
@@ -244,7 +257,7 @@ int __cpuinit nlm_wakeup_secondary_cpus(u32 wakeup_mask)
 		(nlm_reset_entry_end - nlm_reset_entry));
 
 	/* verify the mask and setup core config variables */
-	threadmode = nlm_parse_cpumask(wakeup_mask);
+	threadmode = nlm_parse_cpumask(&nlm_cpumask);
 
 	/* Setup CPU init parameters */
 	reset_data = (char *)CKSEG1ADDR(RESET_DATA_PHYS);
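
Note: the reworked nlm_parse_cpumask() above requires every woken core to use the same thread pattern as core 0, and panics otherwise. Below is a standalone model of that check, not part of the patch: a plain 64-bit word and a test_cpu() helper stand in for the kernel's cpumask_t and cpumask_test_cpu(); only the loop structure mirrors the hunk above.

/* Standalone model of the per-core thread-mask check in
 * nlm_parse_cpumask().  Each core owns NLM_THREADS_PER_CORE
 * consecutive CPU ids; any core whose enabled-thread pattern
 * differs from core 0's is rejected. */
#include <stdint.h>
#include <stdio.h>

#define NLM_THREADS_PER_CORE	4
#define NR_CPUS			64

static int test_cpu(int cpu, uint64_t mask)
{
	return (mask >> cpu) & 1;
}

int main(void)
{
	uint64_t wakeup_mask = 0x3333;	/* threads 0-1 on cores 0..3 */
	uint32_t core0_thr_mask = 0, core_thr_mask;
	int i, j;

	/* thread pattern of core 0 */
	for (i = 0; i < NLM_THREADS_PER_CORE; i++)
		if (test_cpu(i, wakeup_mask))
			core0_thr_mask |= (1 << i);

	/* every active core must match core 0's pattern */
	for (i = 0; i < NR_CPUS; i += NLM_THREADS_PER_CORE) {
		core_thr_mask = 0;
		for (j = 0; j < NLM_THREADS_PER_CORE; j++)
			if (test_cpu(i + j, wakeup_mask))
				core_thr_mask |= (1 << j);
		if (core_thr_mask != 0 && core_thr_mask != core0_thr_mask) {
			printf("unsupported mask at core %d\n",
			       i / NLM_THREADS_PER_CORE);
			return 1;
		}
	}
	printf("core 0 thread mask: 0x%x (supported)\n", core0_thr_mask);
	return 0;
}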