Diffstat (limited to 'arch/cris/arch-v32/kernel/smp.c')
-rw-r--r--	arch/cris/arch-v32/kernel/smp.c	348
1 file changed, 348 insertions, 0 deletions
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
new file mode 100644
index 000000000000..2c5cae04a95c
--- /dev/null
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -0,0 +1,348 @@
#include <asm/delay.h>
#include <asm/arch/irq.h>
#include <asm/arch/hwregs/intr_vect.h>
#include <asm/arch/hwregs/intr_vect_defs.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/arch/hwregs/mmu_defs_asm.h>
#include <asm/arch/hwregs/supp_reg.h>
#include <asm/atomic.h>

#include <linux/err.h>
#include <linux/init.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>

#define IPI_SCHEDULE 1
#define IPI_CALL 2
#define IPI_FLUSH_TLB 4

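/* Sentinel passed in place of the mm/vma pointer to request a full
 * flush; tested in flush_tlb_common() and crisv32_ipi_interrupt(). */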
#define FLUSH_ALL (void*)0xffffffff

/* Vector of locks used for various atomic operations */
spinlock_t cris_atomic_locks[] = { [0 ... LOCK_COUNT - 1] = SPIN_LOCK_UNLOCKED};

/* CPU masks */
cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;

/* Variables used during SMP boot */
volatile int cpu_now_booting = 0;
volatile struct thread_info *smp_init_current_idle_thread;

/* Variables used during IPI */
static DEFINE_SPINLOCK(call_lock);
static DEFINE_SPINLOCK(tlbstate_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	int wait;
};

static struct call_data_struct * call_data;

static struct mm_struct* flush_mm;
static struct vm_area_struct* flush_vma;
static unsigned long flush_addr;

extern int setup_irq(int, struct irqaction *);

/* Interrupt controller register addresses, one block per CPU */
static unsigned long irq_regs[NR_CPUS] =
{
	regi_irq,
	regi_irq2
};

static irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static int send_ipi(int vector, int wait, cpumask_t cpu_mask);
static struct irqaction irq_ipi = { crisv32_ipi_interrupt, SA_INTERRUPT,
				    CPU_MASK_NONE, "ipi", NULL, NULL};

extern void cris_mmu_init(void);
extern void cris_timer_init(void);

/* SMP initialization */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/* From now on we can expect IPIs so set them up */
	setup_irq(IPI_INTR_VECT, &irq_ipi);

	/* Mark all possible CPUs as present */
	for (i = 0; i < max_cpus; i++)
		cpu_set(i, phys_cpu_present_map);
}

void __devinit smp_prepare_boot_cpu(void)
{
	/* PGD pointer has moved after per_cpu initialization so
	 * update the MMU.
	 */
	pgd_t **pgd;
	pgd = (pgd_t**)&per_cpu(current_pgd, smp_processor_id());

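	/* Write the new PGD to both support register banks; banks 1 and 2
	 * presumably select the instruction and data MMUs respectively. */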
	SUPP_BANK_SEL(1);
	SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
	SUPP_BANK_SEL(2);
	SUPP_REG_WR(RW_MM_TLB_PGD, pgd);

	cpu_set(0, cpu_online_map);
	cpu_set(0, phys_cpu_present_map);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* Bring one cpu online. */
static int __init
smp_boot_one_cpu(int cpuid)
{
	unsigned timeout;
	struct task_struct *idle;

	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("SMP: fork failed for CPU:%d", cpuid);

	idle->thread_info->cpu = cpuid;

	/* Information to the CPU that is about to boot */
	smp_init_current_idle_thread = idle->thread_info;
	cpu_now_booting = cpuid;

	/* Wait for CPU to come online */
	for (timeout = 0; timeout < 10000; timeout++) {
		if(cpu_online(cpuid)) {
			cpu_now_booting = 0;
			smp_init_current_idle_thread = NULL;
			return 0; /* CPU online */
		}
		udelay(100);
		barrier();
	}

	put_task_struct(idle);
	idle = NULL;

	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;
}

/* Secondary CPUs start using C here. Here we need to set up CPU
 * specific stuff such as the local timer and the MMU. */
void __init smp_callin(void)
{
	extern void cpu_idle(void);

	int cpu = cpu_now_booting;
	reg_intr_vect_rw_mask vect_mask = {0};

	/* Initialise the idle task for this CPU */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Set up MMU */
	cris_mmu_init();
	__flush_tlb_all();

	/* Setup local timer. */
	cris_timer_init();

	/* Enable IRQ and idle */
	REG_WR(intr_vect, irq_regs[cpu], rw_mask, vect_mask);
	unmask_irq(IPI_INTR_VECT);
	unmask_irq(TIMER_INTR_VECT);
	local_irq_enable();

	cpu_set(cpu, cpu_online_map);
	cpu_idle();
}

/* Stop execution on this CPU. */
void stop_this_cpu(void* dummy)
{
	local_irq_disable();
	asm volatile("halt");
}

/* Other calls */
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}

int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}


/* cache_decay_ticks is used by the scheduler to decide if a process
 * is "hot" on one CPU. A higher value means a higher penalty to move
 * a process to another CPU. Our cache is rather small so we report
 * 1 tick.
 */
unsigned long cache_decay_ticks = 1;

int __devinit __cpu_up(unsigned int cpu)
{
	smp_boot_one_cpu(cpu);
	return cpu_online(cpu) ? 0 : -ENOSYS;
}

void smp_send_reschedule(int cpu)
{
	cpumask_t cpu_mask = CPU_MASK_NONE;
	cpu_set(cpu, cpu_mask);
	send_ipi(IPI_SCHEDULE, 0, cpu_mask);
}

/* TLB flushing
 *
 * A flush needs to be done on the local CPU and on any other CPU that
 * may have the same mapping. mm->cpu_vm_mask is used to keep track
 * of which CPUs a specific process has executed on.
 */
void flush_tlb_common(struct mm_struct* mm, struct vm_area_struct* vma, unsigned long addr)
{
	unsigned long flags;
	cpumask_t cpu_mask;

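	/* tlbstate_lock keeps the flush_mm/flush_vma/flush_addr globals
	 * stable until every targeted CPU has handled the IPI; passing
	 * wait == 1 below makes send_ipi() block until completion. */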
	spin_lock_irqsave(&tlbstate_lock, flags);
	cpu_mask = (mm == FLUSH_ALL ? CPU_MASK_ALL : mm->cpu_vm_mask);
	cpu_clear(smp_processor_id(), cpu_mask);
	flush_mm = mm;
	flush_vma = vma;
	flush_addr = addr;
	send_ipi(IPI_FLUSH_TLB, 1, cpu_mask);
	spin_unlock_irqrestore(&tlbstate_lock, flags);
}

void flush_tlb_all(void)
{
	__flush_tlb_all();
	flush_tlb_common(FLUSH_ALL, FLUSH_ALL, 0);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_mm(mm);
	flush_tlb_common(mm, FLUSH_ALL, 0);
	/* No more mappings in other CPUs */
	cpus_clear(mm->cpu_vm_mask);
	cpu_set(smp_processor_id(), mm->cpu_vm_mask);
}

void flush_tlb_page(struct vm_area_struct *vma,
		    unsigned long addr)
{
	__flush_tlb_page(vma, addr);
	flush_tlb_common(vma->vm_mm, vma, addr);
}

/* Inter processor interrupts
 *
 * The IPIs are used for:
 * * Force a schedule on a CPU
 * * Flush TLB on other CPUs
 * * Call a function on other CPUs
 */

int send_ipi(int vector, int wait, cpumask_t cpu_mask)
{
	int i = 0;
	reg_intr_vect_rw_ipi ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
	int ret = 0;

	/* Calculate CPUs to send to. */
	cpus_and(cpu_mask, cpu_mask, cpu_online_map);

	/* Send the IPI. */
	for_each_cpu_mask(i, cpu_mask)
	{
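		/* OR the new vector bits into the register image read
		 * above and write it to this CPU's IPI register. */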
		ipi.vector |= vector;
		REG_WR(intr_vect, irq_regs[i], rw_ipi, ipi);
	}

	/* Wait for the IPI to finish on the other CPUs */
	if (wait) {
		for_each_cpu_mask(i, cpu_mask) {
			int j;
			for (j = 0 ; j < 1000; j++) {
				ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
				if (!ipi.vector)
					break;
				udelay(100);
			}

			/* Timeout? */
			if (ipi.vector) {
				printk("SMP call timeout from %d to %d\n", smp_processor_id(), i);
				ret = -ETIMEDOUT;
				dump_stack();
			}
		}
	}
	return ret;
}

/*
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *info), void *info,
		      int nonatomic, int wait)
{
	cpumask_t cpu_mask = CPU_MASK_ALL;
	struct call_data_struct data;
	int ret;

	cpu_clear(smp_processor_id(), cpu_mask);

	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	data.wait = wait;

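	/* call_lock serializes use of the single global call_data slot
	 * so concurrent callers cannot overwrite each other's data. */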
	spin_lock(&call_lock);
	call_data = &data;
	ret = send_ipi(IPI_CALL, wait, cpu_mask);
	spin_unlock(&call_lock);

	return ret;
}

irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	reg_intr_vect_rw_ipi ipi;

	ipi = REG_RD(intr_vect, irq_regs[smp_processor_id()], rw_ipi);

	if (ipi.vector & IPI_CALL) {
		func(info);
	}
	if (ipi.vector & IPI_FLUSH_TLB) {
		if (flush_mm == FLUSH_ALL)
			__flush_tlb_all();
		else if (flush_vma == FLUSH_ALL)
			__flush_tlb_mm(flush_mm);
		else
			__flush_tlb_page(flush_vma, flush_addr);
	}

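	/* Acknowledge the IPI by clearing the vector bits; senders poll
	 * this register to detect completion (see send_ipi()). */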
	ipi.vector = 0;
	REG_WR(intr_vect, irq_regs[smp_processor_id()], rw_ipi, ipi);

	return IRQ_HANDLED;
}