Diffstat (limited to 'arch/arc/kernel')
-rw-r--r--  arch/arc/kernel/Makefile |   1
-rw-r--r--  arch/arc/kernel/ctx_sw.c |  11
-rw-r--r--  arch/arc/kernel/entry.S  |   4
-rw-r--r--  arch/arc/kernel/head.S   |  33
-rw-r--r--  arch/arc/kernel/irq.c    |   5
-rw-r--r--  arch/arc/kernel/setup.c  |   4
-rw-r--r--  arch/arc/kernel/smp.c    | 320
7 files changed, 378 insertions(+), 0 deletions(-)
diff --git a/arch/arc/kernel/Makefile b/arch/arc/kernel/Makefile
index f32f65f98850..46c15ff97e97 100644
--- a/arch/arc/kernel/Makefile
+++ b/arch/arc/kernel/Makefile
@@ -13,6 +13,7 @@ obj-y += signal.o traps.o sys.o troubleshoot.o stacktrace.o clk.o
 obj-y	+= devtree.o
 
 obj-$(CONFIG_MODULES)	+= arcksyms.o module.o
+obj-$(CONFIG_SMP)	+= smp.o
 
 obj-$(CONFIG_ARC_FPU_SAVE_RESTORE)	+= fpu.o
 CFLAGS_fpu.o += -mdpfp
diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c
index fbf739cbaf7d..60844dac6132 100644
--- a/arch/arc/kernel/ctx_sw.c
+++ b/arch/arc/kernel/ctx_sw.c
@@ -58,7 +58,18 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task)
 	 * For SMP extra work to get to &_current_task[cpu]
 	 * (open coded SET_CURR_TASK_ON_CPU)
 	 */
+#ifndef CONFIG_SMP
 	"st  %2, [@_current_task]	\n\t"
+#else
+	"lr   r24, [identity]		\n\t"
+	"lsr  r24, r24, 8		\n\t"
+	"bmsk r24, r24, 7		\n\t"
+	"add2 r24, @_current_task, r24	\n\t"
+	"st   %2,  [r24]		\n\t"
+#endif
+#ifdef CONFIG_ARC_CURR_IN_REG
+	"mov r25, %2	\n\t"
+#endif
 
 	/* get ksp of incoming task from tsk->thread.ksp */
 	"ld.as  sp, [%2, %1]	\n\t"
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index e33a0bf45589..3f6ce98fea11 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -232,7 +232,11 @@ ARC_EXIT handle_interrupt_level2
 ARC_ENTRY handle_interrupt_level1
 
 	/* free up r9 as scratchpad */
+#ifdef CONFIG_SMP
+	sr  r9, [ARC_REG_SCRATCH_DATA0]
+#else
 	st  r9, [@int1_saved_reg]
+#endif
 
 	;Which mode (user/kernel) was the system in when intr occured
 	lr  r9, [status32_l1]
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index e63f6a43abb1..006dec3fc353 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -27,6 +27,15 @@ stext:
 	; Don't clobber r0-r4 yet. It might have bootloader provided info
 	;-------------------------------------------------------------------
 
+#ifdef CONFIG_SMP
+	; Only Boot (Master) proceeds. Others wait in platform dependent way
+	;	IDENTITY Reg [ 3  2  1  0 ]
+	;	(cpu-id)             ^^^	=> Zero for UP ARC700
+	;					=> #Core-ID if SMP (Master 0)
+	GET_CPU_ID  r5
+	cmp	r5, 0
+	jnz	arc_platform_smp_wait_to_boot
+#endif
 	; Clear BSS before updating any globals
 	; XXX: use ZOL here
 	mov	r5, __bss_start
@@ -76,3 +85,27 @@ stext:
 	GET_TSK_STACK_BASE r9, sp	; r9 = tsk, sp = stack base(output)
 
 	j	start_kernel	; "C" entry point
+
+#ifdef CONFIG_SMP
+;----------------------------------------------------------------
+;     First lines of code run by secondary before jumping to 'C'
+;----------------------------------------------------------------
+	.section .init.text, "ax",@progbits
+	.type first_lines_of_secondary, @function
+	.globl first_lines_of_secondary
+
+first_lines_of_secondary:
+
+	; setup per-cpu idle task as "current" on this CPU
+	ld	r0, [@secondary_idle_tsk]
+	SET_CURR_TASK_ON_CPU  r0, r1
+
+	; setup stack (fp, sp)
+	mov	fp, 0
+
+	; set its stack base to tsk->thread_info bottom
+	GET_TSK_STACK_BASE r0, sp
+
+	j	start_kernel_secondary
+
+#endif
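[Editor's note] Non-master cores branch to arc_platform_smp_wait_to_boot above; the weak default in smp.c (further down in this patch) simply halts, and its comment describes how a platform without hardware kick-start support can instead busy-spin on a token set by the master. A hypothetical override, purely as a sketch: wake_flag and the polling scheme are invented here, and a real version must ensure the compiler emits no stack accesses since no stack exists yet.

	/* hypothetical platform override -- not part of this patch */
	static volatile int wake_flag = -1;	/* -1: no secondary released yet */

	void arc_platform_smp_wakeup_cpu(int cpu, unsigned long pc)
	{
		wake_flag = cpu;	/* pc is implicit: secondary jumps to first_lines_of_secondary */
	}

	void arc_platform_smp_wait_to_boot(int cpu)
	{
		while (wake_flag != cpu)	/* spin until master releases this core */
			;

		/* jump, not call: there is no stack to return to */
		__asm__ __volatile__("j @first_lines_of_secondary");
	}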
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
index ca70894e2309..df7da2b5a5bd 100644
--- a/arch/arc/kernel/irq.c
+++ b/arch/arc/kernel/irq.c
@@ -124,6 +124,11 @@ void __init init_IRQ(void)
 {
 	init_onchip_IRQ();
 	plat_init_IRQ();
+
+#ifdef CONFIG_SMP
+	/* Master CPU can initialize its side of IPI */
+	arc_platform_smp_init_cpu();
+#endif
 }
 
 /*
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 27aebd6d9513..4026b5a004d2 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -86,6 +86,10 @@ void __init setup_arch(char **cmdline_p)
 
 	setup_processor();
 
+#ifdef CONFIG_SMP
+	smp_init_cpus();
+#endif
+
 	setup_arch_memory();
 
 	unflatten_device_tree();
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
new file mode 100644
index 000000000000..1f762ad6969b
--- /dev/null
+++ b/arch/arc/kernel/smp.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * RajeshwarR: Dec 11, 2007
+ *   -- Added support for Inter Processor Interrupts
+ *
+ * Vineetg: Nov 1st, 2007
+ *   -- Initial Write (Borrowed heavily from ARM)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/profile.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <linux/percpu.h>
+#include <linux/cpumask.h>
+#include <linux/spinlock_types.h>
+#include <linux/reboot.h>
+#include <asm/processor.h>
+#include <asm/setup.h>
+
+arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+
+/* XXX: per cpu ? Only needed once in early secondary boot */
+struct task_struct *secondary_idle_tsk;
+
+/* Called from start_kernel */
+void __init smp_prepare_boot_cpu(void)
+{
+}
+
+/*
+ * Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system.
+ */
+void __init smp_init_cpus(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < NR_CPUS; i++)
+		set_cpu_possible(i, true);
+}
+
+/* called from init ( ) => process 1 */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+	int i;
+
+	/*
+	 * Initialise the present map, which describes the set of CPUs
+	 * actually populated at the present time.
+	 */
+	for (i = 0; i < max_cpus; i++)
+		set_cpu_present(i, true);
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+
+}
+
+/*
+ * After power-up, a non Master CPU needs to wait for Master to kick start it
+ *
+ * The default implementation halts
+ *
+ * This relies on platform specific support allowing Master to directly set
+ * this CPU's PC (to be @first_lines_of_secondary()) and kick start it.
+ *
+ * In the absence of such h/w assist, platforms can override this function
+ * - make this function busy-spin on a token, eventually set by Master
+ *   (from arc_platform_smp_wakeup_cpu())
+ * - Once token is available, jump to @first_lines_of_secondary
+ *   (using inline asm).
+ *
+ * Alert: can NOT use stack here as it has not been determined/setup for CPU.
+ *        If it turns out to be elaborate, it's better to code it in assembly
+ *
+ */
+void __attribute__((weak)) arc_platform_smp_wait_to_boot(int cpu)
+{
+	/*
+	 * As a hack for debugging - since debugger will single-step over the
+	 * FLAG insn - wrap the halt itself in a self loop
+	 */
+	__asm__ __volatile__(
+	"1:		\n"
+	"	flag 1	\n"
+	"	b 1b	\n");
+}
+
+/*
+ * The very first "C" code executed by secondary
+ * Called from asm stub in head.S
+ * "current"/R25 already setup by low level boot code
+ */
+void __cpuinit start_kernel_secondary(void)
+{
+	struct mm_struct *mm = &init_mm;
+	unsigned int cpu = smp_processor_id();
+
+	/* MMU, Caches, Vector Table, Interrupts etc */
+	setup_processor();
+
+	atomic_inc(&mm->mm_users);
+	atomic_inc(&mm->mm_count);
+	current->active_mm = mm;
+
+	notify_cpu_starting(cpu);
+	set_cpu_online(cpu, true);
+
+	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
+
+	arc_platform_smp_init_cpu();
+
+	arc_local_timer_setup(cpu);
+
+	local_irq_enable();
+	preempt_disable();
+	cpu_idle();
+}
+
+/*
+ * Called from kernel_init( ) -> smp_init( ) - for each CPU
+ *
+ * At this point, Secondary Processor is "HALT"ed:
+ * - It booted, but was halted in head.S
+ * - It was configured to halt-on-reset
+ * So need to wake it up.
+ *
+ * Essential requirements being where to run from (PC) and stack (SP)
+ */
+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
+{
+	unsigned long wait_till;
+
+	secondary_idle_tsk = idle;
+
+	pr_info("Idle Task [%d] %p", cpu, idle);
+	pr_info("Trying to bring up CPU%u ...\n", cpu);
+
+	arc_platform_smp_wakeup_cpu(cpu,
+				(unsigned long)first_lines_of_secondary);
+
+	/* wait for 1 sec after kicking the secondary */
+	wait_till = jiffies + HZ;
+	while (time_before(jiffies, wait_till)) {
+		if (cpu_online(cpu))
+			break;
+	}
+
+	if (!cpu_online(cpu)) {
+		pr_info("Timeout: CPU%u FAILED to come up !!!\n", cpu);
+		return -1;
+	}
+
+	secondary_idle_tsk = NULL;
+
+	return 0;
+}
+
+/*
+ * not supported here
+ */
+int __init setup_profiling_timer(unsigned int multiplier)
+{
+	return -EINVAL;
+}
+
+/*****************************************************************************/
+/*              Inter Processor Interrupt Handling                           */
+/*****************************************************************************/
+
+/*
+ * structures for inter-processor calls
+ * A Collection of single bit ipi messages
+ *
+ */
+
+/*
+ * TODO_rajesh investigate tlb message types.
+ * IPI Timer not needed because each ARC has an individual Interrupting Timer
+ */
+enum ipi_msg_type {
+	IPI_NOP = 0,
+	IPI_RESCHEDULE = 1,
+	IPI_CALL_FUNC,
+	IPI_CALL_FUNC_SINGLE,
+	IPI_CPU_STOP
+};
+
+struct ipi_data {
+	unsigned long bits;
+};
+
+static DEFINE_PER_CPU(struct ipi_data, ipi_data);
+
+static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
+{
+	unsigned long flags;
+	unsigned int cpu;
+
+	local_irq_save(flags);
+
+	for_each_cpu(cpu, callmap) {
+		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+		set_bit(msg, &ipi->bits);
+	}
+
+	/* Call the platform specific cross-CPU call function */
+	arc_platform_ipi_send(callmap);
+
+	local_irq_restore(flags);
+}
+
+void smp_send_reschedule(int cpu)
+{
+	ipi_send_msg(cpumask_of(cpu), IPI_RESCHEDULE);
+}
+
+void smp_send_stop(void)
+{
+	struct cpumask targets;
+	cpumask_copy(&targets, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), &targets);
+	ipi_send_msg(&targets, IPI_CPU_STOP);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+	ipi_send_msg(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+	ipi_send_msg(mask, IPI_CALL_FUNC);
+}
+
+/*
+ * ipi_cpu_stop - handle IPI from smp_send_stop()
+ */
+static void ipi_cpu_stop(unsigned int cpu)
+{
+	machine_halt();
+}
+
+static inline void __do_IPI(unsigned long *ops, struct ipi_data *ipi, int cpu)
+{
+	unsigned long msg = 0;
+
+	do {
+		msg = find_next_bit(ops, BITS_PER_LONG, msg+1);
+
+		switch (msg) {
+		case IPI_RESCHEDULE:
+			scheduler_ipi();
+			break;
+
+		case IPI_CALL_FUNC:
+			generic_smp_call_function_interrupt();
+			break;
+
+		case IPI_CALL_FUNC_SINGLE:
+			generic_smp_call_function_single_interrupt();
+			break;
+
+		case IPI_CPU_STOP:
+			ipi_cpu_stop(cpu);
+			break;
+		}
+	} while (msg < BITS_PER_LONG);
+
+}
+
+/*
+ * arch-common ISR to handle inter-processor interrupts
+ * Has hooks for platform specific IPI
+ */
+irqreturn_t do_IPI(int irq, void *dev_id)
+{
+	int cpu = smp_processor_id();
+	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+	unsigned long ops;
+
+	arc_platform_ipi_clear(cpu, irq);
+
+	/*
+	 * XXX: is this loop really needed
+	 * And do we need to move ipi_clear inside
+	 */
+	while ((ops = xchg(&ipi->bits, 0)) != 0)
+		__do_IPI(&ops, ipi, cpu);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * API called by platform code to hookup arch-common ISR to their IPI IRQ
+ */
+static DEFINE_PER_CPU(int, ipi_dev);
+int smp_ipi_irq_setup(int cpu, int irq)
+{
+	int *dev_id = &per_cpu(ipi_dev, smp_processor_id());
+	return request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev_id);
+}
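[Editor's note] To tie the pieces together: smp_ipi_irq_setup() above is the hook a platform calls from its arc_platform_smp_init_cpu(), which this patch invokes from init_IRQ() on the master and from start_kernel_secondary() on each secondary. A minimal sketch of that glue, assuming a hypothetical IPI interrupt line number; both PLAT_IPI_IRQ and the body are illustrative, not part of the patch:

	/* hypothetical platform glue -- IRQ number is an assumption */
	#define PLAT_IPI_IRQ	19

	void arc_platform_smp_init_cpu(void)
	{
		int cpu = smp_processor_id();

		/* route this core's cross-CPU interrupt to the common do_IPI() */
		smp_ipi_irq_setup(cpu, PLAT_IPI_IRQ);
	}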