diff options
author | Vineet Gupta <vgupta@synopsys.com> | 2013-01-18 04:42:23 -0500 |
---|---|---|
committer | Vineet Gupta <vgupta@synopsys.com> | 2013-02-15 12:46:02 -0500 |
commit | 41195d236e84458bebd4fdc218610a92231ac791 (patch) | |
tree | c0049630c1a21a071c9c942086041029ebdf2866 /arch/arc | |
parent | 0ef88a54aa341f754707414500158addbf35c780 (diff) |
ARC: SMP support
ARC common code to enable an SMP system + ISS-provided SMP extensions.
ARC700 natively lacks SMP support, hence some of the core features
are only enabled if SoCs have the necessary h/w pixie-dust. This
includes:
-Inter Processor Interrupts (IPI)
-Cache coherency
-load-locked/store-conditional
...
The low level exception handling would be completely broken in SMP
because we don't have hardware assisted stack switching. Thus a fair bit
of this code is repurposing the MMU_SCRATCH reg for event handler
prologues to keep them re-entrant.
Many thanks to Rajeshwar Ranga for his initial "major" contributions to
SMP Port (back in 2008), and to Noam Camus and Gilad Ben-Yossef for help
with resurrecting that in 3.2 kernel (2012).
Note that this platform code is again singleton design pattern - so
multiple SMP platforms won't build at the moment - this deficiency is
addressed in subsequent patches within this series.
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Rajeshwar Ranga <rajeshwar.ranga@gmail.com>
Cc: Noam Camus <noamc@ezchip.com>
Cc: Gilad Ben-Yossef <gilad@benyossef.com>
Diffstat (limited to 'arch/arc')
-rw-r--r-- | arch/arc/Kconfig | 39 | ||||
-rw-r--r-- | arch/arc/Makefile | 3 | ||||
-rw-r--r-- | arch/arc/include/asm/entry.h | 49 | ||||
-rw-r--r-- | arch/arc/include/asm/mmu_context.h | 4 | ||||
-rw-r--r-- | arch/arc/include/asm/mutex.h | 9 | ||||
-rw-r--r-- | arch/arc/include/asm/pgtable.h | 4 | ||||
-rw-r--r-- | arch/arc/include/asm/processor.h | 8 | ||||
-rw-r--r-- | arch/arc/include/asm/smp.h | 107 | ||||
-rw-r--r-- | arch/arc/kernel/Makefile | 1 | ||||
-rw-r--r-- | arch/arc/kernel/ctx_sw.c | 11 | ||||
-rw-r--r-- | arch/arc/kernel/entry.S | 4 | ||||
-rw-r--r-- | arch/arc/kernel/head.S | 33 | ||||
-rw-r--r-- | arch/arc/kernel/irq.c | 5 | ||||
-rw-r--r-- | arch/arc/kernel/setup.c | 4 | ||||
-rw-r--r-- | arch/arc/kernel/smp.c | 320 | ||||
-rw-r--r-- | arch/arc/mm/tlb.c | 6 | ||||
-rw-r--r-- | arch/arc/mm/tlbex.S | 38 | ||||
-rw-r--r-- | arch/arc/plat-arcfpga/Kconfig | 14 | ||||
-rw-r--r-- | arch/arc/plat-arcfpga/Makefile | 1 | ||||
-rw-r--r-- | arch/arc/plat-arcfpga/include/plat/irq.h | 10 | ||||
-rw-r--r-- | arch/arc/plat-arcfpga/include/plat/smp.h | 115 | ||||
-rw-r--r-- | arch/arc/plat-arcfpga/irq.c | 10 | ||||
-rw-r--r-- | arch/arc/plat-arcfpga/smp.c | 167 |
23 files changed, 960 insertions, 2 deletions
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 68350aa3d297..52f5c072f6da 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig | |||
@@ -116,9 +116,42 @@ config CPU_BIG_ENDIAN | |||
116 | help | 116 | help |
117 | Build kernel for Big Endian Mode of ARC CPU | 117 | Build kernel for Big Endian Mode of ARC CPU |
118 | 118 | ||
119 | config SMP | ||
120 | bool "Symmetric Multi-Processing (Incomplete)" | ||
121 | default n | ||
122 | select USE_GENERIC_SMP_HELPERS | ||
123 | help | ||
124 | This enables support for systems with more than one CPU. If you have | ||
125 | a system with only one CPU, like most personal computers, say N. If | ||
126 | you have a system with more than one CPU, say Y. | ||
127 | |||
128 | if SMP | ||
129 | |||
130 | config ARC_HAS_COH_CACHES | ||
131 | def_bool n | ||
132 | |||
133 | config ARC_HAS_COH_LLSC | ||
134 | def_bool n | ||
135 | |||
136 | config ARC_HAS_COH_RTSC | ||
137 | def_bool n | ||
138 | |||
139 | config ARC_HAS_REENTRANT_IRQ_LV2 | ||
140 | def_bool n | ||
141 | |||
142 | endif | ||
143 | |||
144 | config NR_CPUS | ||
145 | int "Maximum number of CPUs (2-32)" | ||
146 | range 2 32 | ||
147 | depends on SMP | ||
148 | default "2" | ||
149 | |||
119 | menuconfig ARC_CACHE | 150 | menuconfig ARC_CACHE |
120 | bool "Enable Cache Support" | 151 | bool "Enable Cache Support" |
121 | default y | 152 | default y |
153 | # if SMP, cache enabled ONLY if ARC implementation has cache coherency | ||
154 | depends on !SMP || ARC_HAS_COH_CACHES | ||
122 | 155 | ||
123 | if ARC_CACHE | 156 | if ARC_CACHE |
124 | 157 | ||
@@ -213,6 +246,8 @@ config ARC_COMPACT_IRQ_LEVELS | |||
213 | default n | 246 | default n |
214 | # Timer HAS to be high priority, for any other high priority config | 247 | # Timer HAS to be high priority, for any other high priority config |
215 | select ARC_IRQ3_LV2 | 248 | select ARC_IRQ3_LV2 |
249 | # if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy | ||
250 | depends on !SMP || ARC_HAS_REENTRANT_IRQ_LV2 | ||
216 | 251 | ||
217 | if ARC_COMPACT_IRQ_LEVELS | 252 | if ARC_COMPACT_IRQ_LEVELS |
218 | 253 | ||
@@ -261,6 +296,8 @@ config ARC_HAS_RTSC | |||
261 | bool "Insn: RTSC (64-bit r/o cycle counter)" | 296 | bool "Insn: RTSC (64-bit r/o cycle counter)" |
262 | default y | 297 | default y |
263 | depends on ARC_CPU_REL_4_10 | 298 | depends on ARC_CPU_REL_4_10 |
299 | # if SMP, enable RTSC only if counter is coherent across cores | ||
300 | depends on !SMP || ARC_HAS_COH_RTSC | ||
264 | 301 | ||
265 | endmenu # "ARC CPU Configuration" | 302 | endmenu # "ARC CPU Configuration" |
266 | 303 | ||
@@ -309,7 +346,7 @@ menuconfig ARC_DBG | |||
309 | 346 | ||
310 | config ARC_DBG_TLB_PARANOIA | 347 | config ARC_DBG_TLB_PARANOIA |
311 | bool "Paranoia Checks in Low Level TLB Handlers" | 348 | bool "Paranoia Checks in Low Level TLB Handlers" |
312 | depends on ARC_DBG | 349 | depends on ARC_DBG && !SMP |
313 | default n | 350 | default n |
314 | 351 | ||
315 | config ARC_DBG_TLB_MISS_COUNT | 352 | config ARC_DBG_TLB_MISS_COUNT |
diff --git a/arch/arc/Makefile b/arch/arc/Makefile index 642c0406d600..fae66bfc2bdb 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile | |||
@@ -133,3 +133,6 @@ archclean: | |||
133 | # Thus forcing all exten calls in this file to be long calls | 133 | # Thus forcing all exten calls in this file to be long calls |
134 | export CFLAGS_decompress_inflate.o = -mmedium-calls | 134 | export CFLAGS_decompress_inflate.o = -mmedium-calls |
135 | export CFLAGS_initramfs.o = -mmedium-calls | 135 | export CFLAGS_initramfs.o = -mmedium-calls |
136 | ifdef CONFIG_SMP | ||
137 | export CFLAGS_core.o = -mmedium-calls | ||
138 | endif | ||
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h index 23ef2de1e09f..23daa326fc9b 100644 --- a/arch/arc/include/asm/entry.h +++ b/arch/arc/include/asm/entry.h | |||
@@ -389,11 +389,19 @@ | |||
389 | * to be saved again on kernel mode stack, as part of ptregs. | 389 | * to be saved again on kernel mode stack, as part of ptregs. |
390 | *-------------------------------------------------------------*/ | 390 | *-------------------------------------------------------------*/ |
391 | .macro EXCPN_PROLOG_FREEUP_REG reg | 391 | .macro EXCPN_PROLOG_FREEUP_REG reg |
392 | #ifdef CONFIG_SMP | ||
393 | sr \reg, [ARC_REG_SCRATCH_DATA0] | ||
394 | #else | ||
392 | st \reg, [@ex_saved_reg1] | 395 | st \reg, [@ex_saved_reg1] |
396 | #endif | ||
393 | .endm | 397 | .endm |
394 | 398 | ||
395 | .macro EXCPN_PROLOG_RESTORE_REG reg | 399 | .macro EXCPN_PROLOG_RESTORE_REG reg |
400 | #ifdef CONFIG_SMP | ||
401 | lr \reg, [ARC_REG_SCRATCH_DATA0] | ||
402 | #else | ||
396 | ld \reg, [@ex_saved_reg1] | 403 | ld \reg, [@ex_saved_reg1] |
404 | #endif | ||
397 | .endm | 405 | .endm |
398 | 406 | ||
399 | /*-------------------------------------------------------------- | 407 | /*-------------------------------------------------------------- |
@@ -508,7 +516,11 @@ | |||
508 | /* restore original r9 , saved in int1_saved_reg | 516 | /* restore original r9 , saved in int1_saved_reg |
509 | * It will be saved on stack in macro: SAVE_CALLER_SAVED | 517 | * It will be saved on stack in macro: SAVE_CALLER_SAVED |
510 | */ | 518 | */ |
519 | #ifdef CONFIG_SMP | ||
520 | lr r9, [ARC_REG_SCRATCH_DATA0] | ||
521 | #else | ||
511 | ld r9, [@int1_saved_reg] | 522 | ld r9, [@int1_saved_reg] |
523 | #endif | ||
512 | 524 | ||
513 | /* now we are ready to save the remaining context :) */ | 525 | /* now we are ready to save the remaining context :) */ |
514 | st orig_r8_IS_IRQ1, [sp, 8] /* Event Type */ | 526 | st orig_r8_IS_IRQ1, [sp, 8] /* Event Type */ |
@@ -639,6 +651,41 @@ | |||
639 | bmsk \reg, \reg, 7 | 651 | bmsk \reg, \reg, 7 |
640 | .endm | 652 | .endm |
641 | 653 | ||
654 | #ifdef CONFIG_SMP | ||
655 | |||
656 | /*------------------------------------------------- | ||
657 | * Retrieve the current running task on this CPU | ||
658 | * 1. Determine curr CPU id. | ||
659 | * 2. Use it to index into _current_task[ ] | ||
660 | */ | ||
661 | .macro GET_CURR_TASK_ON_CPU reg | ||
662 | GET_CPU_ID \reg | ||
663 | ld.as \reg, [@_current_task, \reg] | ||
664 | .endm | ||
665 | |||
666 | /*------------------------------------------------- | ||
667 | * Save a new task as the "current" task on this CPU | ||
668 | * 1. Determine curr CPU id. | ||
669 | * 2. Use it to index into _current_task[ ] | ||
670 | * | ||
671 | * Coded differently than GET_CURR_TASK_ON_CPU (which uses LD.AS) | ||
672 | * because ST r0, [r1, offset] can ONLY have s9 @offset | ||
673 | * while LD can take s9 (4 byte insn) or LIMM (8 byte insn) | ||
674 | */ | ||
675 | |||
676 | .macro SET_CURR_TASK_ON_CPU tsk, tmp | ||
677 | GET_CPU_ID \tmp | ||
678 | add2 \tmp, @_current_task, \tmp | ||
679 | st \tsk, [\tmp] | ||
680 | #ifdef CONFIG_ARC_CURR_IN_REG | ||
681 | mov r25, \tsk | ||
682 | #endif | ||
683 | |||
684 | .endm | ||
685 | |||
686 | |||
687 | #else /* Uniprocessor implementation of macros */ | ||
688 | |||
642 | .macro GET_CURR_TASK_ON_CPU reg | 689 | .macro GET_CURR_TASK_ON_CPU reg |
643 | ld \reg, [@_current_task] | 690 | ld \reg, [@_current_task] |
644 | .endm | 691 | .endm |
@@ -650,6 +697,8 @@ | |||
650 | #endif | 697 | #endif |
651 | .endm | 698 | .endm |
652 | 699 | ||
700 | #endif /* SMP / UNI */ | ||
701 | |||
653 | /* ------------------------------------------------------------------ | 702 | /* ------------------------------------------------------------------ |
654 | * Get the ptr to some field of Current Task at @off in task struct | 703 | * Get the ptr to some field of Current Task at @off in task struct |
655 | * -Uses r25 for Current task ptr if that is enabled | 704 | * -Uses r25 for Current task ptr if that is enabled |
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h index d12f3dec8b70..0d71fb11b57c 100644 --- a/arch/arc/include/asm/mmu_context.h +++ b/arch/arc/include/asm/mmu_context.h | |||
@@ -147,8 +147,10 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm) | |||
147 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | 147 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, |
148 | struct task_struct *tsk) | 148 | struct task_struct *tsk) |
149 | { | 149 | { |
150 | #ifndef CONFIG_SMP | ||
150 | /* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */ | 151 | /* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */ |
151 | write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd); | 152 | write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd); |
153 | #endif | ||
152 | 154 | ||
153 | /* | 155 | /* |
154 | * Get a new ASID if task doesn't have a valid one. Possible when | 156 | * Get a new ASID if task doesn't have a valid one. Possible when |
@@ -197,7 +199,9 @@ static inline void destroy_context(struct mm_struct *mm) | |||
197 | 199 | ||
198 | static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) | 200 | static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) |
199 | { | 201 | { |
202 | #ifndef CONFIG_SMP | ||
200 | write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd); | 203 | write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd); |
204 | #endif | ||
201 | 205 | ||
202 | /* Unconditionally get a new ASID */ | 206 | /* Unconditionally get a new ASID */ |
203 | get_new_mmu_context(next); | 207 | get_new_mmu_context(next); |
diff --git a/arch/arc/include/asm/mutex.h b/arch/arc/include/asm/mutex.h index 3be5e64da139..a2f88ff9f506 100644 --- a/arch/arc/include/asm/mutex.h +++ b/arch/arc/include/asm/mutex.h | |||
@@ -6,4 +6,13 @@ | |||
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | /* | ||
10 | * xchg() based mutex fast path maintains a state of 0 or 1, as opposed to | ||
11 | * atomic dec based which can "count" any number of lock contenders. | ||
12 | * This ideally needs to be fixed in core, but for now switching to dec ver. | ||
13 | */ | ||
14 | #if defined(CONFIG_SMP) && (CONFIG_NR_CPUS > 2) | ||
15 | #include <asm-generic/mutex-dec.h> | ||
16 | #else | ||
9 | #include <asm-generic/mutex-xchg.h> | 17 | #include <asm-generic/mutex-xchg.h> |
18 | #endif | ||
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h index dcb0701528aa..b7e36684c091 100644 --- a/arch/arc/include/asm/pgtable.h +++ b/arch/arc/include/asm/pgtable.h | |||
@@ -354,11 +354,15 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
354 | * Thus use this macro only when you are certain that "current" is current | 354 | * Thus use this macro only when you are certain that "current" is current |
355 | * e.g. when dealing with signal frame setup code etc | 355 | * e.g. when dealing with signal frame setup code etc |
356 | */ | 356 | */ |
357 | #ifndef CONFIG_SMP | ||
357 | #define pgd_offset_fast(mm, addr) \ | 358 | #define pgd_offset_fast(mm, addr) \ |
358 | ({ \ | 359 | ({ \ |
359 | pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0); \ | 360 | pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0); \ |
360 | pgd_base + pgd_index(addr); \ | 361 | pgd_base + pgd_index(addr); \ |
361 | }) | 362 | }) |
363 | #else | ||
364 | #define pgd_offset_fast(mm, addr) pgd_offset(mm, addr) | ||
365 | #endif | ||
362 | 366 | ||
363 | extern void paging_init(void); | 367 | extern void paging_init(void); |
364 | extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE); | 368 | extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE); |
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h index b7b155610067..5f26b2c1cba0 100644 --- a/arch/arc/include/asm/processor.h +++ b/arch/arc/include/asm/processor.h | |||
@@ -58,7 +58,15 @@ unsigned long thread_saved_pc(struct task_struct *t); | |||
58 | /* Prepare to copy thread state - unlazy all lazy status */ | 58 | /* Prepare to copy thread state - unlazy all lazy status */ |
59 | #define prepare_to_copy(tsk) do { } while (0) | 59 | #define prepare_to_copy(tsk) do { } while (0) |
60 | 60 | ||
61 | /* | ||
62 | * A lot of busy-wait loops in SMP are based off of non-volatile data otherwise | ||
63 | * get optimised away by gcc | ||
64 | */ | ||
65 | #ifdef CONFIG_SMP | ||
66 | #define cpu_relax() __asm__ __volatile__ ("" : : : "memory") | ||
67 | #else | ||
61 | #define cpu_relax() do { } while (0) | 68 | #define cpu_relax() do { } while (0) |
69 | #endif | ||
62 | 70 | ||
63 | #define copy_segments(tsk, mm) do { } while (0) | 71 | #define copy_segments(tsk, mm) do { } while (0) |
64 | #define release_segments(mm) do { } while (0) | 72 | #define release_segments(mm) do { } while (0) |
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h index 4341f3ba7d92..f91f1946272f 100644 --- a/arch/arc/include/asm/smp.h +++ b/arch/arc/include/asm/smp.h | |||
@@ -9,6 +9,69 @@ | |||
9 | #ifndef __ASM_ARC_SMP_H | 9 | #ifndef __ASM_ARC_SMP_H |
10 | #define __ASM_ARC_SMP_H | 10 | #define __ASM_ARC_SMP_H |
11 | 11 | ||
12 | #ifdef CONFIG_SMP | ||
13 | |||
14 | #include <linux/types.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/threads.h> | ||
17 | |||
18 | #define raw_smp_processor_id() (current_thread_info()->cpu) | ||
19 | |||
20 | /* including cpumask.h leads to cyclic deps hence this Forward declaration */ | ||
21 | struct cpumask; | ||
22 | |||
23 | /* | ||
24 | * APIs provided by arch SMP code to generic code | ||
25 | */ | ||
26 | extern void arch_send_call_function_single_ipi(int cpu); | ||
27 | extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); | ||
28 | |||
29 | /* | ||
30 | * APIs provided by arch SMP code to rest of arch code | ||
31 | */ | ||
32 | extern void __init smp_init_cpus(void); | ||
33 | extern void __init first_lines_of_secondary(void); | ||
34 | |||
35 | /* | ||
36 | * API expected BY platform smp code (FROM arch smp code) | ||
37 | * | ||
38 | * smp_ipi_irq_setup: | ||
39 | * Takes @cpu and @irq to which the arch-common ISR is hooked up | ||
40 | */ | ||
41 | extern int smp_ipi_irq_setup(int cpu, int irq); | ||
42 | |||
43 | /* | ||
44 | * APIs expected FROM platform smp code | ||
45 | * | ||
46 | * arc_platform_smp_cpuinfo: | ||
47 | * returns a string containing info for /proc/cpuinfo | ||
48 | * | ||
49 | * arc_platform_smp_init_cpu: | ||
50 | * Called from start_kernel_secondary to do any CPU local setup | ||
51 | * such as starting a timer, setting up IPI etc | ||
52 | * | ||
53 | * arc_platform_smp_wait_to_boot: | ||
54 | * Called from early bootup code for non-Master CPUs to "park" them | ||
55 | * | ||
56 | * arc_platform_smp_wakeup_cpu: | ||
57 | * Called from __cpu_up (Master CPU) to kick start another one | ||
58 | * | ||
59 | * arc_platform_ipi_send: | ||
60 | * Takes @cpumask to which IPI(s) would be sent. | ||
61 | * The actual msg-id/buffer is manager in arch-common code | ||
62 | * | ||
63 | * arc_platform_ipi_clear: | ||
64 | * Takes @cpu which got IPI at @irq to do any IPI clearing | ||
65 | */ | ||
66 | extern const char *arc_platform_smp_cpuinfo(void); | ||
67 | extern void arc_platform_smp_init_cpu(void); | ||
68 | extern void arc_platform_smp_wait_to_boot(int cpu); | ||
69 | extern void arc_platform_smp_wakeup_cpu(int cpu, unsigned long pc); | ||
70 | extern void arc_platform_ipi_send(const struct cpumask *callmap); | ||
71 | extern void arc_platform_ipi_clear(int cpu, int irq); | ||
72 | |||
73 | #endif /* CONFIG_SMP */ | ||
74 | |||
12 | /* | 75 | /* |
13 | * ARC700 doesn't support atomic Read-Modify-Write ops. | 76 | * ARC700 doesn't support atomic Read-Modify-Write ops. |
14 | * Originally Interrupts had to be disabled around code to gaurantee atomicity. | 77 | * Originally Interrupts had to be disabled around code to gaurantee atomicity. |
@@ -18,10 +81,52 @@ | |||
18 | * | 81 | * |
19 | * (1) These insn were introduced only in 4.10 release. So for older released | 82 | * (1) These insn were introduced only in 4.10 release. So for older released |
20 | * support needed. | 83 | * support needed. |
84 | * | ||
85 | * (2) In a SMP setup, the LLOCK/SCOND atomiticity across CPUs needs to be | ||
86 | * gaurantted by the platform (not something which core handles). | ||
87 | * Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ | ||
88 | * disabling for atomicity. | ||
89 | * | ||
90 | * However exported spinlock API is not usable due to cyclic hdr deps | ||
91 | * (even after system.h disintegration upstream) | ||
92 | * asm/bitops.h -> linux/spinlock.h -> linux/preempt.h | ||
93 | * -> linux/thread_info.h -> linux/bitops.h -> asm/bitops.h | ||
94 | * | ||
95 | * So the workaround is to use the lowest level arch spinlock API. | ||
96 | * The exported spinlock API is smart enough to be NOP for !CONFIG_SMP, | ||
97 | * but same is not true for ARCH backend, hence the need for 2 variants | ||
21 | */ | 98 | */ |
22 | #ifndef CONFIG_ARC_HAS_LLSC | 99 | #ifndef CONFIG_ARC_HAS_LLSC |
23 | 100 | ||
24 | #include <linux/irqflags.h> | 101 | #include <linux/irqflags.h> |
102 | #ifdef CONFIG_SMP | ||
103 | |||
104 | #include <asm/spinlock.h> | ||
105 | |||
106 | extern arch_spinlock_t smp_atomic_ops_lock; | ||
107 | extern arch_spinlock_t smp_bitops_lock; | ||
108 | |||
109 | #define atomic_ops_lock(flags) do { \ | ||
110 | local_irq_save(flags); \ | ||
111 | arch_spin_lock(&smp_atomic_ops_lock); \ | ||
112 | } while (0) | ||
113 | |||
114 | #define atomic_ops_unlock(flags) do { \ | ||
115 | arch_spin_unlock(&smp_atomic_ops_lock); \ | ||
116 | local_irq_restore(flags); \ | ||
117 | } while (0) | ||
118 | |||
119 | #define bitops_lock(flags) do { \ | ||
120 | local_irq_save(flags); \ | ||
121 | arch_spin_lock(&smp_bitops_lock); \ | ||
122 | } while (0) | ||
123 | |||
124 | #define bitops_unlock(flags) do { \ | ||
125 | arch_spin_unlock(&smp_bitops_lock); \ | ||
126 | local_irq_restore(flags); \ | ||
127 | } while (0) | ||
128 | |||
129 | #else /* !CONFIG_SMP */ | ||
25 | 130 | ||
26 | #define atomic_ops_lock(flags) local_irq_save(flags) | 131 | #define atomic_ops_lock(flags) local_irq_save(flags) |
27 | #define atomic_ops_unlock(flags) local_irq_restore(flags) | 132 | #define atomic_ops_unlock(flags) local_irq_restore(flags) |
@@ -29,6 +134,8 @@ | |||
29 | #define bitops_lock(flags) local_irq_save(flags) | 134 | #define bitops_lock(flags) local_irq_save(flags) |
30 | #define bitops_unlock(flags) local_irq_restore(flags) | 135 | #define bitops_unlock(flags) local_irq_restore(flags) |
31 | 136 | ||
137 | #endif /* !CONFIG_SMP */ | ||
138 | |||
32 | #endif /* !CONFIG_ARC_HAS_LLSC */ | 139 | #endif /* !CONFIG_ARC_HAS_LLSC */ |
33 | 140 | ||
34 | #endif | 141 | #endif |
diff --git a/arch/arc/kernel/Makefile b/arch/arc/kernel/Makefile index f32f65f98850..46c15ff97e97 100644 --- a/arch/arc/kernel/Makefile +++ b/arch/arc/kernel/Makefile | |||
@@ -13,6 +13,7 @@ obj-y += signal.o traps.o sys.o troubleshoot.o stacktrace.o clk.o | |||
13 | obj-y += devtree.o | 13 | obj-y += devtree.o |
14 | 14 | ||
15 | obj-$(CONFIG_MODULES) += arcksyms.o module.o | 15 | obj-$(CONFIG_MODULES) += arcksyms.o module.o |
16 | obj-$(CONFIG_SMP) += smp.o | ||
16 | 17 | ||
17 | obj-$(CONFIG_ARC_FPU_SAVE_RESTORE) += fpu.o | 18 | obj-$(CONFIG_ARC_FPU_SAVE_RESTORE) += fpu.o |
18 | CFLAGS_fpu.o += -mdpfp | 19 | CFLAGS_fpu.o += -mdpfp |
diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c index fbf739cbaf7d..60844dac6132 100644 --- a/arch/arc/kernel/ctx_sw.c +++ b/arch/arc/kernel/ctx_sw.c | |||
@@ -58,7 +58,18 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task) | |||
58 | * For SMP extra work to get to &_current_task[cpu] | 58 | * For SMP extra work to get to &_current_task[cpu] |
59 | * (open coded SET_CURR_TASK_ON_CPU) | 59 | * (open coded SET_CURR_TASK_ON_CPU) |
60 | */ | 60 | */ |
61 | #ifndef CONFIG_SMP | ||
61 | "st %2, [@_current_task] \n\t" | 62 | "st %2, [@_current_task] \n\t" |
63 | #else | ||
64 | "lr r24, [identity] \n\t" | ||
65 | "lsr r24, r24, 8 \n\t" | ||
66 | "bmsk r24, r24, 7 \n\t" | ||
67 | "add2 r24, @_current_task, r24 \n\t" | ||
68 | "st %2, [r24] \n\t" | ||
69 | #endif | ||
70 | #ifdef CONFIG_ARC_CURR_IN_REG | ||
71 | "mov r25, %2 \n\t" | ||
72 | #endif | ||
62 | 73 | ||
63 | /* get ksp of incoming task from tsk->thread.ksp */ | 74 | /* get ksp of incoming task from tsk->thread.ksp */ |
64 | "ld.as sp, [%2, %1] \n\t" | 75 | "ld.as sp, [%2, %1] \n\t" |
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S index e33a0bf45589..3f6ce98fea11 100644 --- a/arch/arc/kernel/entry.S +++ b/arch/arc/kernel/entry.S | |||
@@ -232,7 +232,11 @@ ARC_EXIT handle_interrupt_level2 | |||
232 | ARC_ENTRY handle_interrupt_level1 | 232 | ARC_ENTRY handle_interrupt_level1 |
233 | 233 | ||
234 | /* free up r9 as scratchpad */ | 234 | /* free up r9 as scratchpad */ |
235 | #ifdef CONFIG_SMP | ||
236 | sr r9, [ARC_REG_SCRATCH_DATA0] | ||
237 | #else | ||
235 | st r9, [@int1_saved_reg] | 238 | st r9, [@int1_saved_reg] |
239 | #endif | ||
236 | 240 | ||
237 | ;Which mode (user/kernel) was the system in when intr occured | 241 | ;Which mode (user/kernel) was the system in when intr occured |
238 | lr r9, [status32_l1] | 242 | lr r9, [status32_l1] |
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S index e63f6a43abb1..006dec3fc353 100644 --- a/arch/arc/kernel/head.S +++ b/arch/arc/kernel/head.S | |||
@@ -27,6 +27,15 @@ stext: | |||
27 | ; Don't clobber r0-r4 yet. It might have bootloader provided info | 27 | ; Don't clobber r0-r4 yet. It might have bootloader provided info |
28 | ;------------------------------------------------------------------- | 28 | ;------------------------------------------------------------------- |
29 | 29 | ||
30 | #ifdef CONFIG_SMP | ||
31 | ; Only Boot (Master) proceeds. Others wait in platform dependent way | ||
32 | ; IDENTITY Reg [ 3 2 1 0 ] | ||
33 | ; (cpu-id) ^^^ => Zero for UP ARC700 | ||
34 | ; => #Core-ID if SMP (Master 0) | ||
35 | GET_CPU_ID r5 | ||
36 | cmp r5, 0 | ||
37 | jnz arc_platform_smp_wait_to_boot | ||
38 | #endif | ||
30 | ; Clear BSS before updating any globals | 39 | ; Clear BSS before updating any globals |
31 | ; XXX: use ZOL here | 40 | ; XXX: use ZOL here |
32 | mov r5, __bss_start | 41 | mov r5, __bss_start |
@@ -76,3 +85,27 @@ stext: | |||
76 | GET_TSK_STACK_BASE r9, sp ; r9 = tsk, sp = stack base(output) | 85 | GET_TSK_STACK_BASE r9, sp ; r9 = tsk, sp = stack base(output) |
77 | 86 | ||
78 | j start_kernel ; "C" entry point | 87 | j start_kernel ; "C" entry point |
88 | |||
89 | #ifdef CONFIG_SMP | ||
90 | ;---------------------------------------------------------------- | ||
91 | ; First lines of code run by secondary before jumping to 'C' | ||
92 | ;---------------------------------------------------------------- | ||
93 | .section .init.text, "ax",@progbits | ||
94 | .type first_lines_of_secondary, @function | ||
95 | .globl first_lines_of_secondary | ||
96 | |||
97 | first_lines_of_secondary: | ||
98 | |||
99 | ; setup per-cpu idle task as "current" on this CPU | ||
100 | ld r0, [@secondary_idle_tsk] | ||
101 | SET_CURR_TASK_ON_CPU r0, r1 | ||
102 | |||
103 | ; setup stack (fp, sp) | ||
104 | mov fp, 0 | ||
105 | |||
106 | ; set it's stack base to tsk->thread_info bottom | ||
107 | GET_TSK_STACK_BASE r0, sp | ||
108 | |||
109 | j start_kernel_secondary | ||
110 | |||
111 | #endif | ||
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c index ca70894e2309..df7da2b5a5bd 100644 --- a/arch/arc/kernel/irq.c +++ b/arch/arc/kernel/irq.c | |||
@@ -124,6 +124,11 @@ void __init init_IRQ(void) | |||
124 | { | 124 | { |
125 | init_onchip_IRQ(); | 125 | init_onchip_IRQ(); |
126 | plat_init_IRQ(); | 126 | plat_init_IRQ(); |
127 | |||
128 | #ifdef CONFIG_SMP | ||
129 | /* Master CPU can initialize it's side of IPI */ | ||
130 | arc_platform_smp_init_cpu(); | ||
131 | #endif | ||
127 | } | 132 | } |
128 | 133 | ||
129 | /* | 134 | /* |
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index 27aebd6d9513..4026b5a004d2 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c | |||
@@ -86,6 +86,10 @@ void __init setup_arch(char **cmdline_p) | |||
86 | 86 | ||
87 | setup_processor(); | 87 | setup_processor(); |
88 | 88 | ||
89 | #ifdef CONFIG_SMP | ||
90 | smp_init_cpus(); | ||
91 | #endif | ||
92 | |||
89 | setup_arch_memory(); | 93 | setup_arch_memory(); |
90 | 94 | ||
91 | unflatten_device_tree(); | 95 | unflatten_device_tree(); |
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c new file mode 100644 index 000000000000..1f762ad6969b --- /dev/null +++ b/arch/arc/kernel/smp.c | |||
@@ -0,0 +1,320 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * RajeshwarR: Dec 11, 2007 | ||
9 | * -- Added support for Inter Processor Interrupts | ||
10 | * | ||
11 | * Vineetg: Nov 1st, 2007 | ||
12 | * -- Initial Write (Borrowed heavily from ARM) | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/spinlock.h> | ||
18 | #include <linux/sched.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/profile.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/err.h> | ||
23 | #include <linux/mm.h> | ||
24 | #include <linux/cpu.h> | ||
25 | #include <linux/smp.h> | ||
26 | #include <linux/irq.h> | ||
27 | #include <linux/delay.h> | ||
28 | #include <linux/atomic.h> | ||
29 | #include <linux/percpu.h> | ||
30 | #include <linux/cpumask.h> | ||
31 | #include <linux/spinlock_types.h> | ||
32 | #include <linux/reboot.h> | ||
33 | #include <asm/processor.h> | ||
34 | #include <asm/setup.h> | ||
35 | |||
36 | arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED; | ||
37 | arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED; | ||
38 | |||
39 | /* XXX: per cpu ? Only needed once in early seconday boot */ | ||
40 | struct task_struct *secondary_idle_tsk; | ||
41 | |||
42 | /* Called from start_kernel */ | ||
43 | void __init smp_prepare_boot_cpu(void) | ||
44 | { | ||
45 | } | ||
46 | |||
47 | /* | ||
48 | * Initialise the CPU possible map early - this describes the CPUs | ||
49 | * which may be present or become present in the system. | ||
50 | */ | ||
51 | void __init smp_init_cpus(void) | ||
52 | { | ||
53 | unsigned int i; | ||
54 | |||
55 | for (i = 0; i < NR_CPUS; i++) | ||
56 | set_cpu_possible(i, true); | ||
57 | } | ||
58 | |||
59 | /* called from init ( ) => process 1 */ | ||
60 | void __init smp_prepare_cpus(unsigned int max_cpus) | ||
61 | { | ||
62 | int i; | ||
63 | |||
64 | /* | ||
65 | * Initialise the present map, which describes the set of CPUs | ||
66 | * actually populated at the present time. | ||
67 | */ | ||
68 | for (i = 0; i < max_cpus; i++) | ||
69 | set_cpu_present(i, true); | ||
70 | } | ||
71 | |||
72 | void __init smp_cpus_done(unsigned int max_cpus) | ||
73 | { | ||
74 | |||
75 | } | ||
76 | |||
77 | /* | ||
78 | * After power-up, a non Master CPU needs to wait for Master to kick start it | ||
79 | * | ||
80 | * The default implementation halts | ||
81 | * | ||
82 | * This relies on platform specific support allowing Master to directly set | ||
83 | * this CPU's PC (to be @first_lines_of_secondary() and kick start it. | ||
84 | * | ||
85 | * In lack of such h/w assist, platforms can override this function | ||
86 | * - make this function busy-spin on a token, eventually set by Master | ||
87 | * (from arc_platform_smp_wakeup_cpu()) | ||
88 | * - Once token is available, jump to @first_lines_of_secondary | ||
89 | * (using inline asm). | ||
90 | * | ||
91 | * Alert: can NOT use stack here as it has not been determined/setup for CPU. | ||
92 | * If it turns out to be elaborate, it's better to code it in assembly | ||
93 | * | ||
94 | */ | ||
95 | void __attribute__((weak)) arc_platform_smp_wait_to_boot(int cpu) | ||
96 | { | ||
97 | /* | ||
98 | * As a hack for debugging - since debugger will single-step over the | ||
99 | * FLAG insn - wrap the halt itself it in a self loop | ||
100 | */ | ||
101 | __asm__ __volatile__( | ||
102 | "1: \n" | ||
103 | " flag 1 \n" | ||
104 | " b 1b \n"); | ||
105 | } | ||
106 | |||
107 | /* | ||
108 | * The very first "C" code executed by secondary | ||
109 | * Called from asm stub in head.S | ||
110 | * "current"/R25 already setup by low level boot code | ||
111 | */ | ||
112 | void __cpuinit start_kernel_secondary(void) | ||
113 | { | ||
114 | struct mm_struct *mm = &init_mm; | ||
115 | unsigned int cpu = smp_processor_id(); | ||
116 | |||
117 | /* MMU, Caches, Vector Table, Interrupts etc */ | ||
118 | setup_processor(); | ||
119 | |||
120 | atomic_inc(&mm->mm_users); | ||
121 | atomic_inc(&mm->mm_count); | ||
122 | current->active_mm = mm; | ||
123 | |||
124 | notify_cpu_starting(cpu); | ||
125 | set_cpu_online(cpu, true); | ||
126 | |||
127 | pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu); | ||
128 | |||
129 | arc_platform_smp_init_cpu(); | ||
130 | |||
131 | arc_local_timer_setup(cpu); | ||
132 | |||
133 | local_irq_enable(); | ||
134 | preempt_disable(); | ||
135 | cpu_idle(); | ||
136 | } | ||
137 | |||
138 | /* | ||
139 | * Called from kernel_init( ) -> smp_init( ) - for each CPU | ||
140 | * | ||
141 | * At this point, Secondary Processor is "HALT"ed: | ||
142 | * -It booted, but was halted in head.S | ||
143 | * -It was configured to halt-on-reset | ||
144 | * So need to wake it up. | ||
145 | * | ||
146 | * Essential requirements being where to run from (PC) and stack (SP) | ||
147 | */ | ||
148 | int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) | ||
149 | { | ||
150 | unsigned long wait_till; | ||
151 | |||
152 | secondary_idle_tsk = idle; | ||
153 | |||
154 | pr_info("Idle Task [%d] %p", cpu, idle); | ||
155 | pr_info("Trying to bring up CPU%u ...\n", cpu); | ||
156 | |||
157 | arc_platform_smp_wakeup_cpu(cpu, | ||
158 | (unsigned long)first_lines_of_secondary); | ||
159 | |||
160 | /* wait for 1 sec after kicking the secondary */ | ||
161 | wait_till = jiffies + HZ; | ||
162 | while (time_before(jiffies, wait_till)) { | ||
163 | if (cpu_online(cpu)) | ||
164 | break; | ||
165 | } | ||
166 | |||
167 | if (!cpu_online(cpu)) { | ||
168 | pr_info("Timeout: CPU%u FAILED to comeup !!!\n", cpu); | ||
169 | return -1; | ||
170 | } | ||
171 | |||
172 | secondary_idle_tsk = NULL; | ||
173 | |||
174 | return 0; | ||
175 | } | ||
176 | |||
177 | /* | ||
178 | * not supported here | ||
179 | */ | ||
180 | int __init setup_profiling_timer(unsigned int multiplier) | ||
181 | { | ||
182 | return -EINVAL; | ||
183 | } | ||
184 | |||
185 | /*****************************************************************************/ | ||
186 | /* Inter Processor Interrupt Handling */ | ||
187 | /*****************************************************************************/ | ||
188 | |||
189 | /* | ||
190 | * structures for inter-processor calls | ||
191 | * A Collection of single bit ipi messages | ||
192 | * | ||
193 | */ | ||
194 | |||
195 | /* | ||
196 | * TODO_rajesh investigate tlb message types. | ||
197 | * IPI Timer not needed because each ARC has an individual Interrupting Timer | ||
198 | */ | ||
199 | enum ipi_msg_type { | ||
200 | IPI_NOP = 0, | ||
201 | IPI_RESCHEDULE = 1, | ||
202 | IPI_CALL_FUNC, | ||
203 | IPI_CALL_FUNC_SINGLE, | ||
204 | IPI_CPU_STOP | ||
205 | }; | ||
206 | |||
207 | struct ipi_data { | ||
208 | unsigned long bits; | ||
209 | }; | ||
210 | |||
211 | static DEFINE_PER_CPU(struct ipi_data, ipi_data); | ||
212 | |||
213 | static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg) | ||
214 | { | ||
215 | unsigned long flags; | ||
216 | unsigned int cpu; | ||
217 | |||
218 | local_irq_save(flags); | ||
219 | |||
220 | for_each_cpu(cpu, callmap) { | ||
221 | struct ipi_data *ipi = &per_cpu(ipi_data, cpu); | ||
222 | set_bit(msg, &ipi->bits); | ||
223 | } | ||
224 | |||
225 | /* Call the platform specific cross-CPU call function */ | ||
226 | arc_platform_ipi_send(callmap); | ||
227 | |||
228 | local_irq_restore(flags); | ||
229 | } | ||
230 | |||
231 | void smp_send_reschedule(int cpu) | ||
232 | { | ||
233 | ipi_send_msg(cpumask_of(cpu), IPI_RESCHEDULE); | ||
234 | } | ||
235 | |||
236 | void smp_send_stop(void) | ||
237 | { | ||
238 | struct cpumask targets; | ||
239 | cpumask_copy(&targets, cpu_online_mask); | ||
240 | cpumask_clear_cpu(smp_processor_id(), &targets); | ||
241 | ipi_send_msg(&targets, IPI_CPU_STOP); | ||
242 | } | ||
243 | |||
244 | void arch_send_call_function_single_ipi(int cpu) | ||
245 | { | ||
246 | ipi_send_msg(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); | ||
247 | } | ||
248 | |||
249 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) | ||
250 | { | ||
251 | ipi_send_msg(mask, IPI_CALL_FUNC); | ||
252 | } | ||
253 | |||
254 | /* | ||
255 | * ipi_cpu_stop - handle IPI from smp_send_stop() | ||
256 | */ | ||
257 | static void ipi_cpu_stop(unsigned int cpu) | ||
258 | { | ||
259 | machine_halt(); | ||
260 | } | ||
261 | |||
262 | static inline void __do_IPI(unsigned long *ops, struct ipi_data *ipi, int cpu) | ||
263 | { | ||
264 | unsigned long msg = 0; | ||
265 | |||
266 | do { | ||
267 | msg = find_next_bit(ops, BITS_PER_LONG, msg+1); | ||
268 | |||
269 | switch (msg) { | ||
270 | case IPI_RESCHEDULE: | ||
271 | scheduler_ipi(); | ||
272 | break; | ||
273 | |||
274 | case IPI_CALL_FUNC: | ||
275 | generic_smp_call_function_interrupt(); | ||
276 | break; | ||
277 | |||
278 | case IPI_CALL_FUNC_SINGLE: | ||
279 | generic_smp_call_function_single_interrupt(); | ||
280 | break; | ||
281 | |||
282 | case IPI_CPU_STOP: | ||
283 | ipi_cpu_stop(cpu); | ||
284 | break; | ||
285 | } | ||
286 | } while (msg < BITS_PER_LONG); | ||
287 | |||
288 | } | ||
289 | |||
290 | /* | ||
291 | * arch-common ISR to handle for inter-processor interrupts | ||
292 | * Has hooks for platform specific IPI | ||
293 | */ | ||
294 | irqreturn_t do_IPI(int irq, void *dev_id) | ||
295 | { | ||
296 | int cpu = smp_processor_id(); | ||
297 | struct ipi_data *ipi = &per_cpu(ipi_data, cpu); | ||
298 | unsigned long ops; | ||
299 | |||
300 | arc_platform_ipi_clear(cpu, irq); | ||
301 | |||
302 | /* | ||
303 | * XXX: is this loop really needed | ||
304 | * And do we need to move ipi_clean inside | ||
305 | */ | ||
306 | while ((ops = xchg(&ipi->bits, 0)) != 0) | ||
307 | __do_IPI(&ops, ipi, cpu); | ||
308 | |||
309 | return IRQ_HANDLED; | ||
310 | } | ||
311 | |||
312 | /* | ||
313 | * API called by platform code to hookup arch-common ISR to their IPI IRQ | ||
314 | */ | ||
315 | static DEFINE_PER_CPU(int, ipi_dev); | ||
316 | int smp_ipi_irq_setup(int cpu, int irq) | ||
317 | { | ||
318 | int *dev_id = &per_cpu(ipi_dev, smp_processor_id()); | ||
319 | return request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev_id); | ||
320 | } | ||
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c index 232a0ff80a5e..e96030c13b52 100644 --- a/arch/arc/mm/tlb.c +++ b/arch/arc/mm/tlb.c | |||
@@ -474,6 +474,12 @@ void __init arc_mmu_init(void) | |||
474 | 474 | ||
475 | /* Enable the MMU */ | 475 | /* Enable the MMU */ |
476 | write_aux_reg(ARC_REG_PID, MMU_ENABLE); | 476 | write_aux_reg(ARC_REG_PID, MMU_ENABLE); |
477 | |||
478 | /* In smp we use this reg for interrupt 1 scratch */ | ||
479 | #ifndef CONFIG_SMP | ||
480 | /* swapper_pg_dir is the pgd for the kernel, used by vmalloc */ | ||
481 | write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir); | ||
482 | #endif | ||
477 | } | 483 | } |
478 | 484 | ||
479 | /* | 485 | /* |
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S index 164b02169498..4b1ad2d905ca 100644 --- a/arch/arc/mm/tlbex.S +++ b/arch/arc/mm/tlbex.S | |||
@@ -57,9 +57,15 @@ | |||
57 | .global ex_saved_reg1 | 57 | .global ex_saved_reg1 |
58 | .align 1 << L1_CACHE_SHIFT ; IMP: Must be Cache Line aligned | 58 | .align 1 << L1_CACHE_SHIFT ; IMP: Must be Cache Line aligned |
59 | .type ex_saved_reg1, @object | 59 | .type ex_saved_reg1, @object |
60 | #ifdef CONFIG_SMP | ||
61 | .size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT) | ||
62 | ex_saved_reg1: | ||
63 | .zero (CONFIG_NR_CPUS << L1_CACHE_SHIFT) | ||
64 | #else | ||
60 | .size ex_saved_reg1, 16 | 65 | .size ex_saved_reg1, 16 |
61 | ex_saved_reg1: | 66 | ex_saved_reg1: |
62 | .zero 16 | 67 | .zero 16 |
68 | #endif | ||
63 | 69 | ||
64 | ;============================================================================ | 70 | ;============================================================================ |
65 | ; Troubleshooting Stuff | 71 | ; Troubleshooting Stuff |
@@ -116,7 +122,13 @@ ex_saved_reg1: | |||
116 | 122 | ||
117 | lr r2, [efa] | 123 | lr r2, [efa] |
118 | 124 | ||
125 | #ifndef CONFIG_SMP | ||
119 | lr r1, [ARC_REG_SCRATCH_DATA0] ; current pgd | 126 | lr r1, [ARC_REG_SCRATCH_DATA0] ; current pgd |
127 | #else | ||
128 | GET_CURR_TASK_ON_CPU r1 | ||
129 | ld r1, [r1, TASK_ACT_MM] | ||
130 | ld r1, [r1, MM_PGD] | ||
131 | #endif | ||
120 | 132 | ||
121 | lsr r0, r2, PGDIR_SHIFT ; Bits for indexing into PGD | 133 | lsr r0, r2, PGDIR_SHIFT ; Bits for indexing into PGD |
122 | ld.as r1, [r1, r0] ; PGD entry corresp to faulting addr | 134 | ld.as r1, [r1, r0] ; PGD entry corresp to faulting addr |
@@ -192,12 +204,28 @@ ex_saved_reg1: | |||
192 | ; ".size ex_saved_reg1, 16" | 204 | ; ".size ex_saved_reg1, 16" |
193 | ; [All of this dance is to avoid stack switching for each TLB Miss, since we | 205 | ; [All of this dance is to avoid stack switching for each TLB Miss, since we |
194 | ; only need to save only a handful of regs, as opposed to complete reg file] | 206 | ; only need to save only a handful of regs, as opposed to complete reg file] |
207 | ; | ||
208 | ; For ARC700 SMP, the "global" obviously can't be used for free up the FIRST | ||
209 | ; core reg as it will not be SMP safe. | ||
210 | ; Thus scratch AUX reg is used (and no longer used to cache task PGD). | ||
211 | ; To save the rest of 3 regs - per cpu, the global is made "per-cpu". | ||
212 | ; Epilogue thus has to locate the "per-cpu" storage for regs. | ||
213 | ; To avoid cache line bouncing the per-cpu global is aligned/sized per | ||
214 | ; L1_CACHE_SHIFT, despite fundamentally needing to be 12 bytes only. Hence | ||
215 | ; ".size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)" | ||
195 | 216 | ||
196 | ; As simple as that.... | 217 | ; As simple as that.... |
197 | 218 | ||
198 | .macro TLBMISS_FREEUP_REGS | 219 | .macro TLBMISS_FREEUP_REGS |
220 | #ifdef CONFIG_SMP | ||
221 | sr r0, [ARC_REG_SCRATCH_DATA0] ; freeup r0 to code with | ||
222 | GET_CPU_ID r0 ; get to per cpu scratch mem, | ||
223 | lsl r0, r0, L1_CACHE_SHIFT ; cache line wide per cpu | ||
224 | add r0, @ex_saved_reg1, r0 | ||
225 | #else | ||
199 | st r0, [@ex_saved_reg1] | 226 | st r0, [@ex_saved_reg1] |
200 | mov_s r0, @ex_saved_reg1 | 227 | mov_s r0, @ex_saved_reg1 |
228 | #endif | ||
201 | st_s r1, [r0, 4] | 229 | st_s r1, [r0, 4] |
202 | st_s r2, [r0, 8] | 230 | st_s r2, [r0, 8] |
203 | st_s r3, [r0, 12] | 231 | st_s r3, [r0, 12] |
@@ -210,11 +238,21 @@ ex_saved_reg1: | |||
210 | 238 | ||
211 | ;----------------------------------------------------------------- | 239 | ;----------------------------------------------------------------- |
212 | .macro TLBMISS_RESTORE_REGS | 240 | .macro TLBMISS_RESTORE_REGS |
241 | #ifdef CONFIG_SMP | ||
242 | GET_CPU_ID r0 ; get to per cpu scratch mem | ||
243 | lsl r0, r0, L1_CACHE_SHIFT ; each is cache line wide | ||
244 | add r0, @ex_saved_reg1, r0 | ||
245 | ld_s r3, [r0,12] | ||
246 | ld_s r2, [r0, 8] | ||
247 | ld_s r1, [r0, 4] | ||
248 | lr r0, [ARC_REG_SCRATCH_DATA0] | ||
249 | #else | ||
213 | mov_s r0, @ex_saved_reg1 | 250 | mov_s r0, @ex_saved_reg1 |
214 | ld_s r3, [r0,12] | 251 | ld_s r3, [r0,12] |
215 | ld_s r2, [r0, 8] | 252 | ld_s r2, [r0, 8] |
216 | ld_s r1, [r0, 4] | 253 | ld_s r1, [r0, 4] |
217 | ld_s r0, [r0] | 254 | ld_s r0, [r0] |
255 | #endif | ||
218 | .endm | 256 | .endm |
219 | 257 | ||
220 | .section .text, "ax",@progbits ;Fast Path Code, candidate for ICCM | 258 | .section .text, "ax",@progbits ;Fast Path Code, candidate for ICCM |
diff --git a/arch/arc/plat-arcfpga/Kconfig b/arch/arc/plat-arcfpga/Kconfig index 7af3a4e7f7d2..38752bfb91e0 100644 --- a/arch/arc/plat-arcfpga/Kconfig +++ b/arch/arc/plat-arcfpga/Kconfig | |||
@@ -13,6 +13,7 @@ choice | |||
13 | 13 | ||
14 | config ARC_BOARD_ANGEL4 | 14 | config ARC_BOARD_ANGEL4 |
15 | bool "ARC Angel4" | 15 | bool "ARC Angel4" |
16 | select ISS_SMP_EXTN if SMP | ||
16 | help | 17 | help |
17 | ARC Angel4 FPGA Ref Platform (Xilinx Virtex Based) | 18 | ARC Angel4 FPGA Ref Platform (Xilinx Virtex Based) |
18 | 19 | ||
@@ -21,6 +22,19 @@ config ARC_BOARD_ML509 | |||
21 | help | 22 | help |
22 | ARC ML509 FPGA Ref Platform (Xilinx Virtex-5 Based) | 23 | ARC ML509 FPGA Ref Platform (Xilinx Virtex-5 Based) |
23 | 24 | ||
25 | config ISS_SMP_EXTN | ||
26 | bool "ARC SMP Extensions (ISS Models only)" | ||
27 | default n | ||
28 | depends on SMP | ||
29 | select ARC_HAS_COH_RTSC | ||
30 | help | ||
31 | SMP Extensions to ARC700, in a "simulation only" Model, supported in | ||
32 | ARC ISS (Instruction Set Simulator). | ||
33 | The SMP extensions include: | ||
34 | -IDU (Interrupt Distribution Unit) | ||
35 | -XTL (To enable CPU start/stop/set-PC for another CPU) | ||
36 | It doesn't provide coherent Caches and/or Atomic Ops (LLOCK/SCOND) | ||
37 | |||
24 | endchoice | 38 | endchoice |
25 | 39 | ||
26 | config ARC_SERIAL_BAUD | 40 | config ARC_SERIAL_BAUD |
diff --git a/arch/arc/plat-arcfpga/Makefile b/arch/arc/plat-arcfpga/Makefile index 385eb9d83b63..2a828bec8212 100644 --- a/arch/arc/plat-arcfpga/Makefile +++ b/arch/arc/plat-arcfpga/Makefile | |||
@@ -7,3 +7,4 @@ | |||
7 | # | 7 | # |
8 | 8 | ||
9 | obj-y := platform.o irq.o | 9 | obj-y := platform.o irq.o |
10 | obj-$(CONFIG_SMP) += smp.o | ||
diff --git a/arch/arc/plat-arcfpga/include/plat/irq.h b/arch/arc/plat-arcfpga/include/plat/irq.h index b34e08734c65..255b90e973ee 100644 --- a/arch/arc/plat-arcfpga/include/plat/irq.h +++ b/arch/arc/plat-arcfpga/include/plat/irq.h | |||
@@ -12,7 +12,11 @@ | |||
12 | #ifndef __PLAT_IRQ_H | 12 | #ifndef __PLAT_IRQ_H |
13 | #define __PLAT_IRQ_H | 13 | #define __PLAT_IRQ_H |
14 | 14 | ||
15 | #define NR_IRQS 16 | 15 | #ifdef CONFIG_SMP |
16 | #define NR_IRQS 32 | ||
17 | #else | ||
18 | #define NR_IRQS 16 | ||
19 | #endif | ||
16 | 20 | ||
17 | #define UART0_IRQ 5 | 21 | #define UART0_IRQ 5 |
18 | #define UART1_IRQ 10 | 22 | #define UART1_IRQ 10 |
@@ -24,4 +28,8 @@ | |||
24 | #define PCI_IRQ 14 | 28 | #define PCI_IRQ 14 |
25 | #define PS2_IRQ 15 | 29 | #define PS2_IRQ 15 |
26 | 30 | ||
31 | #ifdef CONFIG_SMP | ||
32 | #define IDU_INTERRUPT_0 16 | ||
33 | #endif | ||
34 | |||
27 | #endif | 35 | #endif |
diff --git a/arch/arc/plat-arcfpga/include/plat/smp.h b/arch/arc/plat-arcfpga/include/plat/smp.h new file mode 100644 index 000000000000..8c5e46c01b15 --- /dev/null +++ b/arch/arc/plat-arcfpga/include/plat/smp.h | |||
@@ -0,0 +1,115 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * Rajeshwar Ranga: Interrupt Distribution Unit API's | ||
9 | */ | ||
10 | |||
11 | #ifndef __PLAT_ARCFPGA_SMP_H | ||
12 | #define __PLAT_ARCFPGA_SMP_H | ||
13 | |||
14 | #ifdef CONFIG_SMP | ||
15 | |||
16 | #include <linux/types.h> | ||
17 | #include <asm/arcregs.h> | ||
18 | |||
19 | #define ARC_AUX_IDU_REG_CMD 0x2000 | ||
20 | #define ARC_AUX_IDU_REG_PARAM 0x2001 | ||
21 | |||
22 | #define ARC_AUX_XTL_REG_CMD 0x2002 | ||
23 | #define ARC_AUX_XTL_REG_PARAM 0x2003 | ||
24 | |||
25 | #define ARC_REG_MP_BCR 0x2021 | ||
26 | |||
27 | #define ARC_XTL_CMD_WRITE_PC 0x04 | ||
28 | #define ARC_XTL_CMD_CLEAR_HALT 0x02 | ||
29 | |||
30 | /* | ||
31 | * Build Configuration Register which identifies the sub-components | ||
32 | */ | ||
33 | struct bcr_mp { | ||
34 | #ifdef CONFIG_CPU_BIG_ENDIAN | ||
35 | unsigned int mp_arch:16, pad:5, sdu:1, idu:1, scu:1, ver:8; | ||
36 | #else | ||
37 | unsigned int ver:8, scu:1, idu:1, sdu:1, pad:5, mp_arch:16; | ||
38 | #endif | ||
39 | }; | ||
40 | |||
41 | /* IDU supports 256 common interrupts */ | ||
42 | #define NR_IDU_IRQS 256 | ||
43 | |||
44 | /* | ||
45 | * The Aux Regs layout is same bit-by-bit in both BE/LE modes. | ||
46 | * However when casted as a bitfield encoded "C" struct, gcc treats it as | ||
47 | * memory, generating different code for BE/LE, requiring strcture adj (see | ||
48 | * include/asm/arcregs.h) | ||
49 | * | ||
50 | * However when manually "carving" the value for a Aux, no special handling | ||
51 | * of BE is needed because of the property discribed above | ||
52 | */ | ||
53 | #define IDU_SET_COMMAND(irq, cmd) \ | ||
54 | do { \ | ||
55 | uint32_t __val; \ | ||
56 | __val = (((irq & 0xFF) << 8) | (cmd & 0xFF)); \ | ||
57 | write_aux_reg(ARC_AUX_IDU_REG_CMD, __val); \ | ||
58 | } while (0) | ||
59 | |||
60 | #define IDU_SET_PARAM(par) write_aux_reg(ARC_AUX_IDU_REG_PARAM, par) | ||
61 | #define IDU_GET_PARAM() read_aux_reg(ARC_AUX_IDU_REG_PARAM) | ||
62 | |||
63 | /* IDU Commands */ | ||
64 | #define IDU_DISABLE 0x00 | ||
65 | #define IDU_ENABLE 0x01 | ||
66 | #define IDU_IRQ_CLEAR 0x02 | ||
67 | #define IDU_IRQ_ASSERT 0x03 | ||
68 | #define IDU_IRQ_WMODE 0x04 | ||
69 | #define IDU_IRQ_STATUS 0x05 | ||
70 | #define IDU_IRQ_ACK 0x06 | ||
71 | #define IDU_IRQ_PEND 0x07 | ||
72 | #define IDU_IRQ_RMODE 0x08 | ||
73 | #define IDU_IRQ_WBITMASK 0x09 | ||
74 | #define IDU_IRQ_RBITMASK 0x0A | ||
75 | |||
76 | #define idu_enable() IDU_SET_COMMAND(0, IDU_ENABLE) | ||
77 | #define idu_disable() IDU_SET_COMMAND(0, IDU_DISABLE) | ||
78 | |||
79 | #define idu_irq_assert(irq) IDU_SET_COMMAND((irq), IDU_IRQ_ASSERT) | ||
80 | #define idu_irq_clear(irq) IDU_SET_COMMAND((irq), IDU_IRQ_CLEAR) | ||
81 | |||
82 | /* IDU Interrupt Mode - Destination Encoding */ | ||
83 | #define IDU_IRQ_MOD_DISABLE 0x00 | ||
84 | #define IDU_IRQ_MOD_ROUND_RECP 0x01 | ||
85 | #define IDU_IRQ_MOD_TCPU_FIRSTRECP 0x02 | ||
86 | #define IDU_IRQ_MOD_TCPU_ALLRECP 0x03 | ||
87 | |||
88 | /* IDU Interrupt Mode - Triggering Mode */ | ||
89 | #define IDU_IRQ_MODE_LEVEL_TRIG 0x00 | ||
90 | #define IDU_IRQ_MODE_PULSE_TRIG 0x01 | ||
91 | |||
92 | #define IDU_IRQ_MODE_PARAM(dest_mode, trig_mode) \ | ||
93 | (((trig_mode & 0x01) << 15) | (dest_mode & 0xFF)) | ||
94 | |||
95 | struct idu_irq_config { | ||
96 | uint8_t irq; | ||
97 | uint8_t dest_mode; | ||
98 | uint8_t trig_mode; | ||
99 | }; | ||
100 | |||
101 | struct idu_irq_status { | ||
102 | uint8_t irq; | ||
103 | bool enabled; | ||
104 | bool status; | ||
105 | bool ack; | ||
106 | bool pend; | ||
107 | uint8_t next_rr; | ||
108 | }; | ||
109 | |||
110 | extern void idu_irq_set_tgtcpu(uint8_t irq, uint32_t mask); | ||
111 | extern void idu_irq_set_mode(uint8_t irq, uint8_t dest_mode, uint8_t trig_mode); | ||
112 | |||
113 | #endif /* CONFIG_SMP */ | ||
114 | |||
115 | #endif | ||
diff --git a/arch/arc/plat-arcfpga/irq.c b/arch/arc/plat-arcfpga/irq.c index ed726360b9f6..590edd174c47 100644 --- a/arch/arc/plat-arcfpga/irq.c +++ b/arch/arc/plat-arcfpga/irq.c | |||
@@ -9,7 +9,17 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/interrupt.h> | 11 | #include <linux/interrupt.h> |
12 | #include <asm/irq.h> | ||
12 | 13 | ||
13 | void __init plat_init_IRQ(void) | 14 | void __init plat_init_IRQ(void) |
14 | { | 15 | { |
16 | /* | ||
17 | * SMP Hack because UART IRQ hardwired to cpu0 (boot-cpu) but if the | ||
18 | * request_irq() comes from any other CPU, the low level IRQ unamsking | ||
19 | * essential for getting Interrupts won't be enabled on cpu0, locking | ||
20 | * up the UART state machine. | ||
21 | */ | ||
22 | #ifdef CONFIG_SMP | ||
23 | arch_unmask_irq(UART0_IRQ); | ||
24 | #endif | ||
15 | } | 25 | } |
diff --git a/arch/arc/plat-arcfpga/smp.c b/arch/arc/plat-arcfpga/smp.c new file mode 100644 index 000000000000..a95fcdb29033 --- /dev/null +++ b/arch/arc/plat-arcfpga/smp.c | |||
@@ -0,0 +1,167 @@ | |||
1 | /* | ||
2 | * ARC700 Simulation-only Extensions for SMP | ||
3 | * | ||
4 | * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * Vineet Gupta - 2012 : split off arch common and plat specific SMP | ||
11 | * Rajeshwar Ranga - 2007 : Interrupt Distribution Unit API's | ||
12 | */ | ||
13 | |||
14 | #include <linux/smp.h> | ||
15 | #include <asm/irq.h> | ||
16 | #include <plat/smp.h> | ||
17 | |||
18 | static char smp_cpuinfo_buf[128]; | ||
19 | |||
20 | /* | ||
21 | *------------------------------------------------------------------- | ||
22 | * Platform specific callbacks expected by arch SMP code | ||
23 | *------------------------------------------------------------------- | ||
24 | */ | ||
25 | |||
26 | const char *arc_platform_smp_cpuinfo(void) | ||
27 | { | ||
28 | #define IS_AVAIL1(var, str) ((var) ? str : "") | ||
29 | |||
30 | struct bcr_mp mp; | ||
31 | |||
32 | READ_BCR(ARC_REG_MP_BCR, mp); | ||
33 | |||
34 | sprintf(smp_cpuinfo_buf, "Extn [700-SMP]: v%d, arch(%d) %s %s %s\n", | ||
35 | mp.ver, mp.mp_arch, IS_AVAIL1(mp.scu, "SCU"), | ||
36 | IS_AVAIL1(mp.idu, "IDU"), IS_AVAIL1(mp.sdu, "SDU")); | ||
37 | |||
38 | return smp_cpuinfo_buf; | ||
39 | } | ||
40 | |||
41 | /* | ||
42 | * Master kick starting another CPU | ||
43 | */ | ||
44 | void arc_platform_smp_wakeup_cpu(int cpu, unsigned long pc) | ||
45 | { | ||
46 | /* setup the start PC */ | ||
47 | write_aux_reg(ARC_AUX_XTL_REG_PARAM, pc); | ||
48 | |||
49 | /* Trigger WRITE_PC cmd for this cpu */ | ||
50 | write_aux_reg(ARC_AUX_XTL_REG_CMD, | ||
51 | (ARC_XTL_CMD_WRITE_PC | (cpu << 8))); | ||
52 | |||
53 | /* Take the cpu out of Halt */ | ||
54 | write_aux_reg(ARC_AUX_XTL_REG_CMD, | ||
55 | (ARC_XTL_CMD_CLEAR_HALT | (cpu << 8))); | ||
56 | |||
57 | } | ||
58 | |||
59 | /* | ||
60 | * Any SMP specific init any CPU does when it comes up. | ||
61 | * Here we setup the CPU to enable Inter-Processor-Interrupts | ||
62 | * Called for each CPU | ||
63 | * -Master : init_IRQ() | ||
64 | * -Other(s) : start_kernel_secondary() | ||
65 | */ | ||
66 | void arc_platform_smp_init_cpu(void) | ||
67 | { | ||
68 | int cpu = smp_processor_id(); | ||
69 | |||
70 | /* Check if CPU is configured for more than 16 interrupts */ | ||
71 | if (NR_IRQS <= 16 || get_hw_config_num_irq() <= 16) | ||
72 | panic("[arcfpga] IRQ system can't support IDU IPI\n"); | ||
73 | |||
74 | idu_disable(); | ||
75 | |||
76 | /**************************************************************** | ||
77 | * IDU provides a set of Common IRQs, each of which can be dynamically | ||
78 | * attached to (1|many|all) CPUs. | ||
79 | * The Common IRQs [0-15] are mapped as CPU pvt [16-31] | ||
80 | * | ||
81 | * Here we use a simple 1:1 mapping: | ||
82 | * A CPU 'x' is wired to Common IRQ 'x'. | ||
83 | * So an IDU ASSERT on IRQ 'x' will trigger Interupt on CPU 'x', which | ||
84 | * makes up for our simple IPI plumbing. | ||
85 | * | ||
86 | * TBD: Have a dedicated multicast IRQ for sending IPIs to all CPUs | ||
87 | * w/o having to do one-at-a-time | ||
88 | ******************************************************************/ | ||
89 | |||
90 | /* | ||
91 | * Claim an IRQ which would trigger IPI on this CPU. | ||
92 | * In IDU parlance it involves setting up a cpu bitmask for the IRQ | ||
93 | * The bitmap here contains only 1 CPU (self). | ||
94 | */ | ||
95 | idu_irq_set_tgtcpu(cpu, 0x1 << cpu); | ||
96 | |||
97 | /* Set the IRQ destination to use the bitmask above */ | ||
98 | idu_irq_set_mode(cpu, 7, /* XXX: IDU_IRQ_MOD_TCPU_ALLRECP: ISS bug */ | ||
99 | IDU_IRQ_MODE_PULSE_TRIG); | ||
100 | |||
101 | idu_enable(); | ||
102 | |||
103 | /* Attach the arch-common IPI ISR to our IDU IRQ */ | ||
104 | smp_ipi_irq_setup(cpu, IDU_INTERRUPT_0 + cpu); | ||
105 | } | ||
106 | |||
107 | void arc_platform_ipi_send(const struct cpumask *callmap) | ||
108 | { | ||
109 | unsigned int cpu; | ||
110 | |||
111 | for_each_cpu(cpu, callmap) | ||
112 | idu_irq_assert(cpu); | ||
113 | } | ||
114 | |||
115 | void arc_platform_ipi_clear(int cpu, int irq) | ||
116 | { | ||
117 | idu_irq_clear(IDU_INTERRUPT_0 + cpu); | ||
118 | } | ||
119 | |||
120 | /* | ||
121 | *------------------------------------------------------------------- | ||
122 | * Low level Platform IPI Providers | ||
123 | *------------------------------------------------------------------- | ||
124 | */ | ||
125 | |||
126 | /* Set the Mode for the Common IRQ */ | ||
127 | void idu_irq_set_mode(uint8_t irq, uint8_t dest_mode, uint8_t trig_mode) | ||
128 | { | ||
129 | uint32_t par = IDU_IRQ_MODE_PARAM(dest_mode, trig_mode); | ||
130 | |||
131 | IDU_SET_PARAM(par); | ||
132 | IDU_SET_COMMAND(irq, IDU_IRQ_WMODE); | ||
133 | } | ||
134 | |||
135 | /* Set the target cpu Bitmask for Common IRQ */ | ||
136 | void idu_irq_set_tgtcpu(uint8_t irq, uint32_t mask) | ||
137 | { | ||
138 | IDU_SET_PARAM(mask); | ||
139 | IDU_SET_COMMAND(irq, IDU_IRQ_WBITMASK); | ||
140 | } | ||
141 | |||
142 | /* Get the Interrupt Acknowledged status for IRQ (as CPU Bitmask) */ | ||
143 | bool idu_irq_get_ack(uint8_t irq) | ||
144 | { | ||
145 | uint32_t val; | ||
146 | |||
147 | IDU_SET_COMMAND(irq, IDU_IRQ_ACK); | ||
148 | val = IDU_GET_PARAM(); | ||
149 | |||
150 | return val & (1 << irq); | ||
151 | } | ||
152 | |||
153 | /* | ||
154 | * Get the Interrupt Pending status for IRQ (as CPU Bitmask) | ||
155 | * -Pending means CPU has not yet noticed the IRQ (e.g. disabled) | ||
156 | * -After Interrupt has been taken, the IPI expcitily needs to be | ||
157 | * cleared, to be acknowledged. | ||
158 | */ | ||
159 | bool idu_irq_get_pend(uint8_t irq) | ||
160 | { | ||
161 | uint32_t val; | ||
162 | |||
163 | IDU_SET_COMMAND(irq, IDU_IRQ_PEND); | ||
164 | val = IDU_GET_PARAM(); | ||
165 | |||
166 | return val & (1 << irq); | ||
167 | } | ||