Diffstat (limited to 'arch/arc/include')

 arch/arc/include/asm/entry.h       |  49 +
 arch/arc/include/asm/mmu_context.h |   4 +
 arch/arc/include/asm/mutex.h       |   9 +
 arch/arc/include/asm/pgtable.h     |   4 +
 arch/arc/include/asm/processor.h   |   8 +
 arch/arc/include/asm/smp.h         | 107 +
 6 files changed, 181 insertions(+), 0 deletions(-)
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index 23ef2de1e09f..23daa326fc9b 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -389,11 +389,19 @@
  * to be saved again on kernel mode stack, as part of ptregs.
  *-------------------------------------------------------------*/
 .macro EXCPN_PROLOG_FREEUP_REG	reg
+#ifdef CONFIG_SMP
+	sr  \reg, [ARC_REG_SCRATCH_DATA0]
+#else
 	st  \reg, [@ex_saved_reg1]
+#endif
 .endm
 
 .macro EXCPN_PROLOG_RESTORE_REG	reg
+#ifdef CONFIG_SMP
+	lr  \reg, [ARC_REG_SCRATCH_DATA0]
+#else
 	ld  \reg, [@ex_saved_reg1]
+#endif
 .endm
 
 /*--------------------------------------------------------------
@@ -508,7 +516,11 @@
 	/* restore original r9, saved in int1_saved_reg
 	 * It will be saved on stack in macro: SAVE_CALLER_SAVED
 	 */
+#ifdef CONFIG_SMP
+	lr  r9, [ARC_REG_SCRATCH_DATA0]
+#else
 	ld  r9, [@int1_saved_reg]
+#endif
 
 	/* now we are ready to save the remaining context :) */
 	st      orig_r8_IS_IRQ1, [sp, 8]    /* Event Type */
@@ -639,6 +651,41 @@
 	bmsk \reg, \reg, 7
 .endm
 
+#ifdef CONFIG_SMP
+
+/*-------------------------------------------------
+ * Retrieve the current running task on this CPU
+ * 1. Determine curr CPU id.
+ * 2. Use it to index into _current_task[ ]
+ */
+.macro  GET_CURR_TASK_ON_CPU   reg
+	GET_CPU_ID  \reg
+	ld.as  \reg, [@_current_task, \reg]
+.endm
+
+/*-------------------------------------------------
+ * Save a new task as the "current" task on this CPU
+ * 1. Determine curr CPU id.
+ * 2. Use it to index into _current_task[ ]
+ *
+ * Coded differently than GET_CURR_TASK_ON_CPU (which uses LD.AS)
+ * because ST r0, [r1, offset] can ONLY have s9 @offset
+ * while LD can take s9 (4 byte insn) or LIMM (8 byte insn)
+ */
+
+.macro  SET_CURR_TASK_ON_CPU    tsk, tmp
+	GET_CPU_ID  \tmp
+	add2 \tmp, @_current_task, \tmp
+	st   \tsk, [\tmp]
+#ifdef CONFIG_ARC_CURR_IN_REG
+	mov r25, \tsk
+#endif
+
+.endm
+
+
+#else   /* Uniprocessor implementation of macros */
+
 .macro  GET_CURR_TASK_ON_CPU    reg
 	ld  \reg, [@_current_task]
 .endm
@@ -650,6 +697,8 @@
 #endif
 .endm
 
+#endif	/* SMP / UNI */
+
 /* ------------------------------------------------------------------
  * Get the ptr to some field of Current Task at @off in task struct
  * -Uses r25 for Current task ptr if that is enabled
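
Note: the SMP GET_CURR_TASK_ON_CPU / SET_CURR_TASK_ON_CPU pair above is
essentially a per-CPU "current" pointer done in assembly. A minimal C
rendering of the same idea (illustrative only; the array name matches the
patch, the helper names are hypothetical):

	/* one "current" slot per CPU, indexed by the hardware CPU id */
	struct task_struct *_current_task[NR_CPUS];

	static inline struct task_struct *get_curr_task(int cpu)
	{
		/* GET_CURR_TASK_ON_CPU: LD.AS folds the *4 index
		 * scaling into the load itself */
		return _current_task[cpu];
	}

	static inline void set_curr_task(int cpu, struct task_struct *tsk)
	{
		/* SET_CURR_TASK_ON_CPU: ST only takes an s9 displacement,
		 * so the address is formed first (ADD2 = base + id << 2)
		 * and then stored through */
		_current_task[cpu] = tsk;
	}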
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index d12f3dec8b70..0d71fb11b57c 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -147,8 +147,10 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
+#ifndef CONFIG_SMP
 	/* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
 	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
+#endif
 
 	/*
 	 * Get a new ASID if task doesn't have a valid one. Possible when
@@ -197,7 +199,9 @@ static inline void destroy_context(struct mm_struct *mm)
 
 static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
 {
+#ifndef CONFIG_SMP
 	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
+#endif
 
 	/* Unconditionally get a new ASID */
 	get_new_mmu_context(next);
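
Note: the two #ifndef CONFIG_SMP guards tie back to the entry.h change
above: on SMP, ARC_REG_SCRATCH_DATA0 is claimed by the exception prolog
as its scratch slot, so it is no longer free to cache the PGD. What the
SMP build consequently pays is the full pointer chase, roughly (a sketch
mirroring the comment in switch_mm):

	/* !SMP: one aux-register read */
	pgd_t *pgd_fast = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0);

	/* SMP: three dependent memory lookups: task -> mm -> pgd */
	pgd_t *pgd_slow = current->active_mm->pgd;

The pgd_offset_fast() change in pgtable.h below is the other half of
this trade-off.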
diff --git a/arch/arc/include/asm/mutex.h b/arch/arc/include/asm/mutex.h
index 3be5e64da139..a2f88ff9f506 100644
--- a/arch/arc/include/asm/mutex.h
+++ b/arch/arc/include/asm/mutex.h
@@ -6,4 +6,13 @@
  * published by the Free Software Foundation.
  */
 
+/*
+ * xchg() based mutex fast path maintains a state of 0 or 1, as opposed to
+ * atomic dec based which can "count" any number of lock contenders.
+ * This ideally needs to be fixed in core, but for now switching to dec ver.
+ */
+#if defined(CONFIG_SMP) && (CONFIG_NR_CPUS > 2)
+#include <asm-generic/mutex-dec.h>
+#else
 #include <asm-generic/mutex-xchg.h>
+#endif
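
Note: the comment's xchg-vs-dec distinction can be sketched in C,
modelled on the asm-generic fast paths (illustrative, not this patch's
code):

	/* xchg fast path: the lock word only ever holds 1 (free) or
	 * 0 (locked), so once two or more CPUs contend, the lock word
	 * alone can no longer say how many are waiting */
	if (atomic_xchg(&lock->count, 0) != 1)
		/* slow path */;

	/* dec fast path: every contender decrements, so a negative
	 * count still "counts" the pile-up */
	if (atomic_dec_return(&lock->count) < 0)
		/* slow path */;

Hence the CONFIG_NR_CPUS > 2 cutoff above, where such pile-ups become
likely enough to matter.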
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index dcb0701528aa..b7e36684c091 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -354,11 +354,15 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
  * Thus use this macro only when you are certain that "current" is current
  * e.g. when dealing with signal frame setup code etc
  */
+#ifndef CONFIG_SMP
 #define pgd_offset_fast(mm, addr)	\
 ({					\
 	pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0);  \
 	pgd_base + pgd_index(addr);	\
 })
+#else
+#define pgd_offset_fast(mm, addr)	pgd_offset(mm, addr)
+#endif
 
 extern void paging_init(void);
 extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
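
Note: a minimal usage sketch for either variant (hypothetical caller;
only valid where "current" is known to be current, per the comment
above):

	pgd_t *pgd = pgd_offset_fast(current->active_mm, addr);

	/* !SMP: reads the PGD base cached in ARC_REG_SCRATCH_DATA0
	 *  SMP: falls back to the generic mm->pgd + pgd_index(addr) walk */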
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index b7b155610067..5f26b2c1cba0 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -58,7 +58,15 @@ unsigned long thread_saved_pc(struct task_struct *t);
 /* Prepare to copy thread state - unlazy all lazy status */
 #define prepare_to_copy(tsk)    do { } while (0)
 
+/*
+ * A lot of busy-wait loops in SMP spin on non-volatile data; without a
+ * compiler barrier they would get optimised away by gcc
+ */
+#ifdef CONFIG_SMP
+#define cpu_relax()	__asm__ __volatile__ ("" : : : "memory")
+#else
 #define cpu_relax()    do { } while (0)
+#endif
 
 #define copy_segments(tsk, mm)      do { } while (0)
 #define release_segments(mm)        do { } while (0)
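
Note: a small illustration of why the SMP cpu_relax() needs the
"memory" clobber (hypothetical flag polled across CPUs):

	static int flag;	/* deliberately not volatile */

	void wait_for_flag(void)
	{
		while (!flag)
			cpu_relax();	/* clobber forces flag to be
					 * re-loaded each iteration */
	}

Without the clobber, gcc may hoist the load of flag out of the loop and
spin forever on a stale register copy; the empty asm with a "memory"
clobber is a pure compiler barrier and costs no instructions.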
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index 4341f3ba7d92..f91f1946272f 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -9,6 +9,69 @@
 #ifndef __ASM_ARC_SMP_H
 #define __ASM_ARC_SMP_H
 
+#ifdef CONFIG_SMP
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/threads.h>
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+/* including cpumask.h leads to cyclic deps hence this Forward declaration */
+struct cpumask;
+
+/*
+ * APIs provided by arch SMP code to generic code
+ */
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+/*
+ * APIs provided by arch SMP code to rest of arch code
+ */
+extern void __init smp_init_cpus(void);
+extern void __init first_lines_of_secondary(void);
+
+/*
+ * API expected BY platform smp code (FROM arch smp code)
+ *
+ * smp_ipi_irq_setup:
+ *	Takes @cpu and @irq to which the arch-common ISR is hooked up
+ */
+extern int smp_ipi_irq_setup(int cpu, int irq);
+
+/*
+ * APIs expected FROM platform smp code
+ *
+ * arc_platform_smp_cpuinfo:
+ *	returns a string containing info for /proc/cpuinfo
+ *
+ * arc_platform_smp_init_cpu:
+ *	Called from start_kernel_secondary to do any CPU local setup
+ *	such as starting a timer, setting up IPI etc
+ *
+ * arc_platform_smp_wait_to_boot:
+ *	Called from early bootup code for non-Master CPUs to "park" them
+ *
+ * arc_platform_smp_wakeup_cpu:
+ *	Called from __cpu_up (Master CPU) to kick start another one
+ *
+ * arc_platform_ipi_send:
+ *	Takes @cpumask to which IPI(s) would be sent.
+ *	The actual msg-id/buffer is managed in arch-common code
+ *
+ * arc_platform_ipi_clear:
+ *	Takes @cpu which got IPI at @irq to do any IPI clearing
+ */
+extern const char *arc_platform_smp_cpuinfo(void);
+extern void arc_platform_smp_init_cpu(void);
+extern void arc_platform_smp_wait_to_boot(int cpu);
+extern void arc_platform_smp_wakeup_cpu(int cpu, unsigned long pc);
+extern void arc_platform_ipi_send(const struct cpumask *callmap);
+extern void arc_platform_ipi_clear(int cpu, int irq);
+
+#endif	/* CONFIG_SMP */
+
 /*
  * ARC700 doesn't support atomic Read-Modify-Write ops.
  * Originally Interrupts had to be disabled around code to guarantee atomicity.
@@ -18,10 +81,52 @@
  *
  * (1) These insn were introduced only in 4.10 release. So for older releases,
  *     support is needed.
+ *
+ * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
+ *     guaranteed by the platform (not something which core handles).
+ *     Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
+ *     disabling for atomicity.
+ *
+ *     However the exported spinlock API is not usable due to cyclic hdr deps
+ *     (even after system.h disintegration upstream)
+ *     asm/bitops.h -> linux/spinlock.h -> linux/preempt.h
+ *		-> linux/thread_info.h -> linux/bitops.h -> asm/bitops.h
+ *
+ *     So the workaround is to use the lowest level arch spinlock API.
+ *     The exported spinlock API is smart enough to be a NOP for !CONFIG_SMP,
+ *     but the same is not true for the ARCH backend, hence the 2 variants
  */
 #ifndef CONFIG_ARC_HAS_LLSC
 
 #include <linux/irqflags.h>
+#ifdef CONFIG_SMP
+
+#include <asm/spinlock.h>
+
+extern arch_spinlock_t smp_atomic_ops_lock;
+extern arch_spinlock_t smp_bitops_lock;
+
+#define atomic_ops_lock(flags)	do {		\
+	local_irq_save(flags);			\
+	arch_spin_lock(&smp_atomic_ops_lock);	\
+} while (0)
+
+#define atomic_ops_unlock(flags) do {		\
+	arch_spin_unlock(&smp_atomic_ops_lock);	\
+	local_irq_restore(flags);		\
+} while (0)
+
+#define bitops_lock(flags)	do {		\
+	local_irq_save(flags);			\
+	arch_spin_lock(&smp_bitops_lock);	\
+} while (0)
+
+#define bitops_unlock(flags) do {		\
+	arch_spin_unlock(&smp_bitops_lock);	\
+	local_irq_restore(flags);		\
+} while (0)
+
+#else /* !CONFIG_SMP */
 
 #define atomic_ops_lock(flags)		local_irq_save(flags)
 #define atomic_ops_unlock(flags)	local_irq_restore(flags)
@@ -29,6 +134,8 @@
 #define bitops_lock(flags)		local_irq_save(flags)
 #define bitops_unlock(flags)		local_irq_restore(flags)
 
+#endif /* !CONFIG_SMP */
+
 #endif	/* !CONFIG_ARC_HAS_LLSC */
 
 #endif
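
Note: how the lock pairs above get consumed is the easiest sanity
check. A sketch of a !LLSC atomic op built on them, following the usual
arch atomic pattern (illustrative, not part of this patch):

	static inline void atomic_add(int i, atomic_t *v)
	{
		unsigned long flags;

		atomic_ops_lock(flags);		/* SMP: irq off + arch spinlock
						 * !SMP: just irq off */
		v->counter += i;		/* plain read-modify-write, now
						 * safe vs other CPUs and IRQs */
		atomic_ops_unlock(flags);
	}

Because the !SMP variants collapse to local_irq_save()/restore(), the
same atomic_add() body serves both builds; only the lock macros change
shape underneath it.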