Diffstat (limited to 'include/asm-sparc/smp.h')
-rw-r--r--	include/asm-sparc/smp.h	177
1 file changed, 6 insertions(+), 171 deletions(-)
diff --git a/include/asm-sparc/smp.h b/include/asm-sparc/smp.h
index e6d561599726..1f9dedfbabd8 100644
--- a/include/asm-sparc/smp.h
+++ b/include/asm-sparc/smp.h
@@ -1,173 +1,8 @@
-/* smp.h: Sparc specific SMP stuff.
- *
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#ifndef _SPARC_SMP_H
-#define _SPARC_SMP_H
-
-#include <linux/threads.h>
-#include <asm/head.h>
-#include <asm/btfixup.h>
-
-#ifndef __ASSEMBLY__
-
-#include <linux/cpumask.h>
-
-#endif /* __ASSEMBLY__ */
-
-#ifdef CONFIG_SMP
-
-#ifndef __ASSEMBLY__
-
-#include <asm/ptrace.h>
-#include <asm/asi.h>
-#include <asm/atomic.h>
-
-/*
- * Private routines/data
- */
-
-extern unsigned char boot_cpu_id;
-extern cpumask_t phys_cpu_present_map;
-#define cpu_possible_map phys_cpu_present_map
-
-typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
-			  unsigned long, unsigned long);
-
-/*
- * General functions that each host system must provide.
- */
-
-void sun4m_init_smp(void);
-void sun4d_init_smp(void);
-
-void smp_callin(void);
-void smp_boot_cpus(void);
-void smp_store_cpu_info(int);
-
-struct seq_file;
-void smp_bogo(struct seq_file *);
-void smp_info(struct seq_file *);
-
-BTFIXUPDEF_CALL(void, smp_cross_call, smpfunc_t, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long)
-BTFIXUPDEF_CALL(int, __hard_smp_processor_id, void)
-BTFIXUPDEF_BLACKBOX(hard_smp_processor_id)
-BTFIXUPDEF_BLACKBOX(load_current)
-
-#define smp_cross_call(func,arg1,arg2,arg3,arg4,arg5) BTFIXUP_CALL(smp_cross_call)(func,arg1,arg2,arg3,arg4,arg5)
-
-static inline void xc0(smpfunc_t func) { smp_cross_call(func, 0, 0, 0, 0, 0); }
-static inline void xc1(smpfunc_t func, unsigned long arg1)
-{ smp_cross_call(func, arg1, 0, 0, 0, 0); }
-static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
-{ smp_cross_call(func, arg1, arg2, 0, 0, 0); }
-static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
-		       unsigned long arg3)
-{ smp_cross_call(func, arg1, arg2, arg3, 0, 0); }
-static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
-		       unsigned long arg3, unsigned long arg4)
-{ smp_cross_call(func, arg1, arg2, arg3, arg4, 0); }
-static inline void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2,
-		       unsigned long arg3, unsigned long arg4, unsigned long arg5)
-{ smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); }
-
-static inline int smp_call_function(void (*func)(void *info), void *info, int nonatomic, int wait)
-{
-	xc1((smpfunc_t)func, (unsigned long)info);
-	return 0;
-}
-
-static inline int cpu_logical_map(int cpu)
-{
-	return cpu;
-}
-
-static inline int hard_smp4m_processor_id(void)
-{
-	int cpuid;
-
-	__asm__ __volatile__("rd %%tbr, %0\n\t"
-			     "srl %0, 12, %0\n\t"
-			     "and %0, 3, %0\n\t" :
-			     "=&r" (cpuid));
-	return cpuid;
-}
-
-static inline int hard_smp4d_processor_id(void)
-{
-	int cpuid;
-
-	__asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
-			     "=&r" (cpuid) : "i" (ASI_M_VIKING_TMP1));
-	return cpuid;
-}
-
-#ifndef MODULE
-static inline int hard_smp_processor_id(void)
-{
-	int cpuid;
-
-	/* Black box - sun4m
-		__asm__ __volatile__("rd %%tbr, %0\n\t"
-				     "srl %0, 12, %0\n\t"
-				     "and %0, 3, %0\n\t" :
-				     "=&r" (cpuid));
-	           - sun4d
-		__asm__ __volatile__("lda [%g0] ASI_M_VIKING_TMP1, %0\n\t"
-				     "nop; nop" :
-				     "=&r" (cpuid));
-	   See btfixup.h and btfixupprep.c to understand how a blackbox works.
-	 */
-	__asm__ __volatile__("sethi %%hi(___b_hard_smp_processor_id), %0\n\t"
-			     "sethi %%hi(boot_cpu_id), %0\n\t"
-			     "ldub [%0 + %%lo(boot_cpu_id)], %0\n\t" :
-			     "=&r" (cpuid));
-	return cpuid;
-}
+#ifndef ___ASM_SPARC_SMP_H
+#define ___ASM_SPARC_SMP_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/smp_64.h>
 #else
-static inline int hard_smp_processor_id(void)
-{
-	int cpuid;
-
-	__asm__ __volatile__("mov %%o7, %%g1\n\t"
-			     "call ___f___hard_smp_processor_id\n\t"
-			     " nop\n\t"
-			     "mov %%g2, %0\n\t" : "=r"(cpuid) : : "g1", "g2");
-	return cpuid;
-}
+#include <asm-sparc/smp_32.h>
+#endif
 #endif
-
-#define raw_smp_processor_id() (current_thread_info()->cpu)
-
-#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
-#define prof_counter(__cpu) cpu_data(__cpu).counter
-
-void smp_setup_cpu_possible_map(void);
-
-#endif /* !(__ASSEMBLY__) */
-
-/* Sparc specific messages. */
-#define MSG_CROSS_CALL 0x0005 /* run func on cpus */
-
-/* Empirical PROM processor mailbox constants. If the per-cpu mailbox
- * contains something other than one of these then the ipi is from
- * Linux's active_kernel_processor. This facility exists so that
- * the boot monitor can capture all the other cpus when one catches
- * a watchdog reset or the user enters the monitor using L1-A keys.
- */
-#define MBOX_STOPCPU 0xFB
-#define MBOX_IDLECPU 0xFC
-#define MBOX_IDLECPU2 0xFD
-#define MBOX_STOPCPU2 0xFE
-
-#else /* SMP */
-
-#define hard_smp_processor_id() 0
-#define smp_setup_cpu_possible_map() do { } while (0)
-
-#endif /* !(SMP) */
-
-#define NO_PROC_ID 0xFF
-
-#endif /* !(_SPARC_SMP_H) */
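
Note: the eight '+' lines above are the entire new include/asm-sparc/smp.h. With the diff markers stripped, the wrapper header that results from this change reads:

	#ifndef ___ASM_SPARC_SMP_H
	#define ___ASM_SPARC_SMP_H
	#if defined(__sparc__) && defined(__arch64__)
	#include <asm-sparc/smp_64.h>
	#else
	#include <asm-sparc/smp_32.h>
	#endif
	#endif

__arch64__ is the compiler-defined macro for 64-bit sparc targets, so 64-bit builds are routed to asm-sparc/smp_64.h while 32-bit builds get asm-sparc/smp_32.h. The 32-bit declarations deleted here (boot_cpu_id, smp_cross_call(), the xc0()..xc5() helpers, hard_smp_processor_id(), the MBOX_* and NO_PROC_ID constants) are presumably carried over into asm-sparc/smp_32.h as part of this sparc/sparc64 header unification; this diff alone only shows their removal from the shared header.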