author     David S. Miller <davem@davemloft.net>  2009-04-01 04:26:43 -0400
committer  David S. Miller <davem@davemloft.net>  2009-06-16 07:56:08 -0400
commit     19f0fa3fb3499d8c5fb861933959f546d05fc202 (patch)
tree       2a4b43ac975d0a10f752a90d43020836a09cec07
parent     557fe0e8842e919aeacedeb5f35444c78232b3c8 (diff)
sparc64: Move trap_block[] definitions into a new header file.
Later we're going to want to get at these definitions from asm/percpu.h,
and that's not possible via cpudata.h because of the set of dependencies
the non-trap_block[] stuff has.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  arch/sparc/include/asm/cpudata_64.h  197
-rw-r--r--  arch/sparc/include/asm/trap_block.h  207
2 files changed, 208 insertions(+), 196 deletions(-)
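Before the diff itself, an editorial sanity check on what is being moved: the trap_block[] structure is hand laid out so that the TRAP_PER_CPU_* assembler offsets and TRAP_BLOCK_SZ_SHIFT stay in sync with the C definition. The following standalone sketch mirrors that layout in userspace and asserts those invariants. It is illustrative only, not kernel code; it assumes an LP64 target (as on sparc64), and struct hv_fault_status is replaced by a byte array whose size (0xc0 - 0x40) is derived from the offset table in the header.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define TRAP_BLOCK_SZ_SHIFT	8

struct thread_info;			/* opaque, as in the header */

struct trap_per_cpu {
	/* D-cache line 1 */
	struct thread_info	*thread;
	unsigned long		pgd_paddr;
	unsigned long		cpu_mondo_pa;
	unsigned long		dev_mondo_pa;
	/* D-cache line 2 */
	unsigned long		resum_mondo_pa;
	unsigned long		resum_kernel_buf_pa;
	unsigned long		nonresum_mondo_pa;
	unsigned long		nonresum_kernel_buf_pa;
	/* D-cache lines 3-6: stand-in for struct hv_fault_status */
	unsigned char		fault_info[0xc0 - 0x40];
	/* D-cache line 7 */
	unsigned long		cpu_mondo_block_pa;
	unsigned long		cpu_list_pa;
	unsigned long		tsb_huge;
	unsigned long		tsb_huge_temp;
	/* D-cache line 8 */
	unsigned long		irq_worklist_pa;
	unsigned int		cpu_mondo_qmask;
	unsigned int		dev_mondo_qmask;
	unsigned int		resum_qmask;
	unsigned int		nonresum_qmask;
	unsigned long		__unused;
} __attribute__((aligned(64)));

int main(void)
{
	/* The TRAP_PER_CPU_* assembler offsets must track the C layout. */
	assert(offsetof(struct trap_per_cpu, pgd_paddr)          == 0x08);
	assert(offsetof(struct trap_per_cpu, fault_info)         == 0x40);
	assert(offsetof(struct trap_per_cpu, cpu_mondo_block_pa) == 0xc0);
	assert(offsetof(struct trap_per_cpu, irq_worklist_pa)    == 0xe0);
	assert(offsetof(struct trap_per_cpu, nonresum_qmask)     == 0xf4);

	/* "If you modify the size of this structure, please update
	 * TRAP_BLOCK_SZ_SHIFT": trap entry code indexes trap_block[]
	 * with a shift, so the size must stay exactly 1 << SHIFT. */
	assert(sizeof(struct trap_per_cpu) == (1UL << TRAP_BLOCK_SZ_SHIFT));

	printf("trap_per_cpu layout: %zu bytes, as expected\n",
	       sizeof(struct trap_per_cpu));
	return 0;
}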
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index c58d87b3314f..926397d345ff 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -6,9 +6,6 @@
 #ifndef _SPARC64_CPUDATA_H
 #define _SPARC64_CPUDATA_H
 
-#include <asm/hypervisor.h>
-#include <asm/asi.h>
-
 #ifndef __ASSEMBLY__
 
 #include <linux/percpu.h>
@@ -38,202 +35,10 @@ DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
 #define cpu_data(__cpu)		per_cpu(__cpu_data, (__cpu))
 #define local_cpu_data()	__get_cpu_var(__cpu_data)
 
-/* Trap handling code needs to get at a few critical values upon
- * trap entry and to process TSB misses. These cannot be in the
- * per_cpu() area as we really need to lock them into the TLB and
- * thus make them part of the main kernel image. As a result we
- * try to make this as small as possible.
- *
- * This is padded out and aligned to 64-bytes to avoid false sharing
- * on SMP.
- */
-
-/* If you modify the size of this structure, please update
- * TRAP_BLOCK_SZ_SHIFT below.
- */
-struct thread_info;
-struct trap_per_cpu {
-/* D-cache line 1: Basic thread information, cpu and device mondo queues */
-	struct thread_info	*thread;
-	unsigned long		pgd_paddr;
-	unsigned long		cpu_mondo_pa;
-	unsigned long		dev_mondo_pa;
-
-/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */
-	unsigned long		resum_mondo_pa;
-	unsigned long		resum_kernel_buf_pa;
-	unsigned long		nonresum_mondo_pa;
-	unsigned long		nonresum_kernel_buf_pa;
-
-/* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */
-	struct hv_fault_status	fault_info;
-
-/* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */
-	unsigned long		cpu_mondo_block_pa;
-	unsigned long		cpu_list_pa;
-	unsigned long		tsb_huge;
-	unsigned long		tsb_huge_temp;
-
-/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */
-	unsigned long		irq_worklist_pa;
-	unsigned int		cpu_mondo_qmask;
-	unsigned int		dev_mondo_qmask;
-	unsigned int		resum_qmask;
-	unsigned int		nonresum_qmask;
-	unsigned long		__unused;
-} __attribute__((aligned(64)));
-extern struct trap_per_cpu trap_block[NR_CPUS];
-extern void init_cur_cpu_trap(struct thread_info *);
-extern void setup_tba(void);
-extern int ncpus_probed;
 extern const struct seq_operations cpuinfo_op;
 
-extern unsigned long real_hard_smp_processor_id(void);
-
-struct cpuid_patch_entry {
-	unsigned int	addr;
-	unsigned int	cheetah_safari[4];
-	unsigned int	cheetah_jbus[4];
-	unsigned int	starfire[4];
-	unsigned int	sun4v[4];
-};
-extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
-
-struct sun4v_1insn_patch_entry {
-	unsigned int	addr;
-	unsigned int	insn;
-};
-extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
-	__sun4v_1insn_patch_end;
-
-struct sun4v_2insn_patch_entry {
-	unsigned int	addr;
-	unsigned int	insns[2];
-};
-extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
-	__sun4v_2insn_patch_end;
-
 #endif /* !(__ASSEMBLY__) */
 
-#define TRAP_PER_CPU_THREAD		0x00
-#define TRAP_PER_CPU_PGD_PADDR		0x08
-#define TRAP_PER_CPU_CPU_MONDO_PA	0x10
-#define TRAP_PER_CPU_DEV_MONDO_PA	0x18
-#define TRAP_PER_CPU_RESUM_MONDO_PA	0x20
-#define TRAP_PER_CPU_RESUM_KBUF_PA	0x28
-#define TRAP_PER_CPU_NONRESUM_MONDO_PA	0x30
-#define TRAP_PER_CPU_NONRESUM_KBUF_PA	0x38
-#define TRAP_PER_CPU_FAULT_INFO		0x40
-#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA	0xc0
-#define TRAP_PER_CPU_CPU_LIST_PA	0xc8
-#define TRAP_PER_CPU_TSB_HUGE		0xd0
-#define TRAP_PER_CPU_TSB_HUGE_TEMP	0xd8
-#define TRAP_PER_CPU_IRQ_WORKLIST_PA	0xe0
-#define TRAP_PER_CPU_CPU_MONDO_QMASK	0xe8
-#define TRAP_PER_CPU_DEV_MONDO_QMASK	0xec
-#define TRAP_PER_CPU_RESUM_QMASK	0xf0
-#define TRAP_PER_CPU_NONRESUM_QMASK	0xf4
-
-#define TRAP_BLOCK_SZ_SHIFT		8
-
-#include <asm/scratchpad.h>
-
-#define __GET_CPUID(REG)				\
-	/* Spitfire implementation (default). */	\
-661:	ldxa		[%g0] ASI_UPA_CONFIG, REG;	\
-	srlx		REG, 17, REG;			\
-	and		REG, 0x1f, REG;			\
-	nop;						\
-	.section	.cpuid_patch, "ax";		\
-	/* Instruction location. */			\
-	.word		661b;				\
-	/* Cheetah Safari implementation. */		\
-	ldxa		[%g0] ASI_SAFARI_CONFIG, REG;	\
-	srlx		REG, 17, REG;			\
-	and		REG, 0x3ff, REG;		\
-	nop;						\
-	/* Cheetah JBUS implementation. */		\
-	ldxa		[%g0] ASI_JBUS_CONFIG, REG;	\
-	srlx		REG, 17, REG;			\
-	and		REG, 0x1f, REG;			\
-	nop;						\
-	/* Starfire implementation. */			\
-	sethi		%hi(0x1fff40000d0 >> 9), REG;	\
-	sllx		REG, 9, REG;			\
-	or		REG, 0xd0, REG;			\
-	lduwa		[REG] ASI_PHYS_BYPASS_EC_E, REG;\
-	/* sun4v implementation. */			\
-	mov		SCRATCHPAD_CPUID, REG;		\
-	ldxa		[REG] ASI_SCRATCHPAD, REG;	\
-	nop;						\
-	nop;						\
-	.previous;
-
-#ifdef CONFIG_SMP
-
-#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	__GET_CPUID(TMP)			\
-	sethi	%hi(trap_block), DEST;		\
-	sllx	TMP, TRAP_BLOCK_SZ_SHIFT, TMP;	\
-	or	DEST, %lo(trap_block), DEST;	\
-	add	DEST, TMP, DEST;		\
-
-/* Clobbers TMP, current address space PGD phys address into DEST. */
-#define TRAP_LOAD_PGD_PHYS(DEST, TMP)		\
-	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
-
-/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
-#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP)	\
-	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
-
-/* Clobbers TMP, loads DEST with current thread info pointer. */
-#define TRAP_LOAD_THREAD_REG(DEST, TMP)		\
-	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	ldx	[DEST + TRAP_PER_CPU_THREAD], DEST;
-
-/* Given the current thread info pointer in THR, load the per-cpu
- * area base of the current processor into DEST. REG1, REG2, and REG3 are
- * clobbered.
- *
- * You absolutely cannot use DEST as a temporary in this code. The
- * reason is that traps can happen during execution, and return from
- * trap will load the fully resolved DEST per-cpu base. This can corrupt
- * the calculations done by the macro mid-stream.
- */
-#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)	\
-	lduh	[THR + TI_CPU], REG1;			\
-	sethi	%hi(__per_cpu_shift), REG3;		\
-	sethi	%hi(__per_cpu_base), REG2;		\
-	ldx	[REG3 + %lo(__per_cpu_shift)], REG3;	\
-	ldx	[REG2 + %lo(__per_cpu_base)], REG2;	\
-	sllx	REG1, REG3, REG3;			\
-	add	REG3, REG2, DEST;
-
-#else
-
-#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	sethi	%hi(trap_block), DEST;		\
-	or	DEST, %lo(trap_block), DEST;	\
-
-/* Uniprocessor versions, we know the cpuid is zero. */
-#define TRAP_LOAD_PGD_PHYS(DEST, TMP)		\
-	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
-
-/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
-#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP)	\
-	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
-
-#define TRAP_LOAD_THREAD_REG(DEST, TMP)		\
-	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	ldx	[DEST + TRAP_PER_CPU_THREAD], DEST;
-
-/* No per-cpu areas on uniprocessor, so no need to load DEST. */
-#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)
-
-#endif /* !(CONFIG_SMP) */
+#include <asm/trap_block.h>
 
 #endif /* _SPARC64_CPUDATA_H */
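A note on the __GET_CPUID() machinery being moved above: the macro emits the default (Spitfire) four-instruction sequence inline and records, in the .cpuid_patch section, both its address (.word 661b) and the four-instruction alternatives for Cheetah Safari, Cheetah JBUS, Starfire, and sun4v. Early boot code then walks the table bounded by __cpuid_patch and __cpuid_patch_end and rewrites the default sequence in place for the running CPU type. The sketch below is schematic, not the kernel's actual patcher: the function name patch_cpuid_sequences, the enum cpu_variant, and the omission of I-cache flushing are all illustrative.

/* Schematic boot-time consumer of the struct cpuid_patch_entry table. */
enum cpu_variant { spitfire, cheetah_safari, cheetah_jbus, starfire, sun4v };

struct cpuid_patch_entry {
	unsigned int	addr;			/* .word 661b from __GET_CPUID() */
	unsigned int	cheetah_safari[4];
	unsigned int	cheetah_jbus[4];
	unsigned int	starfire[4];
	unsigned int	sun4v[4];
};

/* Provided by the linker script, bracketing the .cpuid_patch section. */
extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;

static void patch_cpuid_sequences(enum cpu_variant cpu)
{
	struct cpuid_patch_entry *p;

	for (p = &__cpuid_patch; p < &__cpuid_patch_end; p++) {
		unsigned int *insns;
		unsigned int *target = (unsigned int *)(unsigned long)p->addr;
		int i;

		switch (cpu) {
		case cheetah_safari:	insns = p->cheetah_safari;	break;
		case cheetah_jbus:	insns = p->cheetah_jbus;	break;
		case starfire:		insns = p->starfire;		break;
		case sun4v:		insns = p->sun4v;		break;
		default:		continue;	/* keep Spitfire default */
		}

		/* Overwrite the 4 default instructions at the recorded
		 * address; the real kernel also flushes the I-cache for
		 * each patched word, omitted here. */
		for (i = 0; i < 4; i++)
			target[i] = insns[i];
	}
}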
diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
new file mode 100644
index 000000000000..68fd9ee3e8ae
--- /dev/null
+++ b/arch/sparc/include/asm/trap_block.h
@@ -0,0 +1,207 @@
+#ifndef _SPARC_TRAP_BLOCK_H
+#define _SPARC_TRAP_BLOCK_H
+
+#include <asm/hypervisor.h>
+#include <asm/asi.h>
+
+#ifndef __ASSEMBLY__
+
+/* Trap handling code needs to get at a few critical values upon
+ * trap entry and to process TSB misses. These cannot be in the
+ * per_cpu() area as we really need to lock them into the TLB and
+ * thus make them part of the main kernel image. As a result we
+ * try to make this as small as possible.
+ *
+ * This is padded out and aligned to 64-bytes to avoid false sharing
+ * on SMP.
+ */
+
+/* If you modify the size of this structure, please update
+ * TRAP_BLOCK_SZ_SHIFT below.
+ */
+struct thread_info;
+struct trap_per_cpu {
+/* D-cache line 1: Basic thread information, cpu and device mondo queues */
+	struct thread_info	*thread;
+	unsigned long		pgd_paddr;
+	unsigned long		cpu_mondo_pa;
+	unsigned long		dev_mondo_pa;
+
+/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */
+	unsigned long		resum_mondo_pa;
+	unsigned long		resum_kernel_buf_pa;
+	unsigned long		nonresum_mondo_pa;
+	unsigned long		nonresum_kernel_buf_pa;
+
+/* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */
+	struct hv_fault_status	fault_info;
+
+/* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */
+	unsigned long		cpu_mondo_block_pa;
+	unsigned long		cpu_list_pa;
+	unsigned long		tsb_huge;
+	unsigned long		tsb_huge_temp;
+
+/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */
+	unsigned long		irq_worklist_pa;
+	unsigned int		cpu_mondo_qmask;
+	unsigned int		dev_mondo_qmask;
+	unsigned int		resum_qmask;
+	unsigned int		nonresum_qmask;
+	unsigned long		__unused;
+} __attribute__((aligned(64)));
+extern struct trap_per_cpu trap_block[NR_CPUS];
+extern void init_cur_cpu_trap(struct thread_info *);
+extern void setup_tba(void);
+extern int ncpus_probed;
+
+extern unsigned long real_hard_smp_processor_id(void);
+
+struct cpuid_patch_entry {
+	unsigned int	addr;
+	unsigned int	cheetah_safari[4];
+	unsigned int	cheetah_jbus[4];
+	unsigned int	starfire[4];
+	unsigned int	sun4v[4];
+};
+extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
+
+struct sun4v_1insn_patch_entry {
+	unsigned int	addr;
+	unsigned int	insn;
+};
+extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
+	__sun4v_1insn_patch_end;
+
+struct sun4v_2insn_patch_entry {
+	unsigned int	addr;
+	unsigned int	insns[2];
+};
+extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
+	__sun4v_2insn_patch_end;
+
+
+#endif /* !(__ASSEMBLY__) */
+
+#define TRAP_PER_CPU_THREAD		0x00
+#define TRAP_PER_CPU_PGD_PADDR		0x08
+#define TRAP_PER_CPU_CPU_MONDO_PA	0x10
+#define TRAP_PER_CPU_DEV_MONDO_PA	0x18
+#define TRAP_PER_CPU_RESUM_MONDO_PA	0x20
+#define TRAP_PER_CPU_RESUM_KBUF_PA	0x28
+#define TRAP_PER_CPU_NONRESUM_MONDO_PA	0x30
+#define TRAP_PER_CPU_NONRESUM_KBUF_PA	0x38
+#define TRAP_PER_CPU_FAULT_INFO		0x40
+#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA	0xc0
+#define TRAP_PER_CPU_CPU_LIST_PA	0xc8
+#define TRAP_PER_CPU_TSB_HUGE		0xd0
+#define TRAP_PER_CPU_TSB_HUGE_TEMP	0xd8
+#define TRAP_PER_CPU_IRQ_WORKLIST_PA	0xe0
+#define TRAP_PER_CPU_CPU_MONDO_QMASK	0xe8
+#define TRAP_PER_CPU_DEV_MONDO_QMASK	0xec
+#define TRAP_PER_CPU_RESUM_QMASK	0xf0
+#define TRAP_PER_CPU_NONRESUM_QMASK	0xf4
+
+#define TRAP_BLOCK_SZ_SHIFT		8
+
+#include <asm/scratchpad.h>
+
+#define __GET_CPUID(REG)				\
+	/* Spitfire implementation (default). */	\
+661:	ldxa		[%g0] ASI_UPA_CONFIG, REG;	\
+	srlx		REG, 17, REG;			\
+	and		REG, 0x1f, REG;			\
+	nop;						\
+	.section	.cpuid_patch, "ax";		\
+	/* Instruction location. */			\
+	.word		661b;				\
+	/* Cheetah Safari implementation. */		\
+	ldxa		[%g0] ASI_SAFARI_CONFIG, REG;	\
+	srlx		REG, 17, REG;			\
+	and		REG, 0x3ff, REG;		\
+	nop;						\
+	/* Cheetah JBUS implementation. */		\
+	ldxa		[%g0] ASI_JBUS_CONFIG, REG;	\
+	srlx		REG, 17, REG;			\
+	and		REG, 0x1f, REG;			\
+	nop;						\
+	/* Starfire implementation. */			\
+	sethi		%hi(0x1fff40000d0 >> 9), REG;	\
+	sllx		REG, 9, REG;			\
+	or		REG, 0xd0, REG;			\
+	lduwa		[REG] ASI_PHYS_BYPASS_EC_E, REG;\
+	/* sun4v implementation. */			\
+	mov		SCRATCHPAD_CPUID, REG;		\
+	ldxa		[REG] ASI_SCRATCHPAD, REG;	\
+	nop;						\
+	nop;						\
+	.previous;
+
+#ifdef CONFIG_SMP
+
+#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	__GET_CPUID(TMP)			\
+	sethi	%hi(trap_block), DEST;		\
+	sllx	TMP, TRAP_BLOCK_SZ_SHIFT, TMP;	\
+	or	DEST, %lo(trap_block), DEST;	\
+	add	DEST, TMP, DEST;		\
+
+/* Clobbers TMP, current address space PGD phys address into DEST. */
+#define TRAP_LOAD_PGD_PHYS(DEST, TMP)		\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
+
+/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
+#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP)	\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
+
+/* Clobbers TMP, loads DEST with current thread info pointer. */
+#define TRAP_LOAD_THREAD_REG(DEST, TMP)		\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	ldx	[DEST + TRAP_PER_CPU_THREAD], DEST;
+
+/* Given the current thread info pointer in THR, load the per-cpu
+ * area base of the current processor into DEST. REG1, REG2, and REG3 are
+ * clobbered.
+ *
+ * You absolutely cannot use DEST as a temporary in this code. The
+ * reason is that traps can happen during execution, and return from
+ * trap will load the fully resolved DEST per-cpu base. This can corrupt
+ * the calculations done by the macro mid-stream.
+ */
+#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)	\
+	lduh	[THR + TI_CPU], REG1;			\
+	sethi	%hi(__per_cpu_shift), REG3;		\
+	sethi	%hi(__per_cpu_base), REG2;		\
+	ldx	[REG3 + %lo(__per_cpu_shift)], REG3;	\
+	ldx	[REG2 + %lo(__per_cpu_base)], REG2;	\
+	sllx	REG1, REG3, REG3;			\
+	add	REG3, REG2, DEST;
+
+#else
+
+#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	sethi	%hi(trap_block), DEST;		\
+	or	DEST, %lo(trap_block), DEST;	\
+
+/* Uniprocessor versions, we know the cpuid is zero. */
+#define TRAP_LOAD_PGD_PHYS(DEST, TMP)		\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
+
+/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
+#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP)	\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
+
+#define TRAP_LOAD_THREAD_REG(DEST, TMP)		\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	ldx	[DEST + TRAP_PER_CPU_THREAD], DEST;
+
+/* No per-cpu areas on uniprocessor, so no need to load DEST. */
+#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)
+
+#endif /* !(CONFIG_SMP) */
+
+#endif /* _SPARC_TRAP_BLOCK_H */
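For readers less used to the assembler side: TRAP_LOAD_TRAP_BLOCK computes &trap_block[cpuid] with a shift and an add rather than a multiply, which only works while sizeof(struct trap_per_cpu) stays exactly 1 << TRAP_BLOCK_SZ_SHIFT. The following standalone C restatement of that arithmetic is an editorial sketch, not kernel code; NR_CPUS_DEMO, the local trap_block[] array, and trap_load_trap_block() are stand-ins so the example is self-contained.

#include <assert.h>
#include <stdio.h>

#define TRAP_BLOCK_SZ_SHIFT	8
#define NR_CPUS_DEMO		4

/* 256-byte, 64-byte-aligned stand-in for the real struct trap_per_cpu. */
struct trap_per_cpu {
	unsigned char pad[1 << TRAP_BLOCK_SZ_SHIFT];
} __attribute__((aligned(64)));

static struct trap_per_cpu trap_block[NR_CPUS_DEMO];

/* C version of TRAP_LOAD_TRAP_BLOCK(DEST, TMP):
 *   DEST = trap_block; TMP = cpuid << TRAP_BLOCK_SZ_SHIFT; DEST += TMP; */
static struct trap_per_cpu *trap_load_trap_block(unsigned long cpuid)
{
	unsigned long dest = (unsigned long)trap_block;

	dest += cpuid << TRAP_BLOCK_SZ_SHIFT;
	return (struct trap_per_cpu *)dest;
}

int main(void)
{
	unsigned long cpu;

	/* The shift-based address matches ordinary array indexing only
	 * because the structure size equals 1 << TRAP_BLOCK_SZ_SHIFT,
	 * hence the "please update TRAP_BLOCK_SZ_SHIFT" comment. */
	for (cpu = 0; cpu < NR_CPUS_DEMO; cpu++)
		assert(trap_load_trap_block(cpu) == &trap_block[cpu]);

	printf("shift-based indexing matches &trap_block[cpuid]\n");
	return 0;
}

LOAD_PER_CPU_BASE follows the same pattern one level up: it computes __per_cpu_base + (cpu << __per_cpu_shift), reading the cpu number from thread_info rather than from the chip registers.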