author     Paul Mackerras <paulus@samba.org>   2005-10-10 00:19:43 -0400
committer  Paul Mackerras <paulus@samba.org>   2005-10-10 00:19:43 -0400
commit     9f04b9e327c495f8ddbf89c08da6cbe626e1b1b3
tree       381e9786b837085bd555c705542047f54cd2beab /include/asm-ppc64
parent     b60fc8bbd2d0ea2a9b1fc7271d521fcf47f27bfd
powerpc: Merged processor.h.
This adds register definitions from the ppc64 processor.h to reg.h,
and makes a single merged processor.h. I moved __is_processor from
the ppc64 system.h to the merged reg.h along with the PVR register
constants.
Signed-off-by: Paul Mackerras <paulus@samba.org>
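
For context, __is_processor() (visible in the system.h hunk below) reads the PVR with mfspr and compares PVR_VER() of that value against a processor-version constant; after this merge both the helper and the PV_* constants live in the merged reg.h. The following is a hypothetical caller sketch, not part of this patch, showing how the two are typically used together:

    #include <asm/reg.h>   /* merged home of __is_processor() and the PV_* constants */

    /* Hypothetical example, not from this commit: key model-specific
     * code off the version field of the Processor Version Register. */
    static int running_on_ppc970(void)
    {
            return __is_processor(PV_970) ||
                   __is_processor(PV_970FX) ||
                   __is_processor(PV_970MP);
    }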
Diffstat (limited to 'include/asm-ppc64')
-rw-r--r--  include/asm-ppc64/processor.h  567
-rw-r--r--  include/asm-ppc64/system.h       7
2 files changed, 0 insertions, 574 deletions
diff --git a/include/asm-ppc64/processor.h b/include/asm-ppc64/processor.h
deleted file mode 100644
index 6447fbee7d69..000000000000
--- a/include/asm-ppc64/processor.h
+++ /dev/null
@@ -1,567 +0,0 @@
-#ifndef __ASM_PPC64_PROCESSOR_H
-#define __ASM_PPC64_PROCESSOR_H
-
-/*
- * Copyright (C) 2001 PPC 64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/stringify.h>
-#ifndef __ASSEMBLY__
-#include <linux/config.h>
-#include <asm/atomic.h>
-#include <asm/ppcdebug.h>
-#include <asm/a.out.h>
-#endif
-#include <asm/ptrace.h>
-#include <asm/types.h>
-#include <asm/systemcfg.h>
-#include <asm/cputable.h>
-
-/* Machine State Register (MSR) Fields */
-#define MSR_SF_LG 63 /* Enable 64 bit mode */
-#define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */
-#define MSR_HV_LG 60 /* Hypervisor state */
-#define MSR_VEC_LG 25 /* Enable AltiVec */
-#define MSR_POW_LG 18 /* Enable Power Management */
-#define MSR_WE_LG 18 /* Wait State Enable */
-#define MSR_TGPR_LG 17 /* TLB Update registers in use */
-#define MSR_CE_LG 17 /* Critical Interrupt Enable */
-#define MSR_ILE_LG 16 /* Interrupt Little Endian */
-#define MSR_EE_LG 15 /* External Interrupt Enable */
-#define MSR_PR_LG 14 /* Problem State / Privilege Level */
-#define MSR_FP_LG 13 /* Floating Point enable */
-#define MSR_ME_LG 12 /* Machine Check Enable */
-#define MSR_FE0_LG 11 /* Floating Exception mode 0 */
-#define MSR_SE_LG 10 /* Single Step */
-#define MSR_BE_LG 9 /* Branch Trace */
-#define MSR_DE_LG 9 /* Debug Exception Enable */
-#define MSR_FE1_LG 8 /* Floating Exception mode 1 */
-#define MSR_IP_LG 6 /* Exception prefix 0x000/0xFFF */
-#define MSR_IR_LG 5 /* Instruction Relocate */
-#define MSR_DR_LG 4 /* Data Relocate */
-#define MSR_PE_LG 3 /* Protection Enable */
-#define MSR_PX_LG 2 /* Protection Exclusive Mode */
-#define MSR_PMM_LG 2 /* Performance monitor */
-#define MSR_RI_LG 1 /* Recoverable Exception */
-#define MSR_LE_LG 0 /* Little Endian */
-
-#ifdef __ASSEMBLY__
-#define __MASK(X) (1<<(X))
-#else
-#define __MASK(X) (1UL<<(X))
-#endif
-
-#define MSR_SF __MASK(MSR_SF_LG) /* Enable 64 bit mode */
-#define MSR_ISF __MASK(MSR_ISF_LG) /* Interrupt 64b mode valid on 630 */
-#define MSR_HV __MASK(MSR_HV_LG) /* Hypervisor state */
-#define MSR_VEC __MASK(MSR_VEC_LG) /* Enable AltiVec */
-#define MSR_POW __MASK(MSR_POW_LG) /* Enable Power Management */
-#define MSR_WE __MASK(MSR_WE_LG) /* Wait State Enable */
-#define MSR_TGPR __MASK(MSR_TGPR_LG) /* TLB Update registers in use */
-#define MSR_CE __MASK(MSR_CE_LG) /* Critical Interrupt Enable */
-#define MSR_ILE __MASK(MSR_ILE_LG) /* Interrupt Little Endian */
-#define MSR_EE __MASK(MSR_EE_LG) /* External Interrupt Enable */
-#define MSR_PR __MASK(MSR_PR_LG) /* Problem State / Privilege Level */
-#define MSR_FP __MASK(MSR_FP_LG) /* Floating Point enable */
-#define MSR_ME __MASK(MSR_ME_LG) /* Machine Check Enable */
-#define MSR_FE0 __MASK(MSR_FE0_LG) /* Floating Exception mode 0 */
-#define MSR_SE __MASK(MSR_SE_LG) /* Single Step */
-#define MSR_BE __MASK(MSR_BE_LG) /* Branch Trace */
-#define MSR_DE __MASK(MSR_DE_LG) /* Debug Exception Enable */
-#define MSR_FE1 __MASK(MSR_FE1_LG) /* Floating Exception mode 1 */
-#define MSR_IP __MASK(MSR_IP_LG) /* Exception prefix 0x000/0xFFF */
-#define MSR_IR __MASK(MSR_IR_LG) /* Instruction Relocate */
-#define MSR_DR __MASK(MSR_DR_LG) /* Data Relocate */
-#define MSR_PE __MASK(MSR_PE_LG) /* Protection Enable */
-#define MSR_PX __MASK(MSR_PX_LG) /* Protection Exclusive Mode */
-#define MSR_PMM __MASK(MSR_PMM_LG) /* Performance monitor */
-#define MSR_RI __MASK(MSR_RI_LG) /* Recoverable Exception */
-#define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */
-
-#define MSR_ MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF
-#define MSR_KERNEL MSR_ | MSR_SF | MSR_HV
-
-#define MSR_USER32 MSR_ | MSR_PR | MSR_EE
-#define MSR_USER64 MSR_USER32 | MSR_SF
-
-/* Floating Point Status and Control Register (FPSCR) Fields */
-
-#define FPSCR_FX 0x80000000 /* FPU exception summary */
-#define FPSCR_FEX 0x40000000 /* FPU enabled exception summary */
-#define FPSCR_VX 0x20000000 /* Invalid operation summary */
-#define FPSCR_OX 0x10000000 /* Overflow exception summary */
-#define FPSCR_UX 0x08000000 /* Underflow exception summary */
-#define FPSCR_ZX 0x04000000 /* Zero-divide exception summary */
-#define FPSCR_XX 0x02000000 /* Inexact exception summary */
-#define FPSCR_VXSNAN 0x01000000 /* Invalid op for SNaN */
-#define FPSCR_VXISI 0x00800000 /* Invalid op for Inv - Inv */
-#define FPSCR_VXIDI 0x00400000 /* Invalid op for Inv / Inv */
-#define FPSCR_VXZDZ 0x00200000 /* Invalid op for Zero / Zero */
-#define FPSCR_VXIMZ 0x00100000 /* Invalid op for Inv * Zero */
-#define FPSCR_VXVC 0x00080000 /* Invalid op for Compare */
-#define FPSCR_FR 0x00040000 /* Fraction rounded */
-#define FPSCR_FI 0x00020000 /* Fraction inexact */
-#define FPSCR_FPRF 0x0001f000 /* FPU Result Flags */
-#define FPSCR_FPCC 0x0000f000 /* FPU Condition Codes */
-#define FPSCR_VXSOFT 0x00000400 /* Invalid op for software request */
-#define FPSCR_VXSQRT 0x00000200 /* Invalid op for square root */
-#define FPSCR_VXCVI 0x00000100 /* Invalid op for integer convert */
-#define FPSCR_VE 0x00000080 /* Invalid op exception enable */
-#define FPSCR_OE 0x00000040 /* IEEE overflow exception enable */
-#define FPSCR_UE 0x00000020 /* IEEE underflow exception enable */
-#define FPSCR_ZE 0x00000010 /* IEEE zero divide exception enable */
-#define FPSCR_XE 0x00000008 /* FP inexact exception enable */
-#define FPSCR_NI 0x00000004 /* FPU non IEEE-Mode */
-#define FPSCR_RN 0x00000003 /* FPU rounding control */
-
-/* Special Purpose Registers (SPRNs)*/
-
-#define SPRN_CTR 0x009 /* Count Register */
-#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */
-#define DABR_TRANSLATION (1UL << 2)
-#define SPRN_DAR 0x013 /* Data Address Register */
-#define SPRN_DEC 0x016 /* Decrement Register */
-#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
-#define DSISR_NOHPTE 0x40000000 /* no translation found */
-#define DSISR_PROTFAULT 0x08000000 /* protection fault */
-#define DSISR_ISSTORE 0x02000000 /* access was a store */
-#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
-#define DSISR_NOSEGMENT 0x00200000 /* STAB/SLB miss */
-#define SPRN_HID0 0x3F0 /* Hardware Implementation Register 0 */
-#define SPRN_MSRDORM 0x3F1 /* Hardware Implementation Register 1 */
-#define SPRN_HID1 0x3F1 /* Hardware Implementation Register 1 */
-#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
-#define SPRN_NIADORM 0x3F3 /* Hardware Implementation Register 2 */
-#define SPRN_HID4 0x3F4 /* 970 HID4 */
-#define SPRN_HID5 0x3F6 /* 970 HID5 */
-#define SPRN_HID6 0x3F9 /* BE HID 6 */
-#define HID6_LB (0x0F<<12) /* Concurrent Large Page Modes */
-#define HID6_DLP (1<<20) /* Disable all large page modes (4K only) */
-#define SPRN_TSCR 0x399 /* Thread switch control on BE */
-#define SPRN_TTR 0x39A /* Thread switch timeout on BE */
-#define TSCR_DEC_ENABLE 0x200000 /* Decrementer Interrupt */
-#define TSCR_EE_ENABLE 0x100000 /* External Interrupt */
-#define TSCR_EE_BOOST 0x080000 /* External Interrupt Boost */
-#define SPRN_TSC 0x3FD /* Thread switch control on others */
-#define SPRN_TST 0x3FC /* Thread switch timeout on others */
-#define SPRN_L2CR 0x3F9 /* Level 2 Cache Control Regsiter */
-#define SPRN_LR 0x008 /* Link Register */
-#define SPRN_PIR 0x3FF /* Processor Identification Register */
-#define SPRN_PIT 0x3DB /* Programmable Interval Timer */
-#define SPRN_PURR 0x135 /* Processor Utilization of Resources Register */
-#define SPRN_PVR 0x11F /* Processor Version Register */
-#define SPRN_RPA 0x3D6 /* Required Physical Address Register */
-#define SPRN_SDA 0x3BF /* Sampled Data Address Register */
-#define SPRN_SDR1 0x019 /* MMU Hash Base Register */
-#define SPRN_SIA 0x3BB /* Sampled Instruction Address Register */
-#define SPRN_SPRG0 0x110 /* Special Purpose Register General 0 */
-#define SPRN_SPRG1 0x111 /* Special Purpose Register General 1 */
-#define SPRN_SPRG2 0x112 /* Special Purpose Register General 2 */
-#define SPRN_SPRG3 0x113 /* Special Purpose Register General 3 */
-#define SPRN_SRR0 0x01A /* Save/Restore Register 0 */
-#define SPRN_SRR1 0x01B /* Save/Restore Register 1 */
-#define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */
-#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
-#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, W/O) */
-#define SPRN_TBWU 0x11D /* Time Base Write Upper Register (super, W/O) */
-#define SPRN_HIOR 0x137 /* 970 Hypervisor interrupt offset */
-#define SPRN_USIA 0x3AB /* User Sampled Instruction Address Register */
-#define SPRN_XER 0x001 /* Fixed Point Exception Register */
-#define SPRN_VRSAVE 0x100 /* Vector save */
-#define SPRN_CTRLF 0x088
-#define SPRN_CTRLT 0x098
-#define CTRL_RUNLATCH 0x1
-
-/* Performance monitor SPRs */
-#define SPRN_SIAR 780
-#define SPRN_SDAR 781
-#define SPRN_MMCRA 786
-#define MMCRA_SIHV 0x10000000UL /* state of MSR HV when SIAR set */
-#define MMCRA_SIPR 0x08000000UL /* state of MSR PR when SIAR set */
-#define MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */
-#define SPRN_PMC1 787
-#define SPRN_PMC2 788
-#define SPRN_PMC3 789
-#define SPRN_PMC4 790
-#define SPRN_PMC5 791
-#define SPRN_PMC6 792
-#define SPRN_PMC7 793
-#define SPRN_PMC8 794
-#define SPRN_MMCR0 795
-#define MMCR0_FC 0x80000000UL /* freeze counters. set to 1 on a perfmon exception */
-#define MMCR0_FCS 0x40000000UL /* freeze in supervisor state */
-#define MMCR0_KERNEL_DISABLE MMCR0_FCS
-#define MMCR0_FCP 0x20000000UL /* freeze in problem state */
-#define MMCR0_PROBLEM_DISABLE MMCR0_FCP
-#define MMCR0_FCM1 0x10000000UL /* freeze counters while MSR mark = 1 */
-#define MMCR0_FCM0 0x08000000UL /* freeze counters while MSR mark = 0 */
-#define MMCR0_PMXE 0x04000000UL /* performance monitor exception enable */
-#define MMCR0_FCECE 0x02000000UL /* freeze counters on enabled condition or event */
-/* time base exception enable */
-#define MMCR0_TBEE 0x00400000UL /* time base exception enable */
-#define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable*/
-#define MMCR0_PMCjCE 0x00004000UL /* PMCj count enable*/
-#define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */
-#define MMCR0_PMAO 0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */
-#define MMCR0_SHRFC 0x00000040UL /* SHRre freeze conditions between threads */
-#define MMCR0_FCTI 0x00000008UL /* freeze counters in tags inactive mode */
-#define MMCR0_FCTA 0x00000004UL /* freeze counters in tags active mode */
-#define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
-#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
-#define SPRN_MMCR1 798
-
-/* Short-hand versions for a number of the above SPRNs */
-
-#define CTR SPRN_CTR /* Counter Register */
-#define DAR SPRN_DAR /* Data Address Register */
-#define DABR SPRN_DABR /* Data Address Breakpoint Register */
-#define DEC SPRN_DEC /* Decrement Register */
-#define DSISR SPRN_DSISR /* Data Storage Interrupt Status Register */
-#define HID0 SPRN_HID0 /* Hardware Implementation Register 0 */
-#define MSRDORM SPRN_MSRDORM /* MSR Dormant Register */
-#define NIADORM SPRN_NIADORM /* NIA Dormant Register */
-#define TSC SPRN_TSC /* Thread switch control */
-#define TST SPRN_TST /* Thread switch timeout */
-#define IABR SPRN_IABR /* Instruction Address Breakpoint Register */
-#define L2CR SPRN_L2CR /* PPC 750 L2 control register */
-#define __LR SPRN_LR
-#define PVR SPRN_PVR /* Processor Version */
-#define PIR SPRN_PIR /* Processor ID */
-#define PURR SPRN_PURR /* Processor Utilization of Resource Register */
-#define SDR1 SPRN_SDR1 /* MMU hash base register */
-#define SPR0 SPRN_SPRG0 /* Supervisor Private Registers */
-#define SPR1 SPRN_SPRG1
-#define SPR2 SPRN_SPRG2
-#define SPR3 SPRN_SPRG3
-#define SPRG0 SPRN_SPRG0
-#define SPRG1 SPRN_SPRG1
-#define SPRG2 SPRN_SPRG2
-#define SPRG3 SPRN_SPRG3
-#define SRR0 SPRN_SRR0 /* Save and Restore Register 0 */
-#define SRR1 SPRN_SRR1 /* Save and Restore Register 1 */
-#define TBRL SPRN_TBRL /* Time Base Read Lower Register */
-#define TBRU SPRN_TBRU /* Time Base Read Upper Register */
-#define TBWL SPRN_TBWL /* Time Base Write Lower Register */
-#define TBWU SPRN_TBWU /* Time Base Write Upper Register */
-#define XER SPRN_XER
-
-/* Processor Version Register (PVR) field extraction */
-
-#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */
-#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revison field */
-
-/* Processor Version Numbers */
-#define PV_NORTHSTAR 0x0033
-#define PV_PULSAR 0x0034
-#define PV_POWER4 0x0035
-#define PV_ICESTAR 0x0036
-#define PV_SSTAR 0x0037
-#define PV_POWER4p 0x0038
-#define PV_970 0x0039
-#define PV_POWER5 0x003A
-#define PV_POWER5p 0x003B
-#define PV_970FX 0x003C
-#define PV_630 0x0040
-#define PV_630p 0x0041
-#define PV_970MP 0x0044
-#define PV_BE 0x0070
-
-/* Platforms supported by PPC64 */
-#define PLATFORM_PSERIES 0x0100
-#define PLATFORM_PSERIES_LPAR 0x0101
-#define PLATFORM_ISERIES_LPAR 0x0201
-#define PLATFORM_LPAR 0x0001
-#define PLATFORM_POWERMAC 0x0400
-#define PLATFORM_MAPLE 0x0500
-#define PLATFORM_BPA 0x1000
-
-/* Compatibility with drivers coming from PPC32 world */
-#define _machine (systemcfg->platform)
-#define _MACH_Pmac PLATFORM_POWERMAC
-
-/*
- * List of interrupt controllers.
- */
-#define IC_INVALID 0
-#define IC_OPEN_PIC 1
-#define IC_PPC_XIC 2
-#define IC_BPA_IIC 3
-#define IC_ISERIES 4
-
-#define XGLUE(a,b) a##b
-#define GLUE(a,b) XGLUE(a,b)
-
-#ifdef __ASSEMBLY__
-
-#define _GLOBAL(name) \
-        .section ".text"; \
-        .align 2 ; \
-        .globl name; \
-        .globl GLUE(.,name); \
-        .section ".opd","aw"; \
-name: \
-        .quad GLUE(.,name); \
-        .quad .TOC.@tocbase; \
-        .quad 0; \
-        .previous; \
-        .type GLUE(.,name),@function; \
-GLUE(.,name):
-
-#define _KPROBE(name) \
-        .section ".kprobes.text","a"; \
-        .align 2 ; \
-        .globl name; \
-        .globl GLUE(.,name); \
-        .section ".opd","aw"; \
-name: \
-        .quad GLUE(.,name); \
-        .quad .TOC.@tocbase; \
-        .quad 0; \
-        .previous; \
-        .type GLUE(.,name),@function; \
-GLUE(.,name):
-
-#define _STATIC(name) \
-        .section ".text"; \
-        .align 2 ; \
-        .section ".opd","aw"; \
-name: \
-        .quad GLUE(.,name); \
-        .quad .TOC.@tocbase; \
-        .quad 0; \
-        .previous; \
-        .type GLUE(.,name),@function; \
-GLUE(.,name):
-
-#else /* __ASSEMBLY__ */
-
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
-
-/* Macros for setting and retrieving special purpose registers */
-
-#define mfmsr() ({unsigned long rval; \
-        asm volatile("mfmsr %0" : "=r" (rval)); rval;})
-
-#define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
-        : : "r" (v))
-#define mtmsrd(v) __mtmsrd((v), 0)
-
-#define mfspr(rn) ({unsigned long rval; \
-        asm volatile("mfspr %0," __stringify(rn) \
-        : "=r" (rval)); rval;})
-#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v))
-
-#define mftb() ({unsigned long rval; \
-        asm volatile("mftb %0" : "=r" (rval)); rval;})
-
-#define mttbl(v) asm volatile("mttbl %0":: "r"(v))
-#define mttbu(v) asm volatile("mttbu %0":: "r"(v))
-
-#define mfasr() ({unsigned long rval; \
-        asm volatile("mfasr %0" : "=r" (rval)); rval;})
-
-/* Macros for adjusting thread priority (hardware multi-threading) */
-#define HMT_very_low() asm volatile("or 31,31,31 # very low priority")
-#define HMT_low() asm volatile("or 1,1,1 # low priority")
-#define HMT_medium_low() asm volatile("or 6,6,6 # medium low priority")
-#define HMT_medium() asm volatile("or 2,2,2 # medium priority")
-#define HMT_medium_high() asm volatile("or 5,5,5 # medium high priority")
-#define HMT_high() asm volatile("or 3,3,3 # high priority")
-
-static inline void set_tb(unsigned int upper, unsigned int lower)
-{
-        mttbl(0);
-        mttbu(upper);
-        mttbl(lower);
-}
-
-#define __get_SP() ({unsigned long sp; \
-        asm volatile("mr %0,1": "=r" (sp)); sp;})
-
-#ifdef __KERNEL__
-
-extern int have_of;
-extern u64 ppc64_interrupt_controller;
-
-struct task_struct;
-void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
-void release_thread(struct task_struct *);
-
-/* Prepare to copy thread state - unlazy all lazy status */
-extern void prepare_to_copy(struct task_struct *tsk);
-
-/* Create a new kernel thread. */
-extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-
-/* Lazy FPU handling on uni-processor */
-extern struct task_struct *last_task_used_math;
-extern struct task_struct *last_task_used_altivec;
-
-/* 64-bit user address space is 44-bits (16TB user VM) */
-#define TASK_SIZE_USER64 (0x0000100000000000UL)
-
-/*
- * 32-bit user address space is 4GB - 1 page
- * (this 1 page is needed so referencing of 0xFFFFFFFF generates EFAULT
- */
-#define TASK_SIZE_USER32 (0x0000000100000000UL - (1*PAGE_SIZE))
-
-#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
-        TASK_SIZE_USER32 : TASK_SIZE_USER64)
-
-/* This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
-#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_USER64 / 4))
-
-#define TASK_UNMAPPED_BASE ((test_thread_flag(TIF_32BIT)||(ppcdebugset(PPCDBG_BINFMT_32ADDR))) ? \
-        TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
-
-typedef struct {
-        unsigned long seg;
-} mm_segment_t;
-
-struct thread_struct {
-        unsigned long ksp; /* Kernel stack pointer */
-        unsigned long ksp_vsid;
-        struct pt_regs *regs; /* Pointer to saved register state */
-        mm_segment_t fs; /* for get_fs() validation */
-        double fpr[32]; /* Complete floating point set */
-        unsigned long fpscr; /* Floating point status (plus pad) */
-        unsigned long fpexc_mode; /* Floating-point exception mode */
-        unsigned long start_tb; /* Start purr when proc switched in */
-        unsigned long accum_tb; /* Total accumilated purr for process */
-        unsigned long vdso_base; /* base of the vDSO library */
-        unsigned long dabr; /* Data address breakpoint register */
-#ifdef CONFIG_ALTIVEC
-        /* Complete AltiVec register set */
-        vector128 vr[32] __attribute((aligned(16)));
-        /* AltiVec status */
-        vector128 vscr __attribute((aligned(16)));
-        unsigned long vrsave;
-        int used_vr; /* set if process has used altivec */
-#endif /* CONFIG_ALTIVEC */
-};
-
-#define ARCH_MIN_TASKALIGN 16
-
-#define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack)
-
-#define INIT_THREAD { \
-        .ksp = INIT_SP, \
-        .regs = (struct pt_regs *)INIT_SP - 1, \
-        .fs = KERNEL_DS, \
-        .fpr = {0}, \
-        .fpscr = 0, \
-        .fpexc_mode = MSR_FE0|MSR_FE1, \
-}
-
-/*
- * Return saved PC of a blocked thread. For now, this is the "user" PC
- */
-#define thread_saved_pc(tsk) \
-        ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
-
-unsigned long get_wchan(struct task_struct *p);
-
-#define KSTK_EIP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
-#define KSTK_ESP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)
-
-/* Get/set floating-point exception mode */
-#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr))
-#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val))
-
-extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
-extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);
-
-static inline unsigned int __unpack_fe01(unsigned long msr_bits)
-{
-        return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
-}
-
-static inline unsigned long __pack_fe01(unsigned int fpmode)
-{
-        return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
-}
-
-#define cpu_relax() do { HMT_low(); HMT_medium(); barrier(); } while (0)
-
-/*
- * Prefetch macros.
- */
-#define ARCH_HAS_PREFETCH
-#define ARCH_HAS_PREFETCHW
-#define ARCH_HAS_SPINLOCK_PREFETCH
-
-static inline void prefetch(const void *x)
-{
-        if (unlikely(!x))
-                return;
-
-        __asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
-}
-
-static inline void prefetchw(const void *x)
-{
-        if (unlikely(!x))
-                return;
-
-        __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
-}
-
-#define spin_lock_prefetch(x) prefetchw(x)
-
-#define HAVE_ARCH_PICK_MMAP_LAYOUT
-
-static inline void ppc64_runlatch_on(void)
-{
-        unsigned long ctrl;
-
-        if (cpu_has_feature(CPU_FTR_CTRL)) {
-                ctrl = mfspr(SPRN_CTRLF);
-                ctrl |= CTRL_RUNLATCH;
-                mtspr(SPRN_CTRLT, ctrl);
-        }
-}
-
-static inline void ppc64_runlatch_off(void)
-{
-        unsigned long ctrl;
-
-        if (cpu_has_feature(CPU_FTR_CTRL)) {
-                ctrl = mfspr(SPRN_CTRLF);
-                ctrl &= ~CTRL_RUNLATCH;
-                mtspr(SPRN_CTRLT, ctrl);
-        }
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* __ASSEMBLY__ */
-
-#ifdef __KERNEL__
-#define RUNLATCH_ON(REG) \
-BEGIN_FTR_SECTION \
-        mfspr (REG),SPRN_CTRLF; \
-        ori (REG),(REG),CTRL_RUNLATCH; \
-        mtspr SPRN_CTRLT,(REG); \
-END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
-#endif
-
-/*
- * Number of entries in the SLB. If this ever changes we should handle
- * it with a use a cpu feature fixup.
- */
-#define SLB_NUM_ENTRIES 64
-
-#endif /* __ASM_PPC64_PROCESSOR_H */
diff --git a/include/asm-ppc64/system.h b/include/asm-ppc64/system.h
index 1fbdc9f0590c..c04d62c592a6 100644
--- a/include/asm-ppc64/system.h
+++ b/include/asm-ppc64/system.h
@@ -144,13 +144,6 @@ struct thread_struct;
 extern struct task_struct * _switch(struct thread_struct *prev,
         struct thread_struct *next);
 
-static inline int __is_processor(unsigned long pv)
-{
-        unsigned long pvr;
-        asm("mfspr %0, 0x11F" : "=r" (pvr));
-        return(PVR_VER(pvr) == pv);
-}
-
 /*
  * Atomic exchange
  *