Diffstat (limited to 'arch/ia64/include/asm/system.h')
-rw-r--r-- | arch/ia64/include/asm/system.h | 292
1 file changed, 292 insertions, 0 deletions
diff --git a/arch/ia64/include/asm/system.h b/arch/ia64/include/asm/system.h
new file mode 100644
index 000000000000..927a381c20ca
--- /dev/null
+++ b/arch/ia64/include/asm/system.h
@@ -0,0 +1,292 @@
#ifndef _ASM_IA64_SYSTEM_H
#define _ASM_IA64_SYSTEM_H

/*
 * System defines.  Note that this is included both from .c and .S
 * files, so it should contain only defines, not any C code.  This is based
 * on information published in the Processor Abstraction Layer
 * and the System Abstraction Layer manual.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */

#include <asm/kregs.h>
#include <asm/page.h>
#include <asm/pal.h>
#include <asm/percpu.h>

#define GATE_ADDR		RGN_BASE(RGN_GATE)

/*
 * 0xa000000000000000+2*PERCPU_PAGE_SIZE
 * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
 */
#define KERNEL_START		(GATE_ADDR+__IA64_UL_CONST(0x100000000))
#define PERCPU_ADDR		(-PERCPU_PAGE_SIZE)
#define LOAD_OFFSET		(KERNEL_START - KERNEL_TR_PAGE_SIZE)

#ifndef __ASSEMBLY__

#include <linux/kernel.h>
#include <linux/types.h>

#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */

struct pci_vector_struct {
	__u16 segment;	/* PCI Segment number */
	__u16 bus;	/* PCI Bus number */
	__u32 pci_id;	/* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */
	__u8 pin;	/* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */
	__u32 irq;	/* IRQ assigned */
};
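
/*
 * Illustrative sketch, not part of the original header: per the comment
 * above, pci_id follows the ACPI _ADR convention, with the device number
 * in the high 16 bits and the function number in the low 16 bits.  The
 * helper below is hypothetical and only makes that encoding concrete.
 */
#if 0	/* example only, never compiled */
static inline __u32 pci_vector_make_id(__u16 device, __u16 function)
{
	return ((__u32) device << 16) | function;	/* 16:16 split, section 6.1.1 */
}
#endif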

extern struct ia64_boot_param {
	__u64 command_line;		/* physical address of command line arguments */
	__u64 efi_systab;		/* physical address of EFI system table */
	__u64 efi_memmap;		/* physical address of EFI memory map */
	__u64 efi_memmap_size;		/* size of EFI memory map */
	__u64 efi_memdesc_size;		/* size of an EFI memory map descriptor */
	__u32 efi_memdesc_version;	/* memory descriptor version */
	struct {
		__u16 num_cols;	/* number of columns on console output device */
		__u16 num_rows;	/* number of rows on console output device */
		__u16 orig_x;	/* cursor's x position */
		__u16 orig_y;	/* cursor's y position */
	} console_info;
	__u64 fpswa;		/* physical address of the fpswa interface */
	__u64 initrd_start;
	__u64 initrd_size;
} *ia64_boot_param;
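
/*
 * Illustrative sketch, not part of the original header: the fields above
 * hold *physical* addresses handed over by the boot loader, so a consumer
 * would translate them through __va() before dereferencing.  The function
 * name below is hypothetical.
 */
#if 0	/* example only, never compiled */
static void example_show_command_line(void)
{
	const char *cmdline = __va(ia64_boot_param->command_line);

	printk(KERN_INFO "boot command line: %s\n", cmdline);
}
#endif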

/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():	Guarantees that all preceding stores to memory-
 *		like regions are visible before any subsequent
 *		stores and that all following stores will be
 *		visible only after all previous stores.
 *   rmb():	Like wmb(), but for reads.
 *   mb():	wmb()/rmb() combo, i.e., all previous memory
 *		accesses are visible before all subsequent
 *		accesses and vice versa.  This is also known as
 *		a "fence."
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */
#define mb()	ia64_mf()
#define rmb()	mb()
#define wmb()	mb()
#define read_barrier_depends()	do { } while(0)

#ifdef CONFIG_SMP
# define smp_mb()	mb()
# define smp_rmb()	rmb()
# define smp_wmb()	wmb()
# define smp_read_barrier_depends()	read_barrier_depends()
#else
# define smp_mb()	barrier()
# define smp_rmb()	barrier()
# define smp_wmb()	barrier()
# define smp_read_barrier_depends()	do { } while(0)
#endif
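
/*
 * Illustrative sketch, not part of the original header: the canonical
 * pairing of the barriers defined above in a producer/consumer hand-off.
 * "example_data" and "example_flag" are hypothetical variables.
 */
#if 0	/* example only, never compiled */
static int example_data, example_flag;

static void example_producer(void)
{
	example_data = 42;	/* publish the payload... */
	smp_wmb();		/* ...before the flag that announces it */
	example_flag = 1;
}

static void example_consumer(void)
{
	while (!example_flag)
		cpu_relax();
	smp_rmb();		/* pairs with the smp_wmb() above */
	BUG_ON(example_data != 42);
}
#endif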

/*
 * XXX check on this ---I suspect what Linus really wants here is
 * acquire vs release semantics but we can't discuss this stuff with
 * Linus just yet.  Grrr...
 */
#define set_mb(var, value)	do { (var) = (value); mb(); } while (0)

#define safe_halt()	ia64_pal_halt_light()	/* PAL_HALT_LIGHT */

/*
 * The group barrier in front of the rsm & ssm is necessary to ensure
 * that none of the previous instructions in the same group are
 * affected by the rsm/ssm.
 */
/* For spinlocks etc */

/*
 * - clearing psr.i is implicitly serialized (visible by next insn)
 * - setting psr.i requires data serialization
 * - we need a stop-bit before reading PSR because we sometimes
 *   write a floating-point register right before reading the PSR
 *   and that writes to PSR.mfl
 */
#ifdef CONFIG_PARAVIRT
#define __local_save_flags()	ia64_get_psr_i()
#else
#define __local_save_flags()	ia64_getreg(_IA64_REG_PSR)
#endif

#define __local_irq_save(x)			\
do {						\
	ia64_stop();				\
	(x) = __local_save_flags();		\
	ia64_stop();				\
	ia64_rsm(IA64_PSR_I);			\
} while (0)

#define __local_irq_disable()			\
do {						\
	ia64_stop();				\
	ia64_rsm(IA64_PSR_I);			\
} while (0)

#define __local_irq_restore(x)	ia64_intrin_local_irq_restore((x) & IA64_PSR_I)

#ifdef CONFIG_IA64_DEBUG_IRQ

extern unsigned long last_cli_ip;

# define __save_ip()		last_cli_ip = ia64_getreg(_IA64_REG_IP)

# define local_irq_save(x)					\
do {								\
	unsigned long __psr;					\
								\
	__local_irq_save(__psr);				\
	if (__psr & IA64_PSR_I)					\
		__save_ip();					\
	(x) = __psr;						\
} while (0)

# define local_irq_disable()	do { unsigned long __x; local_irq_save(__x); } while (0)

# define local_irq_restore(x)					\
do {								\
	unsigned long __old_psr, __psr = (x);			\
								\
	local_save_flags(__old_psr);				\
	__local_irq_restore(__psr);				\
	if ((__old_psr & IA64_PSR_I) && !(__psr & IA64_PSR_I))	\
		__save_ip();					\
} while (0)

#else /* !CONFIG_IA64_DEBUG_IRQ */
# define local_irq_save(x)	__local_irq_save(x)
# define local_irq_disable()	__local_irq_disable()
# define local_irq_restore(x)	__local_irq_restore(x)
#endif /* !CONFIG_IA64_DEBUG_IRQ */

#define local_irq_enable()	({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
#define local_save_flags(flags)	({ ia64_stop(); (flags) = __local_save_flags(); })

#define irqs_disabled()				\
({						\
	unsigned long __ia64_id_flags;		\
	local_save_flags(__ia64_id_flags);	\
	(__ia64_id_flags & IA64_PSR_I) == 0;	\
})
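
/*
 * Illustrative sketch, not part of the original header: typical use of
 * the primitives above to protect a short critical section against
 * interrupt handlers.  The counter is hypothetical.
 */
#if 0	/* example only, never compiled */
static unsigned long example_counter;

static void example_increment(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* flags receives the old psr */
	example_counter++;		/* psr.i clear: no handler can race us */
	local_irq_restore(flags);	/* brings back the previous psr.i state */

	/* never fires: if psr.i was set in flags, restore re-enabled irqs */
	WARN_ON(irqs_disabled() && (flags & IA64_PSR_I));
}
#endif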

#ifdef __KERNEL__

#ifdef CONFIG_IA32_SUPPORT
# define IS_IA32_PROCESS(regs)	(ia64_psr(regs)->is != 0)
#else
# define IS_IA32_PROCESS(regs)	0
struct task_struct;
static inline void ia32_save_state(struct task_struct *t __attribute__((unused))) {}
static inline void ia32_load_state(struct task_struct *t __attribute__((unused))) {}
#endif

/*
 * Context switch from one thread to another.  If the two threads have
 * different address spaces, schedule() has already taken care of
 * switching to the new address space by calling switch_mm().
 *
 * Disabling access to the fph partition and the debug-register
 * context switch MUST be done before calling ia64_switch_to() since a
 * newly created thread returns directly to
 * ia64_ret_from_syscall_clear_r8.
 */
extern struct task_struct *ia64_switch_to (void *next_task);

struct task_struct;

extern void ia64_save_extra (struct task_struct *task);
extern void ia64_load_extra (struct task_struct *task);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct *next);
# define IA64_ACCOUNT_ON_SWITCH(p,n)	ia64_account_on_switch(p,n)
#else
# define IA64_ACCOUNT_ON_SWITCH(p,n)
#endif

#ifdef CONFIG_PERFMON
DECLARE_PER_CPU(unsigned long, pfm_syst_info);
# define PERFMON_IS_SYSWIDE()	(__get_cpu_var(pfm_syst_info) & 0x1)
#else
# define PERFMON_IS_SYSWIDE()	(0)
#endif

#define IA64_HAS_EXTRA_STATE(t)						\
	((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)	\
	 || IS_IA32_PROCESS(task_pt_regs(t)) || PERFMON_IS_SYSWIDE())

#define __switch_to(prev,next,last) do {					\
	IA64_ACCOUNT_ON_SWITCH(prev, next);					\
	if (IA64_HAS_EXTRA_STATE(prev))						\
		ia64_save_extra(prev);						\
	if (IA64_HAS_EXTRA_STATE(next))						\
		ia64_load_extra(next);						\
	ia64_psr(task_pt_regs(next))->dfh = !ia64_is_local_fpu_owner(next);	\
	(last) = ia64_switch_to((next));					\
} while (0)
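
/*
 * Descriptive note on the psr.dfh manipulation above: setting the
 * "disabled floating-point high" bit whenever "next" does not own the
 * local fpu makes the first access to the fph partition (f32-f127)
 * trap, which is what lets the kernel restore next->thread.fph lazily;
 * the matching eager-save half of the protocol is described in the SMP
 * comment below.
 */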

#ifdef CONFIG_SMP
/*
 * In the SMP case, we save the fph state when context-switching away from a thread that
 * modified fph.  This way, when the thread gets scheduled on another CPU, the CPU can
 * pick up the state from task->thread.fph, avoiding the complication of having to fetch
 * the latest fph state from another CPU.  In other words: eager save, lazy restore.
 */
# define switch_to(prev,next,last) do {						\
	if (ia64_psr(task_pt_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) {	\
		ia64_psr(task_pt_regs(prev))->mfh = 0;				\
		(prev)->thread.flags |= IA64_THREAD_FPH_VALID;			\
		__ia64_save_fpu((prev)->thread.fph);				\
	}									\
	__switch_to(prev, next, last);						\
	/* "next" in old context is "current" in new context */		\
	if (unlikely((current->thread.flags & IA64_THREAD_MIGRATION) &&	\
		     (task_cpu(current) !=					\
		      task_thread_info(current)->last_cpu))) {			\
		platform_migrate(current);					\
		task_thread_info(current)->last_cpu = task_cpu(current);	\
	}									\
} while (0)
#else
# define switch_to(prev,next,last)	__switch_to(prev, next, last)
#endif

#define __ARCH_WANT_UNLOCKED_CTXSW
#define ARCH_HAS_PREFETCH_SWITCH_STACK
#define ia64_platform_is(x)	(strcmp(x, platform_name) == 0)

void cpu_idle_wait(void);

#define arch_align_stack(x) (x)

void default_idle(void);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_system_vtime(struct task_struct *);
#endif

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_IA64_SYSTEM_H */