author     Linus Torvalds <torvalds@ppc970.osdl.org>    2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>    2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-ia64/system.h
tags: Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/asm-ia64/system.h')
-rw-r--r--    include/asm-ia64/system.h    295
1 file changed, 295 insertions, 0 deletions
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
new file mode 100644
index 000000000000..6f516e76d1f0
--- /dev/null
+++ b/include/asm-ia64/system.h
@@ -0,0 +1,295 @@
#ifndef _ASM_IA64_SYSTEM_H
#define _ASM_IA64_SYSTEM_H

/*
 * System defines. Note that this is included both from .c and .S
 * files, so it should contain only defines, not any C code.  This is based
 * on information published in the Processor Abstraction Layer
 * and the System Abstraction Layer manual.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
#include <linux/config.h>

#include <asm/kregs.h>
#include <asm/page.h>
#include <asm/pal.h>
#include <asm/percpu.h>

#define GATE_ADDR		__IA64_UL_CONST(0xa000000000000000)
/*
 * 0xa000000000000000+2*PERCPU_PAGE_SIZE
 * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
 */
#define KERNEL_START		__IA64_UL_CONST(0xa000000100000000)
#define PERCPU_ADDR		(-PERCPU_PAGE_SIZE)

#ifndef __ASSEMBLY__

#include <linux/kernel.h>
#include <linux/types.h>

struct pci_vector_struct {
	__u16 segment;	/* PCI Segment number */
	__u16 bus;	/* PCI Bus number */
	__u32 pci_id;	/* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */
	__u8 pin;	/* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */
	__u32 irq;	/* IRQ assigned */
};

extern struct ia64_boot_param {
	__u64 command_line;		/* physical address of command line arguments */
	__u64 efi_systab;		/* physical address of EFI system table */
	__u64 efi_memmap;		/* physical address of EFI memory map */
	__u64 efi_memmap_size;		/* size of EFI memory map */
	__u64 efi_memdesc_size;		/* size of an EFI memory map descriptor */
	__u32 efi_memdesc_version;	/* memory descriptor version */
	struct {
		__u16 num_cols;	/* number of columns on console output device */
		__u16 num_rows;	/* number of rows on console output device */
		__u16 orig_x;	/* cursor's x position */
		__u16 orig_y;	/* cursor's y position */
	} console_info;
	__u64 fpswa;		/* physical address of the fpswa interface */
	__u64 initrd_start;
	__u64 initrd_size;
} *ia64_boot_param;

/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():	Guarantees that all preceding stores to memory-
 *		like regions are visible before any subsequent
 *		stores and that all following stores will be
 *		visible only after all previous stores.
 *   rmb():	Like wmb(), but for reads.
 *   mb():	wmb()/rmb() combo, i.e., all previous memory
 *		accesses are visible before all subsequent
 *		accesses and vice versa.  This is also known as
 *		a "fence."
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */
#define mb()	ia64_mf()
#define rmb()	mb()
#define wmb()	mb()
#define read_barrier_depends()	do { } while(0)

#ifdef CONFIG_SMP
# define smp_mb()	mb()
# define smp_rmb()	rmb()
# define smp_wmb()	wmb()
# define smp_read_barrier_depends()	read_barrier_depends()
#else
# define smp_mb()	barrier()
# define smp_rmb()	barrier()
# define smp_wmb()	barrier()
# define smp_read_barrier_depends()	do { } while(0)
#endif
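
/*
 * Illustrative sketch only (not part of the original header): one common
 * way the barriers above are paired is a producer/consumer handshake.
 * The producer makes its payload visible before the flag; the consumer
 * orders the flag read before the payload read.  example_data and
 * example_flag are hypothetical variables used purely for illustration.
 */
static int example_data, example_flag;

static inline void example_publish(void)
{
	example_data = 42;	/* store the payload first */
	smp_wmb();		/* order the payload store before the flag store */
	example_flag = 1;	/* then publish it */
}

static inline void example_consume(void)
{
	if (example_flag) {	/* saw the flag... */
		smp_rmb();	/* ...so order the flag read before the payload read */
		/* example_data is guaranteed to read as 42 here */
	}
}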

/*
 * XXX check on these---I suspect what Linus really wants here is
 * acquire vs release semantics but we can't discuss this stuff with
 * Linus just yet.  Grrr...
 */
#define set_mb(var, value)	do { (var) = (value); mb(); } while (0)
#define set_wmb(var, value)	do { (var) = (value); mb(); } while (0)

#define safe_halt()	ia64_pal_halt_light()	/* PAL_HALT_LIGHT */

/*
 * The group barrier in front of the rsm & ssm is necessary to ensure
 * that none of the previous instructions in the same group are
 * affected by the rsm/ssm.
 */
/* For spinlocks etc */

/*
 * - clearing psr.i is implicitly serialized (visible by next insn)
 * - setting psr.i requires data serialization
 * - we need a stop-bit before reading PSR because we sometimes
 *   write a floating-point register right before reading the PSR
 *   and that writes to PSR.mfl
 */
#define __local_irq_save(x)			\
do {						\
	ia64_stop();				\
	(x) = ia64_getreg(_IA64_REG_PSR);	\
	ia64_stop();				\
	ia64_rsm(IA64_PSR_I);			\
} while (0)

#define __local_irq_disable()			\
do {						\
	ia64_stop();				\
	ia64_rsm(IA64_PSR_I);			\
} while (0)

#define __local_irq_restore(x)	ia64_intrin_local_irq_restore((x) & IA64_PSR_I)

#ifdef CONFIG_IA64_DEBUG_IRQ

extern unsigned long last_cli_ip;

# define __save_ip()		last_cli_ip = ia64_getreg(_IA64_REG_IP)

# define local_irq_save(x)				\
do {							\
	unsigned long psr;				\
							\
	__local_irq_save(psr);				\
	if (psr & IA64_PSR_I)				\
		__save_ip();				\
	(x) = psr;					\
} while (0)

# define local_irq_disable()	do { unsigned long x; local_irq_save(x); } while (0)

# define local_irq_restore(x)				\
do {							\
	unsigned long old_psr, psr = (x);		\
							\
	local_save_flags(old_psr);			\
	__local_irq_restore(psr);			\
	if ((old_psr & IA64_PSR_I) && !(psr & IA64_PSR_I))	\
		__save_ip();				\
} while (0)

#else /* !CONFIG_IA64_DEBUG_IRQ */
# define local_irq_save(x)	__local_irq_save(x)
# define local_irq_disable()	__local_irq_disable()
# define local_irq_restore(x)	__local_irq_restore(x)
#endif /* !CONFIG_IA64_DEBUG_IRQ */

#define local_irq_enable()	({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
#define local_save_flags(flags)	({ ia64_stop(); (flags) = ia64_getreg(_IA64_REG_PSR); })

#define irqs_disabled()				\
({						\
	unsigned long __ia64_id_flags;		\
	local_save_flags(__ia64_id_flags);	\
	(__ia64_id_flags & IA64_PSR_I) == 0;	\
})
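
/*
 * Illustrative sketch only (not part of the original header): a typical
 * use of the flags-based interrupt macros above.  example_counter and
 * example_local_inc() are hypothetical; the point is that the critical
 * section cannot be interrupted on the local CPU, and that psr.i is put
 * back exactly as it was on entry.
 */
static int example_counter;

static inline void example_local_inc(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* disable interrupts, remember old psr.i */
	example_counter++;		/* short, local-CPU-only critical section */
	local_irq_restore(flags);	/* re-enable only if they were enabled before */
}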

#ifdef __KERNEL__

#define prepare_to_switch()	do { } while(0)

#ifdef CONFIG_IA32_SUPPORT
# define IS_IA32_PROCESS(regs)	(ia64_psr(regs)->is != 0)
#else
# define IS_IA32_PROCESS(regs)	0
struct task_struct;
static inline void ia32_save_state(struct task_struct *t __attribute__((unused))) {}
static inline void ia32_load_state(struct task_struct *t __attribute__((unused))) {}
#endif

/*
 * Context switch from one thread to another.  If the two threads have
 * different address spaces, schedule() has already taken care of
 * switching to the new address space by calling switch_mm().
 *
 * Disabling access to the fph partition and the debug-register
 * context switch MUST be done before calling ia64_switch_to() since a
 * newly created thread returns directly to
 * ia64_ret_from_syscall_clear_r8.
 */
extern struct task_struct *ia64_switch_to (void *next_task);

struct task_struct;

extern void ia64_save_extra (struct task_struct *task);
extern void ia64_load_extra (struct task_struct *task);

#ifdef CONFIG_PERFMON
  DECLARE_PER_CPU(unsigned long, pfm_syst_info);
# define PERFMON_IS_SYSWIDE()	(__get_cpu_var(pfm_syst_info) & 0x1)
#else
# define PERFMON_IS_SYSWIDE()	(0)
#endif

#define IA64_HAS_EXTRA_STATE(t)							\
	((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)	\
	 || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())

#define __switch_to(prev,next,last) do {					\
	if (IA64_HAS_EXTRA_STATE(prev))						\
		ia64_save_extra(prev);						\
	if (IA64_HAS_EXTRA_STATE(next))						\
		ia64_load_extra(next);						\
	ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);	\
	(last) = ia64_switch_to((next));					\
} while (0)

#ifdef CONFIG_SMP
/*
 * In the SMP case, we save the fph state when context-switching away from a thread that
 * modified fph.  This way, when the thread gets scheduled on another CPU, the CPU can
 * pick up the state from task->thread.fph, avoiding the complication of having to fetch
 * the latest fph state from another CPU.  In other words: eager save, lazy restore.
 */
# define switch_to(prev,next,last) do {							\
	if (ia64_psr(ia64_task_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) {	\
		ia64_psr(ia64_task_regs(prev))->mfh = 0;				\
		(prev)->thread.flags |= IA64_THREAD_FPH_VALID;				\
		__ia64_save_fpu((prev)->thread.fph);					\
	}										\
	__switch_to(prev, next, last);							\
} while (0)
#else
# define switch_to(prev,next,last)	__switch_to(prev, next, last)
#endif

/*
 * On IA-64, we don't want to hold the runqueue's lock during the low-level context-switch,
 * because that could cause a deadlock.  Here is an example by Erich Focht:
 *
 * Example:
 * CPU#0:
 *	schedule()
 *	-> spin_lock_irq(&rq->lock)
 *	-> context_switch()
 *	-> wrap_mmu_context()
 *	-> read_lock(&tasklist_lock)
 *
 * CPU#1:
 *	sys_wait4() or release_task() or forget_original_parent()
 *	-> write_lock(&tasklist_lock)
 *	-> do_notify_parent()
 *	-> wake_up_parent()
 *	-> try_to_wake_up()
 *	-> spin_lock_irq(&parent_rq->lock)
 *
 * If the parent's rq happens to be on CPU#0, we'll wait for that CPU's
 * rq->lock, which will not be released, because CPU#0 is itself waiting
 * for the tasklist_lock to become available.
 */
#define prepare_arch_switch(rq, next)		\
do {						\
	spin_lock(&(next)->switch_lock);	\
	spin_unlock(&(rq)->lock);		\
} while (0)
#define finish_arch_switch(rq, prev)	spin_unlock_irq(&(prev)->switch_lock)
#define task_running(rq, p)		((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))

#define ia64_platform_is(x)	(strcmp(x, platform_name) == 0)

void cpu_idle_wait(void);

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_IA64_SYSTEM_H */