Diffstat (limited to 'arch/ia64/include/asm/system.h')
 arch/ia64/include/asm/system.h | 203 ----------
 1 file changed, 0 insertions(+), 203 deletions(-)
diff --git a/arch/ia64/include/asm/system.h b/arch/ia64/include/asm/system.h
deleted file mode 100644
index 6cca30705d50..000000000000
--- a/arch/ia64/include/asm/system.h
+++ /dev/null
@@ -1,203 +0,0 @@
-#ifndef _ASM_IA64_SYSTEM_H
-#define _ASM_IA64_SYSTEM_H
-
-/*
- * System defines. Note that this is included both from .c and .S
- * files, so it contains only #defines, not any C code. This is based
- * on information published in the Processor Abstraction Layer
- * and the System Abstraction Layer manuals.
- *
- * Copyright (C) 1998-2003 Hewlett-Packard Co
- *	David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
- * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
- */
-
-#include <asm/kregs.h>
-#include <asm/page.h>
-#include <asm/pal.h>
-#include <asm/percpu.h>
-
-#define GATE_ADDR		RGN_BASE(RGN_GATE)
-
-/*
- * 0xa000000000000000+2*PERCPU_PAGE_SIZE
- * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
- */
-#define KERNEL_START		(GATE_ADDR+__IA64_UL_CONST(0x100000000))
-#define PERCPU_ADDR		(-PERCPU_PAGE_SIZE)
-#define LOAD_OFFSET		(KERNEL_START - KERNEL_TR_PAGE_SIZE)
-
-#ifndef __ASSEMBLY__
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */
-
-struct pci_vector_struct {
-	__u16 segment;	/* PCI Segment number */
-	__u16 bus;	/* PCI Bus number */
-	__u32 pci_id;	/* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */
-	__u8 pin;	/* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */
-	__u32 irq;	/* IRQ assigned */
-};
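A minimal decode sketch for the pci_id field, following the comment above; the helper names are hypothetical, not part of this header:

/* Hypothetical helpers: the high 16 bits of pci_id hold the ACPI
 * device number, the low 16 bits the function number. */
static inline unsigned int pci_vec_dev(const struct pci_vector_struct *v)
{
	return v->pci_id >> 16;
}

static inline unsigned int pci_vec_fn(const struct pci_vector_struct *v)
{
	return v->pci_id & 0xffff;
}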
45 | |||
46 | extern struct ia64_boot_param { | ||
47 | __u64 command_line; /* physical address of command line arguments */ | ||
48 | __u64 efi_systab; /* physical address of EFI system table */ | ||
49 | __u64 efi_memmap; /* physical address of EFI memory map */ | ||
50 | __u64 efi_memmap_size; /* size of EFI memory map */ | ||
51 | __u64 efi_memdesc_size; /* size of an EFI memory map descriptor */ | ||
52 | __u32 efi_memdesc_version; /* memory descriptor version */ | ||
53 | struct { | ||
54 | __u16 num_cols; /* number of columns on console output device */ | ||
55 | __u16 num_rows; /* number of rows on console output device */ | ||
56 | __u16 orig_x; /* cursor's x position */ | ||
57 | __u16 orig_y; /* cursor's y position */ | ||
58 | } console_info; | ||
59 | __u64 fpswa; /* physical address of the fpswa interface */ | ||
60 | __u64 initrd_start; | ||
61 | __u64 initrd_size; | ||
62 | } *ia64_boot_param; | ||
63 | |||
-/*
- * Macros to force memory ordering. In these descriptions, "previous"
- * and "subsequent" refer to program order; "visible" means that all
- * architecturally visible effects of a memory access have occurred
- * (at a minimum, this means the memory has been read or written).
- *
- *   wmb():	Guarantees that all preceding stores to memory-
- *		like regions are visible before any subsequent
- *		stores and that all following stores will be
- *		visible only after all previous stores.
- *   rmb():	Like wmb(), but for reads.
- *   mb():	wmb()/rmb() combo, i.e., all previous memory
- *		accesses are visible before all subsequent
- *		accesses and vice versa. This is also known as
- *		a "fence."
- *
- * Note: "mb()" and its variants cannot be used as a fence to order
- * accesses to memory mapped I/O registers. For that, mf.a needs to
- * be used. However, we don't want to always use mf.a because (a)
- * it's (presumably) much slower than mf and (b) mf.a is supported for
- * sequential memory pages only.
- */
-#define mb()	ia64_mf()
-#define rmb()	mb()
-#define wmb()	mb()
-#define read_barrier_depends()	do { } while(0)
-
-#ifdef CONFIG_SMP
-# define smp_mb()	mb()
-# define smp_rmb()	rmb()
-# define smp_wmb()	wmb()
-# define smp_read_barrier_depends()	read_barrier_depends()
-#else
-# define smp_mb()	barrier()
-# define smp_rmb()	barrier()
-# define smp_wmb()	barrier()
-# define smp_read_barrier_depends()	do { } while(0)
-#endif
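A minimal producer/consumer sketch of the smp_wmb()/smp_rmb() pairing described above; data and flag are hypothetical shared variables, not anything defined in this header:

/* Illustrative only: a reader that observes flag == 1 is guaranteed
 * to observe data == 42 as well. */
static int data, flag;

static void producer(void)
{
	data = 42;
	smp_wmb();	/* order the data store before the flag store */
	flag = 1;
}

static void consumer(void)
{
	while (!flag)
		cpu_relax();	/* spin until the producer publishes */
	smp_rmb();	/* order the flag read before the data read */
	BUG_ON(data != 42);
}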
102 | |||
103 | /* | ||
104 | * XXX check on this ---I suspect what Linus really wants here is | ||
105 | * acquire vs release semantics but we can't discuss this stuff with | ||
106 | * Linus just yet. Grrr... | ||
107 | */ | ||
108 | #define set_mb(var, value) do { (var) = (value); mb(); } while (0) | ||
109 | |||
-/*
- * The group barrier in front of the rsm & ssm is necessary to ensure
- * that none of the previous instructions in the same group is
- * affected by the rsm/ssm.
- */
-
-#ifdef __KERNEL__
-
-/*
- * Context switch from one thread to another. If the two threads have
- * different address spaces, schedule() has already taken care of
- * switching to the new address space by calling switch_mm().
- *
- * Disabling access to the fph partition and the debug-register
- * context switch MUST be done before calling ia64_switch_to() since a
- * newly created thread returns directly to
- * ia64_ret_from_syscall_clear_r8.
- */
-struct task_struct;
-
-extern struct task_struct *ia64_switch_to (void *next_task);
-
-extern void ia64_save_extra (struct task_struct *task);
-extern void ia64_load_extra (struct task_struct *task);
134 | |||
135 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
136 | extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct *next); | ||
137 | # define IA64_ACCOUNT_ON_SWITCH(p,n) ia64_account_on_switch(p,n) | ||
138 | #else | ||
139 | # define IA64_ACCOUNT_ON_SWITCH(p,n) | ||
140 | #endif | ||
141 | |||
142 | #ifdef CONFIG_PERFMON | ||
143 | DECLARE_PER_CPU(unsigned long, pfm_syst_info); | ||
144 | # define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1) | ||
145 | #else | ||
146 | # define PERFMON_IS_SYSWIDE() (0) | ||
147 | #endif | ||
148 | |||
-#define IA64_HAS_EXTRA_STATE(t)						\
-	((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)	\
-	 || PERFMON_IS_SYSWIDE())
-
-#define __switch_to(prev,next,last) do {					\
-	IA64_ACCOUNT_ON_SWITCH(prev, next);					\
-	if (IA64_HAS_EXTRA_STATE(prev))						\
-		ia64_save_extra(prev);						\
-	if (IA64_HAS_EXTRA_STATE(next))						\
-		ia64_load_extra(next);						\
-	ia64_psr(task_pt_regs(next))->dfh = !ia64_is_local_fpu_owner(next);	\
-	(last) = ia64_switch_to((next));					\
-} while (0)
-
-#ifdef CONFIG_SMP
-/*
- * In the SMP case, we save the fph state when context-switching away from a thread that
- * modified fph. This way, when the thread gets scheduled on another CPU, the CPU can
- * pick up the state from task->thread.fph, avoiding the complication of having to fetch
- * the latest fph state from another CPU. In other words: eager save, lazy restore.
- */
-# define switch_to(prev,next,last) do {						\
-	if (ia64_psr(task_pt_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) {	\
-		ia64_psr(task_pt_regs(prev))->mfh = 0;					\
-		(prev)->thread.flags |= IA64_THREAD_FPH_VALID;				\
-		__ia64_save_fpu((prev)->thread.fph);					\
-	}										\
-	__switch_to(prev, next, last);							\
-	/* "next" in old context is "current" in new context */			\
-	if (unlikely((current->thread.flags & IA64_THREAD_MIGRATION) &&		\
-		     (task_cpu(current) !=						\
-		      task_thread_info(current)->last_cpu))) {				\
-		platform_migrate(current);						\
-		task_thread_info(current)->last_cpu = task_cpu(current);		\
-	}										\
-} while (0)
-#else
-# define switch_to(prev,next,last)	__switch_to(prev, next, last)
-#endif
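As a usage sketch, this is roughly the contract under which the scheduler core invokes switch_to(); the surrounding function is hypothetical:

/* Illustrative caller: 'last' receives the task this CPU switched
 * away from in order to run us, which need not still be 'prev' by
 * the time we resume. */
static void context_switch_sketch(struct task_struct *prev,
				  struct task_struct *next)
{
	struct task_struct *last;

	switch_to(prev, next, last);
	/* Execution resumes here in 'next''s context, possibly much
	 * later; post-switch cleanup for 'last' would happen here. */
}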
188 | |||
189 | #define __ARCH_WANT_UNLOCKED_CTXSW | ||
190 | #define ARCH_HAS_PREFETCH_SWITCH_STACK | ||
191 | #define ia64_platform_is(x) (strcmp(x, platform_name) == 0) | ||
192 | |||
193 | void cpu_idle_wait(void); | ||
194 | |||
195 | #define arch_align_stack(x) (x) | ||
196 | |||
197 | void default_idle(void); | ||
198 | |||
199 | #endif /* __KERNEL__ */ | ||
200 | |||
201 | #endif /* __ASSEMBLY__ */ | ||
202 | |||
203 | #endif /* _ASM_IA64_SYSTEM_H */ | ||