author		Thomas Gleixner <tglx@linutronix.de>	2007-10-11 05:20:03 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2007-10-11 05:20:03 -0400
commit		96a388de5dc53a8b234b3fd41f3ae2cedc9ffd42 (patch)
tree		d947a467aa2da3140279617bc4b9b101640d7bf4 /include/asm-x86_64/processor.h
parent		27bd0c955648646abf2a353a8371d28c37bcd982 (diff)
i386/x86_64: move headers to include/asm-x86
Move the headers to include/asm-x86 and fixup the
header install make rules
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-x86_64/processor.h')
-rw-r--r--	include/asm-x86_64/processor.h	439
1 files changed, 0 insertions, 439 deletions
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
deleted file mode 100644
index 31f579b828f2..000000000000
--- a/include/asm-x86_64/processor.h
+++ /dev/null
@@ -1,439 +0,0 @@
/*
 * include/asm-x86_64/processor.h
 *
 * Copyright (C) 1994 Linus Torvalds
 */

#ifndef __ASM_X86_64_PROCESSOR_H
#define __ASM_X86_64_PROCESSOR_H

#include <asm/segment.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/cpufeature.h>
#include <linux/threads.h>
#include <asm/msr.h>
#include <asm/current.h>
#include <asm/system.h>
#include <asm/mmsegment.h>
#include <asm/percpu.h>
#include <linux/personality.h>
#include <linux/cpumask.h>
#include <asm/processor-flags.h>

#define TF_MASK		0x00000100
#define IF_MASK		0x00000200
#define IOPL_MASK	0x00003000
#define NT_MASK		0x00004000
#define VM_MASK		0x00020000
#define AC_MASK		0x00040000
#define VIF_MASK	0x00080000	/* virtual interrupt flag */
#define VIP_MASK	0x00100000	/* virtual interrupt pending */
#define ID_MASK		0x00200000

#define desc_empty(desc) \
		(!((desc)->a | (desc)->b))

#define desc_equal(desc1, desc2) \
		(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })
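
/*
 * Illustrative use (hypothetical caller, not part of this header):
 * the RIP-relative leaq loads the address of the local label 1:,
 * i.e. the address of the instruction just past the leaq itself.
 */
static inline void *example_here(void)
{
	return current_text_addr();	/* an address inside this function */
}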

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 */

struct cpuinfo_x86 {
	__u8	x86;		/* CPU family */
	__u8	x86_vendor;	/* CPU vendor */
	__u8	x86_model;
	__u8	x86_mask;
	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
	__u32	x86_capability[NCAPINTS];
	char	x86_vendor_id[16];
	char	x86_model_id[64];
	int	x86_cache_size;	/* in KB */
	int	x86_clflush_size;
	int	x86_cache_alignment;
	int	x86_tlbsize;	/* number of 4K pages in DTLB/ITLB combined */
	__u8	x86_virt_bits, x86_phys_bits;
	__u8	x86_max_cores;	/* cpuid returned max cores value */
	__u32	x86_power;
	__u32	extended_cpuid_level;	/* Max extended CPUID function supported */
	unsigned long loops_per_jiffy;
#ifdef CONFIG_SMP
	cpumask_t llc_shared_map;	/* cpus sharing the last level cache */
#endif
	__u8	apicid;
#ifdef CONFIG_SMP
	__u8	booted_cores;	/* number of cores as seen by OS */
	__u8	phys_proc_id;	/* Physical Processor id. */
	__u8	cpu_core_id;	/* Core id. */
#endif
} ____cacheline_aligned;

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_NEXGEN	4
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NUM		8
#define X86_VENDOR_UNKNOWN	0xff

#ifdef CONFIG_SMP
extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data	cpu_data[smp_processor_id()]
#else
#define cpu_data		(&boot_cpu_data)
#define current_cpu_data	boot_cpu_data
#endif
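
/*
 * Illustrative use (hypothetical caller, not part of this header):
 * vendor/family checks read the per-CPU cpuinfo_x86 directly.
 */
static inline int example_is_amd_k8_or_later(void)
{
	struct cpuinfo_x86 *c = &current_cpu_data;

	return c->x86_vendor == X86_VENDOR_AMD && c->x86 >= 15;
}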

extern char ignore_irq13;

extern void identify_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

/*
 * Save the cr4 feature set we're using (ie
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPUs that boot up
 * after us can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
	mmu_cr4_features |= mask;
	__asm__("movq %%cr4,%%rax\n\t"
		"orq %0,%%rax\n\t"
		"movq %%rax,%%cr4\n"
		: : "irg" (mask)
		: "ax");
}

static inline void clear_in_cr4(unsigned long mask)
{
	mmu_cr4_features &= ~mask;
	__asm__("movq %%cr4,%%rax\n\t"
		"andq %0,%%rax\n\t"
		"movq %%rax,%%cr4\n"
		: : "irg" (~mask)
		: "ax");
}
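
/*
 * Illustrative use (hypothetical boot-time caller, not part of this
 * header): the X86_CR4_* bit definitions come from
 * <asm/processor-flags.h>, included above.
 */
static inline void example_enable_global_pages(void)
{
	set_in_cr4(X86_CR4_PGE);	/* set PGE in CR4 and remember it */
}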

/*
 * User space process size: 47 bits minus one guard page.
 */
#define TASK_SIZE64	(0x800000000000UL - 4096)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64)

#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE/3)
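
/*
 * Worked numbers for the limits above (illustrative only):
 *   0x800000000000UL = 2^47, so TASK_SIZE64 = 2^47 - 4096
 *   = 0x00007ffffffff000, the last user page below the
 *   canonical-address hole.
 *   For 32-bit tasks, IA32_PAGE_OFFSET is 3GB (0xc0000000) under the
 *   ADDR_LIMIT_3GB personality, else just under 4GB (0xFFFFe000).
 */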

/*
 * Size of io_bitmap.
 */
#define IO_BITMAP_BITS		65536
#define IO_BITMAP_BYTES		(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS		(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET	offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET 0x8000
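
/*
 * Worked numbers (illustrative only): one bit per I/O port, so
 *   IO_BITMAP_BITS  = 65536 ports
 *   IO_BITMAP_BYTES = 65536/8 = 8192 bytes
 *   IO_BITMAP_LONGS = 8192/8  = 1024 longs (sizeof(long) == 8 here)
 * INVALID_IO_BITMAP_OFFSET (0x8000) points past the TSS limit, so the
 * CPU faults on any user I/O access instead of consulting a bitmap.
 */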

struct i387_fxsave_struct {
	u16	cwd;
	u16	swd;
	u16	twd;
	u16	fop;
	u64	rip;
	u64	rdp;
	u32	mxcsr;
	u32	mxcsr_mask;
	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
	u32	padding[24];
} __attribute__((aligned(16)));
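
/*
 * Size check (illustrative only): 4*2 + 2*8 + 2*4 + 128 + 256 + 96
 * = 512 bytes, exactly the memory image that fxsave/fxrstor operate
 * on; the 16-byte alignment is an architectural requirement of fxsave.
 */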

union i387_union {
	struct i387_fxsave_struct fxsave;
};

struct tss_struct {
	u32	reserved1;
	u64	rsp0;
	u64	rsp1;
	u64	rsp2;
	u64	reserved2;
	u64	ist[7];
	u32	reserved3;
	u32	reserved4;
	u16	reserved5;
	u16	io_bitmap_base;
	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit. Thus we have:
	 *
	 * 128 bytes, the bitmap itself, for ports 0..0x3ff
	 * 8 bytes, for an extra "long" of ~0UL
	 */
	unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
} __attribute__((packed)) ____cacheline_aligned;
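
/*
 * Layout check (illustrative only): with the packed attribute the
 * fields above occupy 4 + 3*8 + 8 + 7*8 + 4 + 4 + 2 + 2 = 104 bytes,
 * matching the hardware 64-bit TSS, and io_bitmap_base lands at
 * offset 0x66, where the CPU expects the I/O map base address.
 */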


extern struct cpuinfo_x86 boot_cpu_data;
DECLARE_PER_CPU(struct tss_struct, init_tss);
/* Save the original ist values for checking stack pointers during debugging */
struct orig_ist {
	unsigned long ist[7];
};
DECLARE_PER_CPU(struct orig_ist, orig_ist);

#ifdef CONFIG_X86_VSMP
#define ARCH_MIN_TASKALIGN	(1 << INTERNODE_CACHE_SHIFT)
#define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
#define ARCH_MIN_TASKALIGN	16
#define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

struct thread_struct {
	unsigned long	rsp0;
	unsigned long	rsp;
	unsigned long	userrsp;	/* Copy from PDA */
	unsigned long	fs;
	unsigned long	gs;
	unsigned short	es, ds, fsindex, gsindex;
	/* Hardware debugging registers */
	unsigned long	debugreg0;
	unsigned long	debugreg1;
	unsigned long	debugreg2;
	unsigned long	debugreg3;
	unsigned long	debugreg6;
	unsigned long	debugreg7;
	/* fault info */
	unsigned long	cr2, trap_no, error_code;
	/* floating point info */
	union i387_union i387 __attribute__((aligned(16)));
	/* IO permissions. The bitmap could be moved into the GDT, which would
	   make context switches faster for the limited number of ioperm-using
	   tasks. -AK */
	int		ioperm;
	unsigned long	*io_bitmap_ptr;
	unsigned	io_bitmap_max;
	/* cached TLS descriptors. */
	u64		tls_array[GDT_ENTRY_TLS_ENTRIES];
} __attribute__((aligned(16)));

#define INIT_THREAD { \
	.rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS { \
	.rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_MMAP \
{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }

#define start_thread(regs, new_rip, new_rsp) do { \
	asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds" : : "r" (0)); \
	load_gs_index(0); \
	(regs)->rip = (new_rip); \
	(regs)->rsp = (new_rsp); \
	write_pda(oldrsp, (new_rsp)); \
	(regs)->cs = __USER_CS; \
	(regs)->ss = __USER_DS; \
	(regs)->eflags = 0x200; \
	set_fs(USER_DS); \
} while (0)

#define get_debugreg(var, register) \
	__asm__("movq %%db" #register ", %0" \
		: "=r" (var))
#define set_debugreg(value, register) \
	__asm__("movq %0,%%db" #register \
		: /* no output */ \
		: "r" (value))
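
/*
 * Illustrative use (hypothetical caller, not part of this header):
 * the register number must be a literal, since it is pasted into the
 * mnemonic by the # stringify operator.
 */
static inline unsigned long example_read_dr6(void)
{
	unsigned long dr6;

	get_debugreg(dr6, 6);	/* expands to "movq %db6, %0" */
	return dr6;
}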

struct task_struct;
struct mm_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy status */
extern void prepare_to_copy(struct task_struct *tsk);

/*
 * create a kernel thread without removing it from tasklists
 */
extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))

extern unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.rsp0 - 1)
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->rip)
#define KSTK_ESP(tsk) -1	/* sorry. doesn't work for syscall. */


struct microcode_header {
	unsigned int hdrver;
	unsigned int rev;
	unsigned int date;
	unsigned int sig;
	unsigned int cksum;
	unsigned int ldrver;
	unsigned int pf;
	unsigned int datasize;
	unsigned int totalsize;
	unsigned int reserved[3];
};

struct microcode {
	struct microcode_header hdr;
	unsigned int bits[0];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;

/* microcode format is extended from Prescott processors */
struct extended_signature {
	unsigned int sig;
	unsigned int pf;
	unsigned int cksum;
};

struct extended_sigtable {
	unsigned int count;
	unsigned int cksum;
	unsigned int reserved[3];
	struct extended_signature sigs[0];
};


#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8

/* Opteron nops */
#define K8_NOP1 ".byte 0x90\n"
#define K8_NOP2 ".byte 0x66,0x90\n"
#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
#define K8_NOP5 K8_NOP3 K8_NOP2
#define K8_NOP6 K8_NOP3 K8_NOP3
#define K8_NOP7 K8_NOP4 K8_NOP3
#define K8_NOP8 K8_NOP4 K8_NOP4
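
/*
 * Worked expansion (illustrative only): each K8 nop is a 0x90 nop
 * carrying 0x66 operand-size prefixes, and the longer nops are
 * concatenations of shorter ones, e.g.
 *   K8_NOP5 = K8_NOP3 K8_NOP2 = 66 66 90 66 90  (two instructions)
 * K8 decodes prefixed nops cheaply, hence this choice for Opteron.
 */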

#define ASM_NOP_MAX 8

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__("rep;nop" : : : "memory");
}

/* Stop speculative execution */
static inline void sync_core(void)
{
	int tmp;
	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx", "ecx", "edx", "memory");
}

#define ARCH_HAS_PREFETCH
static inline void prefetch(void *x)
{
	asm volatile("prefetcht0 (%0)" : : "r" (x));
}

#define ARCH_HAS_PREFETCHW 1
static inline void prefetchw(void *x)
{
	alternative_input("prefetcht0 (%1)",
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}

#define ARCH_HAS_SPINLOCK_PREFETCH 1

#define spin_lock_prefetch(x)	prefetchw(x)

#define cpu_relax()	rep_nop()
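
/*
 * Illustrative use (hypothetical caller, not part of this header):
 * the canonical busy-wait loop. The "rep;nop" (pause) hint reduces
 * power and lets a sibling hyperthread make progress.
 */
static inline void example_spin_until_set(volatile int *flag)
{
	while (!*flag)
		cpu_relax();
}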

static inline void serialize_cpu(void)
{
	__asm__ __volatile__("cpuid" : : : "ax", "bx", "cx", "dx");
}

static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax,%ecx,%edx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc8;"
		: : "a" (eax), "c" (ecx), "d" (edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax,%ecx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc9;"
		: : "a" (eax), "c" (ecx));
}
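
/*
 * Illustrative pattern (hypothetical idle loop, not part of this
 * header): arm the monitor on an address, re-check the condition to
 * close the race, then mwait until that cache line is written.
 */
static inline void example_wait_on(volatile unsigned long *word)
{
	__monitor((const void *)word, 0, 0);	/* arm address monitor */
	if (!*word)
		__mwait(0, 0);		/* sleep until *word is touched */
}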

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax,%ecx;" */
	asm volatile(
		"sti; .byte 0x0f,0x01,0xc9;"
		: : "a" (eax), "c" (ecx));
}

extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);

#define stack_current() \
({								\
	struct thread_info *ti;					\
	asm("andq %%rsp,%0; " : "=r" (ti) : "0" (CURRENT_MASK));	\
	ti->task;						\
})
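
/*
 * Worked example (illustrative only, assuming CURRENT_MASK is
 * ~(THREAD_SIZE - 1) as defined in <asm/page.h>): masking %rsp rounds
 * it down to the base of the kernel stack, where thread_info lives.
 * With THREAD_SIZE = 8192:
 *   0xffff81000f4d3e48 & ~0x1fff = 0xffff81000f4d2000
 */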

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern unsigned long boot_option_idle_override;
/* Boot loader type from the setup header */
extern int bootloader_type;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1

#endif /* __ASM_X86_64_PROCESSOR_H */