Diffstat (limited to 'include/asm-x86/processor.h')
-rw-r--r-- | include/asm-x86/processor.h | 841
1 file changed, 839 insertions(+), 2 deletions(-)
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 46e1c04e309c..ab4d0c2a3f8f 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -1,5 +1,842 @@
1 | #ifndef __ASM_X86_PROCESSOR_H | ||
2 | #define __ASM_X86_PROCESSOR_H | ||
3 | |||
4 | #include <asm/processor-flags.h> | ||
5 | |||
6 | /* migration helpers, for KVM - will be removed in 2.6.25: */ | ||
7 | #include <asm/vm86.h> | ||
8 | #define Xgt_desc_struct desc_ptr | ||
9 | |||
10 | /* Forward declaration, a strange C thing */ | ||
11 | struct task_struct; | ||
12 | struct mm_struct; | ||
13 | |||
14 | #include <asm/vm86.h> | ||
15 | #include <asm/math_emu.h> | ||
16 | #include <asm/segment.h> | ||
17 | #include <asm/types.h> | ||
18 | #include <asm/sigcontext.h> | ||
19 | #include <asm/current.h> | ||
20 | #include <asm/cpufeature.h> | ||
21 | #include <asm/system.h> | ||
22 | #include <asm/page.h> | ||
23 | #include <asm/percpu.h> | ||
24 | #include <asm/msr.h> | ||
25 | #include <asm/desc_defs.h> | ||
26 | #include <asm/nops.h> | ||
27 | #include <linux/personality.h> | ||
28 | #include <linux/cpumask.h> | ||
29 | #include <linux/cache.h> | ||
30 | #include <linux/threads.h> | ||
31 | #include <linux/init.h> | ||
32 | |||
33 | /* | ||
34 | * Default implementation of macro that returns current | ||
35 | * instruction pointer ("program counter"). | ||
36 | */ | ||
37 | static inline void *current_text_addr(void) | ||
38 | { | ||
39 | void *pc; | ||
40 | asm volatile("mov $1f,%0\n1:":"=r" (pc)); | ||
41 | return pc; | ||
42 | } | ||
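/*
 * Illustrative sketch (editorial, not part of this patch): a typical
 * use of current_text_addr() for ad-hoc "where am I?" debug output.
 * Assumes <linux/kernel.h> for printk().
 */
static inline void example_report_text_addr(void)
{
	printk(KERN_DEBUG "executing near %p\n", current_text_addr());
}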
43 | |||
44 | #ifdef CONFIG_X86_VSMP | ||
45 | #define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT) | ||
46 | #define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT) | ||
47 | #else | ||
48 | #define ARCH_MIN_TASKALIGN 16 | ||
49 | #define ARCH_MIN_MMSTRUCT_ALIGN 0 | ||
50 | #endif | ||
51 | |||
52 | /* | ||
53 | * CPU type and hardware bug flags. Kept separately for each CPU. | ||
54 | * Members of this structure are referenced in head.S, so think twice | ||
55 | * before touching them. [mj] | ||
56 | */ | ||
57 | |||
58 | struct cpuinfo_x86 { | ||
59 | __u8 x86; /* CPU family */ | ||
60 | __u8 x86_vendor; /* CPU vendor */ | ||
61 | __u8 x86_model; | ||
62 | __u8 x86_mask; | ||
63 | #ifdef CONFIG_X86_32 | ||
64 | char wp_works_ok; /* It doesn't on 386's */ | ||
65 | char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */ | ||
66 | char hard_math; | ||
67 | char rfu; | ||
68 | char fdiv_bug; | ||
69 | char f00f_bug; | ||
70 | char coma_bug; | ||
71 | char pad0; | ||
72 | #else | ||
73 | /* Number of 4K pages in DTLB/ITLB combined: */ | ||
74 | int x86_tlbsize; | ||
75 | __u8 x86_virt_bits, x86_phys_bits; | ||
76 | /* cpuid returned core id bits */ | ||
77 | __u8 x86_coreid_bits; | ||
78 | /* Max extended CPUID function supported */ | ||
79 | __u32 extended_cpuid_level; | ||
80 | #endif | ||
81 | int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */ | ||
82 | __u32 x86_capability[NCAPINTS]; | ||
83 | char x86_vendor_id[16]; | ||
84 | char x86_model_id[64]; | ||
85 | int x86_cache_size; /* in KB - valid for CPUs which support this | ||
86 | call */ | ||
87 | int x86_cache_alignment; /* In bytes */ | ||
88 | int x86_power; | ||
89 | unsigned long loops_per_jiffy; | ||
90 | #ifdef CONFIG_SMP | ||
91 | cpumask_t llc_shared_map; /* cpus sharing the last level cache */ | ||
92 | #endif | ||
93 | u16 x86_max_cores; /* cpuid returned max cores value */ | ||
94 | u16 apicid; | ||
95 | u16 x86_clflush_size; | ||
96 | #ifdef CONFIG_SMP | ||
97 | u16 booted_cores; /* number of cores as seen by OS */ | ||
98 | u16 phys_proc_id; /* Physical processor id. */ | ||
99 | u16 cpu_core_id; /* Core id */ | ||
100 | u16 cpu_index; /* index into per_cpu list */ | ||
101 | #endif | ||
102 | } __attribute__((__aligned__(SMP_CACHE_BYTES))); | ||
103 | |||
104 | #define X86_VENDOR_INTEL 0 | ||
105 | #define X86_VENDOR_CYRIX 1 | ||
106 | #define X86_VENDOR_AMD 2 | ||
107 | #define X86_VENDOR_UMC 3 | ||
108 | #define X86_VENDOR_NEXGEN 4 | ||
109 | #define X86_VENDOR_CENTAUR 5 | ||
110 | #define X86_VENDOR_TRANSMETA 7 | ||
111 | #define X86_VENDOR_NSC 8 | ||
112 | #define X86_VENDOR_NUM 9 | ||
113 | #define X86_VENDOR_UNKNOWN 0xff | ||
114 | |||
115 | /* | ||
116 | * capabilities of CPUs | ||
117 | */ | ||
118 | extern struct cpuinfo_x86 boot_cpu_data; | ||
119 | extern struct cpuinfo_x86 new_cpu_data; | ||
120 | extern struct tss_struct doublefault_tss; | ||
121 | extern __u32 cleared_cpu_caps[NCAPINTS]; | ||
122 | |||
123 | #ifdef CONFIG_SMP | ||
124 | DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); | ||
125 | #define cpu_data(cpu) per_cpu(cpu_info, cpu) | ||
126 | #define current_cpu_data cpu_data(smp_processor_id()) | ||
127 | #else | ||
128 | #define cpu_data(cpu) boot_cpu_data | ||
129 | #define current_cpu_data boot_cpu_data | ||
130 | #endif | ||
131 | |||
132 | void cpu_detect(struct cpuinfo_x86 *c); | ||
133 | |||
134 | extern void identify_cpu(struct cpuinfo_x86 *); | ||
135 | extern void identify_boot_cpu(void); | ||
136 | extern void identify_secondary_cpu(struct cpuinfo_x86 *); | ||
137 | extern void print_cpu_info(struct cpuinfo_x86 *); | ||
138 | extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); | ||
139 | extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); | ||
140 | extern unsigned short num_cache_leaves; | ||
141 | |||
142 | #if defined(CONFIG_X86_HT) || defined(CONFIG_X86_64) | ||
143 | extern void detect_ht(struct cpuinfo_x86 *c); | ||
144 | #else | ||
145 | static inline void detect_ht(struct cpuinfo_x86 *c) {} | ||
146 | #endif | ||
147 | |||
148 | static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, | ||
149 | unsigned int *ecx, unsigned int *edx) | ||
150 | { | ||
151 | /* ecx is often an input as well as an output. */ | ||
152 | __asm__("cpuid" | ||
153 | : "=a" (*eax), | ||
154 | "=b" (*ebx), | ||
155 | "=c" (*ecx), | ||
156 | "=d" (*edx) | ||
157 | : "0" (*eax), "2" (*ecx)); | ||
158 | } | ||
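/*
 * Illustrative sketch (editorial, not part of this patch): reading the
 * 12-byte vendor string from CPUID leaf 0 with native_cpuid(). The
 * string comes back in EBX, EDX, ECX order; memcpy() assumes
 * <linux/string.h>.
 */
static inline void example_read_vendor(char vendor[13])
{
	unsigned int eax = 0, ebx, ecx, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);
	memcpy(vendor + 0, &ebx, 4);	/* "Genu" on Intel parts */
	memcpy(vendor + 4, &edx, 4);	/* "ineI" */
	memcpy(vendor + 8, &ecx, 4);	/* "ntel" */
	vendor[12] = '\0';
}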
159 | |||
160 | static inline void load_cr3(pgd_t *pgdir) | ||
161 | { | ||
162 | write_cr3(__pa(pgdir)); | ||
163 | } | ||
164 | |||
165 | #ifdef CONFIG_X86_32 | ||
166 | /* This is the TSS defined by the hardware. */ | ||
167 | struct x86_hw_tss { | ||
168 | unsigned short back_link, __blh; | ||
169 | unsigned long sp0; | ||
170 | unsigned short ss0, __ss0h; | ||
171 | unsigned long sp1; | ||
172 | unsigned short ss1, __ss1h; /* ss1 caches MSR_IA32_SYSENTER_CS */ | ||
173 | unsigned long sp2; | ||
174 | unsigned short ss2, __ss2h; | ||
175 | unsigned long __cr3; | ||
176 | unsigned long ip; | ||
177 | unsigned long flags; | ||
178 | unsigned long ax, cx, dx, bx; | ||
179 | unsigned long sp, bp, si, di; | ||
180 | unsigned short es, __esh; | ||
181 | unsigned short cs, __csh; | ||
182 | unsigned short ss, __ssh; | ||
183 | unsigned short ds, __dsh; | ||
184 | unsigned short fs, __fsh; | ||
185 | unsigned short gs, __gsh; | ||
186 | unsigned short ldt, __ldth; | ||
187 | unsigned short trace, io_bitmap_base; | ||
188 | } __attribute__((packed)); | ||
189 | #else | ||
190 | struct x86_hw_tss { | ||
191 | u32 reserved1; | ||
192 | u64 sp0; | ||
193 | u64 sp1; | ||
194 | u64 sp2; | ||
195 | u64 reserved2; | ||
196 | u64 ist[7]; | ||
197 | u32 reserved3; | ||
198 | u32 reserved4; | ||
199 | u16 reserved5; | ||
200 | u16 io_bitmap_base; | ||
201 | } __attribute__((packed)) ____cacheline_aligned; | ||
202 | #endif | ||
203 | |||
204 | /* | ||
205 | * Size of io_bitmap. | ||
206 | */ | ||
207 | #define IO_BITMAP_BITS 65536 | ||
208 | #define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) | ||
209 | #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) | ||
210 | #define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap) | ||
211 | #define INVALID_IO_BITMAP_OFFSET 0x8000 | ||
212 | #define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000 | ||
213 | |||
214 | struct tss_struct { | ||
215 | struct x86_hw_tss x86_tss; | ||
216 | |||
217 | /* | ||
218 | * The extra 1 is there because the CPU will access an | ||
219 | * additional byte beyond the end of the IO permission | ||
220 | * bitmap. The extra byte must be all 1 bits, and must | ||
221 | * be within the limit. | ||
222 | */ | ||
223 | unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; | ||
224 | /* | ||
225 | * Cache the current maximum and the last task that used the bitmap: | ||
226 | */ | ||
227 | unsigned long io_bitmap_max; | ||
228 | struct thread_struct *io_bitmap_owner; | ||
229 | /* | ||
230 | * pads the TSS to be cacheline-aligned (size is 0x100) | ||
231 | */ | ||
232 | unsigned long __cacheline_filler[35]; | ||
233 | /* | ||
234 | * .. and then another 0x100 bytes for emergency kernel stack | ||
235 | */ | ||
236 | unsigned long stack[64]; | ||
237 | } __attribute__((packed)); | ||
238 | |||
239 | DECLARE_PER_CPU(struct tss_struct, init_tss); | ||
240 | |||
241 | /* Save the original ist values for checking stack pointers during debugging */ | ||
242 | struct orig_ist { | ||
243 | unsigned long ist[7]; | ||
244 | }; | ||
245 | |||
246 | #define MXCSR_DEFAULT 0x1f80 | ||
247 | |||
248 | struct i387_fsave_struct { | ||
249 | u32 cwd; | ||
250 | u32 swd; | ||
251 | u32 twd; | ||
252 | u32 fip; | ||
253 | u32 fcs; | ||
254 | u32 foo; | ||
255 | u32 fos; | ||
256 | u32 st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ | ||
257 | u32 status; /* software status information */ | ||
258 | }; | ||
259 | |||
260 | struct i387_fxsave_struct { | ||
261 | u16 cwd; | ||
262 | u16 swd; | ||
263 | u16 twd; | ||
264 | u16 fop; | ||
265 | union { | ||
266 | struct { | ||
267 | u64 rip; | ||
268 | u64 rdp; | ||
269 | }; | ||
270 | struct { | ||
271 | u32 fip; | ||
272 | u32 fcs; | ||
273 | u32 foo; | ||
274 | u32 fos; | ||
275 | }; | ||
276 | }; | ||
277 | u32 mxcsr; | ||
278 | u32 mxcsr_mask; | ||
279 | u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ | ||
280 | u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */ | ||
281 | u32 padding[24]; | ||
282 | } __attribute__((aligned(16))); | ||
283 | |||
284 | struct i387_soft_struct { | ||
285 | u32 cwd; | ||
286 | u32 swd; | ||
287 | u32 twd; | ||
288 | u32 fip; | ||
289 | u32 fcs; | ||
290 | u32 foo; | ||
291 | u32 fos; | ||
292 | u32 st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ | ||
293 | u8 ftop, changed, lookahead, no_update, rm, alimit; | ||
294 | struct info *info; | ||
295 | u32 entry_eip; | ||
296 | }; | ||
297 | |||
298 | union i387_union { | ||
299 | struct i387_fsave_struct fsave; | ||
300 | struct i387_fxsave_struct fxsave; | ||
301 | struct i387_soft_struct soft; | ||
302 | }; | ||
303 | |||
304 | #ifdef CONFIG_X86_32 | ||
305 | /* | ||
306 | * the following now lives in the per cpu area: | ||
307 | * extern int cpu_llc_id[NR_CPUS]; | ||
308 | */ | ||
309 | DECLARE_PER_CPU(u8, cpu_llc_id); | ||
310 | #else | ||
311 | DECLARE_PER_CPU(struct orig_ist, orig_ist); | ||
312 | #endif | ||
313 | |||
314 | extern void print_cpu_info(struct cpuinfo_x86 *); | ||
315 | extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); | ||
316 | extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); | ||
317 | extern unsigned short num_cache_leaves; | ||
318 | |||
319 | struct thread_struct { | ||
320 | /* cached TLS descriptors. */ | ||
321 | struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; | ||
322 | unsigned long sp0; | ||
323 | unsigned long sp; | ||
324 | #ifdef CONFIG_X86_32 | ||
325 | unsigned long sysenter_cs; | ||
326 | #else | ||
327 | unsigned long usersp; /* Copy from PDA */ | ||
328 | unsigned short es, ds, fsindex, gsindex; | ||
329 | #endif | ||
330 | unsigned long ip; | ||
331 | unsigned long fs; | ||
332 | unsigned long gs; | ||
333 | /* Hardware debugging registers */ | ||
334 | unsigned long debugreg0; | ||
335 | unsigned long debugreg1; | ||
336 | unsigned long debugreg2; | ||
337 | unsigned long debugreg3; | ||
338 | unsigned long debugreg6; | ||
339 | unsigned long debugreg7; | ||
340 | /* fault info */ | ||
341 | unsigned long cr2, trap_no, error_code; | ||
342 | /* floating point info */ | ||
343 | union i387_union i387 __attribute__((aligned(16))); | ||
344 | #ifdef CONFIG_X86_32 | ||
345 | /* virtual 86 mode info */ | ||
346 | struct vm86_struct __user *vm86_info; | ||
347 | unsigned long screen_bitmap; | ||
348 | unsigned long v86flags, v86mask, saved_sp0; | ||
349 | unsigned int saved_fs, saved_gs; | ||
350 | #endif | ||
351 | /* IO permissions */ | ||
352 | unsigned long *io_bitmap_ptr; | ||
353 | unsigned long iopl; | ||
354 | /* max allowed port in the bitmap, in bytes: */ | ||
355 | unsigned io_bitmap_max; | ||
356 | /* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */ | ||
357 | unsigned long debugctlmsr; | ||
358 | /* Debug Store: if non-zero, points to a DS save area configuration; | ||
359 | * it goes into MSR_IA32_DS_AREA */ | ||
360 | unsigned long ds_area_msr; | ||
361 | }; | ||
362 | |||
363 | static inline unsigned long native_get_debugreg(int regno) | ||
364 | { | ||
365 | unsigned long val = 0; /* Damn you, gcc! */ | ||
366 | |||
367 | switch (regno) { | ||
368 | case 0: | ||
369 | asm("mov %%db0, %0" :"=r" (val)); break; | ||
370 | case 1: | ||
371 | asm("mov %%db1, %0" :"=r" (val)); break; | ||
372 | case 2: | ||
373 | asm("mov %%db2, %0" :"=r" (val)); break; | ||
374 | case 3: | ||
375 | asm("mov %%db3, %0" :"=r" (val)); break; | ||
376 | case 6: | ||
377 | asm("mov %%db6, %0" :"=r" (val)); break; | ||
378 | case 7: | ||
379 | asm("mov %%db7, %0" :"=r" (val)); break; | ||
380 | default: | ||
381 | BUG(); | ||
382 | } | ||
383 | return val; | ||
384 | } | ||
385 | |||
386 | static inline void native_set_debugreg(int regno, unsigned long value) | ||
387 | { | ||
388 | switch (regno) { | ||
389 | case 0: | ||
390 | asm("mov %0,%%db0" : /* no output */ :"r" (value)); | ||
391 | break; | ||
392 | case 1: | ||
393 | asm("mov %0,%%db1" : /* no output */ :"r" (value)); | ||
394 | break; | ||
395 | case 2: | ||
396 | asm("mov %0,%%db2" : /* no output */ :"r" (value)); | ||
397 | break; | ||
398 | case 3: | ||
399 | asm("mov %0,%%db3" : /* no output */ :"r" (value)); | ||
400 | break; | ||
401 | case 6: | ||
402 | asm("mov %0,%%db6" : /* no output */ :"r" (value)); | ||
403 | break; | ||
404 | case 7: | ||
405 | asm("mov %0,%%db7" : /* no output */ :"r" (value)); | ||
406 | break; | ||
407 | default: | ||
408 | BUG(); | ||
409 | } | ||
410 | } | ||
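/*
 * Illustrative sketch (editorial, not part of this patch): arming DR0
 * as an execute breakpoint via the accessors above. Per the
 * architectural DR7 layout, bit 0 is the local enable for DR0, and
 * leaving R/W0 and LEN0 (bits 16-19) zero selects "instruction
 * execution, 1 byte".
 */
static inline void example_arm_dr0(unsigned long addr)
{
	native_set_debugreg(0, addr);	/* linear address to trap on */
	native_set_debugreg(7, native_get_debugreg(7) | 0x1);
}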
411 | |||
412 | /* | ||
413 | * Set IOPL bits in EFLAGS from given mask | ||
414 | */ | ||
415 | static inline void native_set_iopl_mask(unsigned mask) | ||
416 | { | ||
417 | #ifdef CONFIG_X86_32 | ||
418 | unsigned int reg; | ||
419 | __asm__ __volatile__ ("pushfl;" | ||
420 | "popl %0;" | ||
421 | "andl %1, %0;" | ||
422 | "orl %2, %0;" | ||
423 | "pushl %0;" | ||
424 | "popfl" | ||
425 | : "=&r" (reg) | ||
426 | : "i" (~X86_EFLAGS_IOPL), "r" (mask)); | ||
427 | #endif | ||
428 | } | ||
429 | |||
430 | static inline void native_load_sp0(struct tss_struct *tss, | ||
431 | struct thread_struct *thread) | ||
432 | { | ||
433 | tss->x86_tss.sp0 = thread->sp0; | ||
434 | #ifdef CONFIG_X86_32 | ||
435 | /* Only happens when SEP is enabled, no need to test "SEP"arately */ | ||
436 | if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) { | ||
437 | tss->x86_tss.ss1 = thread->sysenter_cs; | ||
438 | wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); | ||
439 | } | ||
440 | #endif | ||
441 | } | ||
442 | |||
443 | static inline void native_swapgs(void) | ||
444 | { | ||
445 | #ifdef CONFIG_X86_64 | ||
446 | asm volatile("swapgs" ::: "memory"); | ||
447 | #endif | ||
448 | } | ||
449 | |||
450 | #ifdef CONFIG_PARAVIRT | ||
451 | #include <asm/paravirt.h> | ||
452 | #else | ||
453 | #define __cpuid native_cpuid | ||
454 | #define paravirt_enabled() 0 | ||
455 | |||
456 | /* | ||
457 | * These special macros can be used to get or set a debugging register | ||
458 | */ | ||
459 | #define get_debugreg(var, register) \ | ||
460 | (var) = native_get_debugreg(register) | ||
461 | #define set_debugreg(value, register) \ | ||
462 | native_set_debugreg(register, value) | ||
463 | |||
464 | static inline void load_sp0(struct tss_struct *tss, | ||
465 | struct thread_struct *thread) | ||
466 | { | ||
467 | native_load_sp0(tss, thread); | ||
468 | } | ||
469 | |||
470 | #define set_iopl_mask native_set_iopl_mask | ||
471 | #define SWAPGS swapgs | ||
472 | #endif /* CONFIG_PARAVIRT */ | ||
473 | |||
474 | /* | ||
475 | * Save the cr4 feature set we're using (i.e., | ||
476 | * Pentium 4MB enable and PPro Global page | ||
477 | * enable), so that any CPUs that boot up | ||
478 | * after us can get the correct flags. | ||
479 | */ | ||
480 | extern unsigned long mmu_cr4_features; | ||
481 | |||
482 | static inline void set_in_cr4(unsigned long mask) | ||
483 | { | ||
484 | unsigned cr4; | ||
485 | mmu_cr4_features |= mask; | ||
486 | cr4 = read_cr4(); | ||
487 | cr4 |= mask; | ||
488 | write_cr4(cr4); | ||
489 | } | ||
490 | |||
491 | static inline void clear_in_cr4(unsigned long mask) | ||
492 | { | ||
493 | unsigned cr4; | ||
494 | mmu_cr4_features &= ~mask; | ||
495 | cr4 = read_cr4(); | ||
496 | cr4 &= ~mask; | ||
497 | write_cr4(cr4); | ||
498 | } | ||
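/*
 * Illustrative sketch (editorial, not part of this patch): enabling
 * global pages with the helper above. X86_CR4_PGE comes from
 * <asm/processor-flags.h>, included at the top of this file; updating
 * mmu_cr4_features lets later-booting CPUs inherit the bit.
 */
static inline void example_enable_global_pages(void)
{
	set_in_cr4(X86_CR4_PGE);
}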
499 | |||
500 | struct microcode_header { | ||
501 | unsigned int hdrver; | ||
502 | unsigned int rev; | ||
503 | unsigned int date; | ||
504 | unsigned int sig; | ||
505 | unsigned int cksum; | ||
506 | unsigned int ldrver; | ||
507 | unsigned int pf; | ||
508 | unsigned int datasize; | ||
509 | unsigned int totalsize; | ||
510 | unsigned int reserved[3]; | ||
511 | }; | ||
512 | |||
513 | struct microcode { | ||
514 | struct microcode_header hdr; | ||
515 | unsigned int bits[0]; | ||
516 | }; | ||
517 | |||
518 | typedef struct microcode microcode_t; | ||
519 | typedef struct microcode_header microcode_header_t; | ||
520 | |||
521 | /* microcode format is extended from prescott processors */ | ||
522 | struct extended_signature { | ||
523 | unsigned int sig; | ||
524 | unsigned int pf; | ||
525 | unsigned int cksum; | ||
526 | }; | ||
527 | |||
528 | struct extended_sigtable { | ||
529 | unsigned int count; | ||
530 | unsigned int cksum; | ||
531 | unsigned int reserved[3]; | ||
532 | struct extended_signature sigs[0]; | ||
533 | }; | ||
534 | |||
535 | typedef struct { | ||
536 | unsigned long seg; | ||
537 | } mm_segment_t; | ||
538 | |||
539 | |||
540 | /* | ||
541 | * create a kernel thread without removing it from tasklists | ||
542 | */ | ||
543 | extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); | ||
544 | |||
545 | /* Free all resources held by a thread. */ | ||
546 | extern void release_thread(struct task_struct *); | ||
547 | |||
548 | /* Prepare to copy thread state - unlazy all lazy status */ | ||
549 | extern void prepare_to_copy(struct task_struct *tsk); | ||
550 | |||
551 | unsigned long get_wchan(struct task_struct *p); | ||
552 | |||
553 | /* | ||
554 | * Generic CPUID function | ||
555 | * clear %ecx since some CPUs (Cyrix MII) do not set or clear %ecx, | ||
556 | * resulting in stale register contents being returned. | ||
557 | */ | ||
558 | static inline void cpuid(unsigned int op, | ||
559 | unsigned int *eax, unsigned int *ebx, | ||
560 | unsigned int *ecx, unsigned int *edx) | ||
561 | { | ||
562 | *eax = op; | ||
563 | *ecx = 0; | ||
564 | __cpuid(eax, ebx, ecx, edx); | ||
565 | } | ||
566 | |||
567 | /* Some CPUID calls want 'count' to be placed in ecx */ | ||
568 | static inline void cpuid_count(unsigned int op, int count, | ||
569 | unsigned int *eax, unsigned int *ebx, | ||
570 | unsigned int *ecx, unsigned int *edx) | ||
571 | { | ||
572 | *eax = op; | ||
573 | *ecx = count; | ||
574 | __cpuid(eax, ebx, ecx, edx); | ||
575 | } | ||
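/*
 * Illustrative sketch (editorial, not part of this patch): CPUID leaf
 * 4 takes a subleaf index in ECX, which is exactly what cpuid_count()
 * provides. A cache type of 0 in EAX[4:0] terminates the enumeration.
 */
static inline int example_count_cache_leaves(void)
{
	unsigned int eax, ebx, ecx, edx;
	int i;

	for (i = 0; ; i++) {
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		if ((eax & 0x1f) == 0)	/* no more cache levels */
			break;
	}
	return i;
}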
576 | |||
577 | /* | ||
578 | * CPUID functions returning a single datum | ||
579 | */ | ||
580 | static inline unsigned int cpuid_eax(unsigned int op) | ||
581 | { | ||
582 | unsigned int eax, ebx, ecx, edx; | ||
583 | |||
584 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
585 | return eax; | ||
586 | } | ||
587 | static inline unsigned int cpuid_ebx(unsigned int op) | ||
588 | { | ||
589 | unsigned int eax, ebx, ecx, edx; | ||
590 | |||
591 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
592 | return ebx; | ||
593 | } | ||
594 | static inline unsigned int cpuid_ecx(unsigned int op) | ||
595 | { | ||
596 | unsigned int eax, ebx, ecx, edx; | ||
597 | |||
598 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
599 | return ecx; | ||
600 | } | ||
601 | static inline unsigned int cpuid_edx(unsigned int op) | ||
602 | { | ||
603 | unsigned int eax, ebx, ecx, edx; | ||
604 | |||
605 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
606 | return edx; | ||
607 | } | ||
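/*
 * Illustrative sketch (editorial, not part of this patch): decoding
 * family/model/stepping from CPUID leaf 1 with the single-datum
 * helpers above, including the extended family/model fields.
 */
static inline void example_fms(unsigned int *family, unsigned int *model,
			       unsigned int *stepping)
{
	unsigned int sig = cpuid_eax(1);

	*family = (sig >> 8) & 0xf;
	*model = (sig >> 4) & 0xf;
	*stepping = sig & 0xf;
	if (*family == 0xf)
		*family += (sig >> 20) & 0xff;		/* extended family */
	if (*family >= 0x6)
		*model += ((sig >> 16) & 0xf) << 4;	/* extended model */
}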
608 | |||
609 | /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ | ||
610 | static inline void rep_nop(void) | ||
611 | { | ||
612 | __asm__ __volatile__("rep;nop": : :"memory"); | ||
613 | } | ||
614 | |||
615 | /* Stop speculative execution */ | ||
616 | static inline void sync_core(void) | ||
617 | { | ||
618 | int tmp; | ||
619 | asm volatile("cpuid" : "=a" (tmp) : "0" (1) | ||
620 | : "ebx", "ecx", "edx", "memory"); | ||
621 | } | ||
622 | |||
623 | #define cpu_relax() rep_nop() | ||
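/*
 * Illustrative sketch (editorial, not part of this patch): the
 * canonical busy-wait loop. PAUSE hints to the CPU that this is a
 * spin-wait, which saves power and avoids a memory-order
 * mis-speculation penalty on loop exit.
 */
static inline void example_spin_until(volatile int *flag)
{
	while (!*flag)
		cpu_relax();
}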
624 | |||
625 | static inline void __monitor(const void *eax, unsigned long ecx, | ||
626 | unsigned long edx) | ||
627 | { | ||
628 | /* "monitor %eax,%ecx,%edx;" */ | ||
629 | asm volatile( | ||
630 | ".byte 0x0f,0x01,0xc8;" | ||
631 | : :"a" (eax), "c" (ecx), "d"(edx)); | ||
632 | } | ||
633 | |||
634 | static inline void __mwait(unsigned long eax, unsigned long ecx) | ||
635 | { | ||
636 | /* "mwait %eax,%ecx;" */ | ||
637 | asm volatile( | ||
638 | ".byte 0x0f,0x01,0xc9;" | ||
639 | : :"a" (eax), "c" (ecx)); | ||
640 | } | ||
641 | |||
642 | static inline void __sti_mwait(unsigned long eax, unsigned long ecx) | ||
643 | { | ||
644 | /* "mwait %eax,%ecx;" */ | ||
645 | asm volatile( | ||
646 | "sti; .byte 0x0f,0x01,0xc9;" | ||
647 | : :"a" (eax), "c" (ecx)); | ||
648 | } | ||
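/*
 * Illustrative sketch (editorial, not part of this patch): the
 * MONITOR/MWAIT idle pattern. MONITOR arms an address-range monitor
 * on the trigger word; MWAIT then sleeps until that line is written
 * or an interrupt arrives. Real callers should check
 * X86_FEATURE_MWAIT first.
 */
static inline void example_mwait_on(volatile unsigned long *trigger)
{
	__monitor((const void *)trigger, 0, 0);
	if (!*trigger)		/* re-check after arming the monitor */
		__mwait(0, 0);
}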
649 | |||
650 | extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); | ||
651 | |||
652 | extern int force_mwait; | ||
653 | |||
654 | extern void select_idle_routine(const struct cpuinfo_x86 *c); | ||
655 | |||
656 | extern unsigned long boot_option_idle_override; | ||
657 | |||
658 | extern void enable_sep_cpu(void); | ||
659 | extern int sysenter_setup(void); | ||
660 | |||
661 | /* Defined in head.S */ | ||
662 | extern struct desc_ptr early_gdt_descr; | ||
663 | |||
664 | extern void cpu_set_gdt(int); | ||
665 | extern void switch_to_new_gdt(void); | ||
666 | extern void cpu_init(void); | ||
667 | extern void init_gdt(int cpu); | ||
668 | |||
669 | /* From the system description table in the BIOS. Mostly for MCA use, | ||
670 | * but others may find it useful. */ | ||
671 | extern unsigned int machine_id; | ||
672 | extern unsigned int machine_submodel_id; | ||
673 | extern unsigned int BIOS_revision; | ||
674 | extern unsigned int mca_pentium_flag; | ||
675 | |||
676 | /* Boot loader type from the setup header */ | ||
677 | extern int bootloader_type; | ||
678 | |||
679 | extern char ignore_fpu_irq; | ||
680 | #define cache_line_size() (boot_cpu_data.x86_cache_alignment) | ||
681 | |||
682 | #define HAVE_ARCH_PICK_MMAP_LAYOUT 1 | ||
683 | #define ARCH_HAS_PREFETCHW | ||
684 | #define ARCH_HAS_SPINLOCK_PREFETCH | ||
685 | |||
686 | #ifdef CONFIG_X86_32 | ||
687 | #define BASE_PREFETCH ASM_NOP4 | ||
688 | #define ARCH_HAS_PREFETCH | ||
689 | #else | ||
690 | #define BASE_PREFETCH "prefetcht0 (%1)" | ||
691 | #endif | ||
692 | |||
693 | /* Prefetch instructions for Pentium III and AMD Athlon */ | ||
694 | /* It's not worth caring about 3dnow! prefetches for the K6, | ||
695 | because they are microcoded there and very slow. | ||
696 | However, we don't currently do prefetches for pre-XP Athlons; | ||
697 | that should be fixed. */ | ||
698 | static inline void prefetch(const void *x) | ||
699 | { | ||
700 | alternative_input(BASE_PREFETCH, | ||
701 | "prefetchnta (%1)", | ||
702 | X86_FEATURE_XMM, | ||
703 | "r" (x)); | ||
704 | } | ||
705 | |||
706 | /* 3dnow! prefetch to get an exclusive cache line. Useful for | ||
707 | spinlocks to avoid one state transition in the cache coherency protocol. */ | ||
708 | static inline void prefetchw(const void *x) | ||
709 | { | ||
710 | alternative_input(BASE_PREFETCH, | ||
711 | "prefetchw (%1)", | ||
712 | X86_FEATURE_3DNOW, | ||
713 | "r" (x)); | ||
714 | } | ||
715 | |||
716 | #define spin_lock_prefetch(x) prefetchw(x) | ||
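/*
 * Illustrative sketch (editorial, not part of this patch): overlapping
 * the cache miss for the next node with work on the current one, the
 * typical use of prefetch() in a pointer-chasing loop.
 */
struct example_node {
	struct example_node *next;
	int payload;
};

static inline int example_sum(struct example_node *n)
{
	int sum = 0;

	for (; n; n = n->next) {
		prefetch(n->next);	/* hide the miss behind this work */
		sum += n->payload;
	}
	return sum;
}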
1 | #ifdef CONFIG_X86_32 | 717 | #ifdef CONFIG_X86_32 |
2 | # include "processor_32.h" | 718 | /* |
719 | * User space process size: 3GB (default). | ||
720 | */ | ||
721 | #define TASK_SIZE (PAGE_OFFSET) | ||
722 | |||
723 | #define INIT_THREAD { \ | ||
724 | .sp0 = sizeof(init_stack) + (long)&init_stack, \ | ||
725 | .vm86_info = NULL, \ | ||
726 | .sysenter_cs = __KERNEL_CS, \ | ||
727 | .io_bitmap_ptr = NULL, \ | ||
728 | .fs = __KERNEL_PERCPU, \ | ||
729 | } | ||
730 | |||
731 | /* | ||
732 | * Note that the .io_bitmap member must be extra-big. This is because | ||
733 | * the CPU will access an additional byte beyond the end of the IO | ||
734 | * permission bitmap. The extra byte must be all 1 bits, and must | ||
735 | * be within the limit. | ||
736 | */ | ||
737 | #define INIT_TSS { \ | ||
738 | .x86_tss = { \ | ||
739 | .sp0 = sizeof(init_stack) + (long)&init_stack, \ | ||
740 | .ss0 = __KERNEL_DS, \ | ||
741 | .ss1 = __KERNEL_CS, \ | ||
742 | .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ | ||
743 | }, \ | ||
744 | .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \ | ||
745 | } | ||
746 | |||
747 | #define start_thread(regs, new_eip, new_esp) do { \ | ||
748 | __asm__("movl %0,%%gs": :"r" (0)); \ | ||
749 | regs->fs = 0; \ | ||
750 | set_fs(USER_DS); \ | ||
751 | regs->ds = __USER_DS; \ | ||
752 | regs->es = __USER_DS; \ | ||
753 | regs->ss = __USER_DS; \ | ||
754 | regs->cs = __USER_CS; \ | ||
755 | regs->ip = new_eip; \ | ||
756 | regs->sp = new_esp; \ | ||
757 | } while (0) | ||
758 | |||
759 | |||
760 | extern unsigned long thread_saved_pc(struct task_struct *tsk); | ||
761 | |||
762 | #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long)) | ||
763 | #define KSTK_TOP(info) \ | ||
764 | ({ \ | ||
765 | unsigned long *__ptr = (unsigned long *)(info); \ | ||
766 | (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \ | ||
767 | }) | ||
768 | |||
769 | /* | ||
770 | * The below -8 is to reserve 8 bytes on top of the ring0 stack. | ||
771 | * This is necessary to guarantee that the entire "struct pt_regs" | ||
772 | * is accessible even if the CPU hasn't stored the SS/ESP registers | ||
773 | * on the stack (interrupt gate does not save these registers | ||
774 | * when switching to the same priv ring). | ||
775 | * Therefore beware: accessing the ss/esp fields of the | ||
776 | * "struct pt_regs" is possible, but they may contain the | ||
777 | * completely wrong values. | ||
778 | */ | ||
779 | #define task_pt_regs(task) \ | ||
780 | ({ \ | ||
781 | struct pt_regs *__regs__; \ | ||
782 | __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \ | ||
783 | __regs__ - 1; \ | ||
784 | }) | ||
785 | |||
786 | #define KSTK_ESP(task) (task_pt_regs(task)->sp) | ||
787 | |||
3 | #else | 788 | #else |
4 | # include "processor_64.h" | 789 | /* |
790 | * User space process size. 47bits minus one guard page. | ||
791 | */ | ||
792 | #define TASK_SIZE64 (0x800000000000UL - 4096) | ||
793 | |||
794 | /* This decides where the kernel will search for a free chunk of vm | ||
795 | * space during mmap's. | ||
796 | */ | ||
797 | #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \ | ||
798 | 0xc0000000 : 0xFFFFe000) | ||
799 | |||
800 | #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \ | ||
801 | IA32_PAGE_OFFSET : TASK_SIZE64) | ||
802 | #define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \ | ||
803 | IA32_PAGE_OFFSET : TASK_SIZE64) | ||
804 | |||
805 | #define INIT_THREAD { \ | ||
806 | .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ | ||
807 | } | ||
808 | |||
809 | #define INIT_TSS { \ | ||
810 | .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ | ||
811 | } | ||
812 | |||
813 | #define start_thread(regs, new_rip, new_rsp) do { \ | ||
814 | asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \ | ||
815 | load_gs_index(0); \ | ||
816 | (regs)->ip = (new_rip); \ | ||
817 | (regs)->sp = (new_rsp); \ | ||
818 | write_pda(oldrsp, (new_rsp)); \ | ||
819 | (regs)->cs = __USER_CS; \ | ||
820 | (regs)->ss = __USER_DS; \ | ||
821 | (regs)->flags = 0x200; \ | ||
822 | set_fs(USER_DS); \ | ||
823 | } while (0) | ||
824 | |||
825 | /* | ||
826 | * Return saved PC of a blocked thread. | ||
827 | * What is this good for? It will always be the scheduler or ret_from_fork. | ||
828 | */ | ||
829 | #define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8)) | ||
830 | |||
831 | #define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1) | ||
832 | #define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */ | ||
833 | #endif /* CONFIG_X86_64 */ | ||
834 | |||
835 | /* This decides where the kernel will search for a free chunk of vm | ||
836 | * space during mmap's. | ||
837 | */ | ||
838 | #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) | ||
839 | |||
840 | #define KSTK_EIP(task) (task_pt_regs(task)->ip) | ||
841 | |||
5 | #endif | 842 | #endif |