Diffstat (limited to 'include/asm-x86/processor.h')
-rw-r--r-- | include/asm-x86/processor.h | 946 |
1 file changed, 0 insertions(+), 946 deletions(-)
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
deleted file mode 100644
index 4df3e2f6fb56..000000000000
--- a/include/asm-x86/processor.h
+++ /dev/null
@@ -1,946 +0,0 @@
1 | #ifndef __ASM_X86_PROCESSOR_H | ||
2 | #define __ASM_X86_PROCESSOR_H | ||
3 | |||
4 | #include <asm/processor-flags.h> | ||
5 | |||
6 | /* Forward declaration, a strange C thing */ | ||
7 | struct task_struct; | ||
8 | struct mm_struct; | ||
9 | |||
10 | #include <asm/vm86.h> | ||
11 | #include <asm/math_emu.h> | ||
12 | #include <asm/segment.h> | ||
13 | #include <asm/types.h> | ||
14 | #include <asm/sigcontext.h> | ||
15 | #include <asm/current.h> | ||
16 | #include <asm/cpufeature.h> | ||
17 | #include <asm/system.h> | ||
18 | #include <asm/page.h> | ||
19 | #include <asm/percpu.h> | ||
20 | #include <asm/msr.h> | ||
21 | #include <asm/desc_defs.h> | ||
22 | #include <asm/nops.h> | ||
23 | |||
24 | #include <linux/personality.h> | ||
25 | #include <linux/cpumask.h> | ||
26 | #include <linux/cache.h> | ||
27 | #include <linux/threads.h> | ||
28 | #include <linux/init.h> | ||
29 | |||
30 | /* | ||
31 | * Default implementation of macro that returns current | ||
32 | * instruction pointer ("program counter"). | ||
33 | */ | ||
34 | static inline void *current_text_addr(void) | ||
35 | { | ||
36 | void *pc; | ||
37 | |||
38 | asm volatile("mov $1f, %0; 1:":"=r" (pc)); | ||
39 | |||
40 | return pc; | ||
41 | } | ||
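A minimal sketch of how a helper like this might be used for ad-hoc diagnostics; the wrapper function and the printk call (from <linux/kernel.h>) are illustrative assumptions, not part of this header:

/* Illustrative only: report roughly where the kernel is executing. */
static void report_text_addr(void)
{
	printk(KERN_DEBUG "executing near %p\n", current_text_addr());
}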
42 | |||
43 | #ifdef CONFIG_X86_VSMP | ||
44 | # define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT) | ||
45 | # define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT) | ||
46 | #else | ||
47 | # define ARCH_MIN_TASKALIGN 16 | ||
48 | # define ARCH_MIN_MMSTRUCT_ALIGN 0 | ||
49 | #endif | ||
50 | |||
51 | /* | ||
52 | * CPU type and hardware bug flags. Kept separately for each CPU. | ||
53 | * Members of this structure are referenced in head.S, so think twice | ||
54 | * before touching them. [mj] | ||
55 | */ | ||
56 | |||
57 | struct cpuinfo_x86 { | ||
58 | __u8 x86; /* CPU family */ | ||
59 | __u8 x86_vendor; /* CPU vendor */ | ||
60 | __u8 x86_model; | ||
61 | __u8 x86_mask; | ||
62 | #ifdef CONFIG_X86_32 | ||
63 | char wp_works_ok; /* It doesn't on 386's */ | ||
64 | |||
65 | /* Problems on some 486Dx4's and old 386's: */ | ||
66 | char hlt_works_ok; | ||
67 | char hard_math; | ||
68 | char rfu; | ||
69 | char fdiv_bug; | ||
70 | char f00f_bug; | ||
71 | char coma_bug; | ||
72 | char pad0; | ||
73 | #else | ||
74 | /* Number of 4K pages in DTLB/ITLB combined: */ | ||
75 | int x86_tlbsize; | ||
76 | __u8 x86_virt_bits; | ||
77 | __u8 x86_phys_bits; | ||
78 | /* CPUID returned core id bits: */ | ||
79 | __u8 x86_coreid_bits; | ||
80 | /* Max extended CPUID function supported: */ | ||
81 | __u32 extended_cpuid_level; | ||
82 | #endif | ||
83 | /* Maximum supported CPUID level, -1=no CPUID: */ | ||
84 | int cpuid_level; | ||
85 | __u32 x86_capability[NCAPINTS]; | ||
86 | char x86_vendor_id[16]; | ||
87 | char x86_model_id[64]; | ||
88 | /* In KB - valid for CPUs which support this call: */ | ||
89 | int x86_cache_size; | ||
90 | int x86_cache_alignment; /* In bytes */ | ||
91 | int x86_power; | ||
92 | unsigned long loops_per_jiffy; | ||
93 | #ifdef CONFIG_SMP | ||
94 | /* cpus sharing the last level cache: */ | ||
95 | cpumask_t llc_shared_map; | ||
96 | #endif | ||
97 | /* cpuid returned max cores value: */ | ||
98 | u16 x86_max_cores; | ||
99 | u16 apicid; | ||
100 | u16 initial_apicid; | ||
101 | u16 x86_clflush_size; | ||
102 | #ifdef CONFIG_SMP | ||
103 | /* number of cores as seen by the OS: */ | ||
104 | u16 booted_cores; | ||
105 | /* Physical processor id: */ | ||
106 | u16 phys_proc_id; | ||
107 | /* Core id: */ | ||
108 | u16 cpu_core_id; | ||
109 | /* Index into per_cpu list: */ | ||
110 | u16 cpu_index; | ||
111 | #endif | ||
112 | } __attribute__((__aligned__(SMP_CACHE_BYTES))); | ||
113 | |||
114 | #define X86_VENDOR_INTEL 0 | ||
115 | #define X86_VENDOR_CYRIX 1 | ||
116 | #define X86_VENDOR_AMD 2 | ||
117 | #define X86_VENDOR_UMC 3 | ||
118 | #define X86_VENDOR_CENTAUR 5 | ||
119 | #define X86_VENDOR_TRANSMETA 7 | ||
120 | #define X86_VENDOR_NSC 8 | ||
121 | #define X86_VENDOR_NUM 9 | ||
122 | |||
123 | #define X86_VENDOR_UNKNOWN 0xff | ||
124 | |||
125 | /* | ||
126 | * capabilities of CPUs | ||
127 | */ | ||
128 | extern struct cpuinfo_x86 boot_cpu_data; | ||
129 | extern struct cpuinfo_x86 new_cpu_data; | ||
130 | |||
131 | extern struct tss_struct doublefault_tss; | ||
132 | extern __u32 cleared_cpu_caps[NCAPINTS]; | ||
133 | |||
134 | #ifdef CONFIG_SMP | ||
135 | DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); | ||
136 | #define cpu_data(cpu) per_cpu(cpu_info, cpu) | ||
137 | #define current_cpu_data __get_cpu_var(cpu_info) | ||
138 | #else | ||
139 | #define cpu_data(cpu) boot_cpu_data | ||
140 | #define current_cpu_data boot_cpu_data | ||
141 | #endif | ||
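A hedged sketch of these accessors in use; the loop body is an illustrative assumption (for_each_online_cpu() comes from <linux/cpumask.h>, already included above):

/* Illustrative only: print the model string of every online CPU. */
static void dump_model_ids(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		printk(KERN_INFO "CPU%d: %s\n", cpu, cpu_data(cpu).x86_model_id);
}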
142 | |||
143 | static inline int hlt_works(int cpu) | ||
144 | { | ||
145 | #ifdef CONFIG_X86_32 | ||
146 | return cpu_data(cpu).hlt_works_ok; | ||
147 | #else | ||
148 | return 1; | ||
149 | #endif | ||
150 | } | ||
151 | |||
152 | #define cache_line_size() (boot_cpu_data.x86_cache_alignment) | ||
153 | |||
154 | extern void cpu_detect(struct cpuinfo_x86 *c); | ||
155 | |||
156 | extern void early_cpu_init(void); | ||
157 | extern void identify_boot_cpu(void); | ||
158 | extern void identify_secondary_cpu(struct cpuinfo_x86 *); | ||
159 | extern void print_cpu_info(struct cpuinfo_x86 *); | ||
160 | extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); | ||
161 | extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); | ||
162 | extern unsigned short num_cache_leaves; | ||
163 | |||
164 | #if defined(CONFIG_X86_HT) || defined(CONFIG_X86_64) | ||
165 | extern void detect_ht(struct cpuinfo_x86 *c); | ||
166 | #else | ||
167 | static inline void detect_ht(struct cpuinfo_x86 *c) {} | ||
168 | #endif | ||
169 | |||
170 | static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, | ||
171 | unsigned int *ecx, unsigned int *edx) | ||
172 | { | ||
173 | /* ecx is often an input as well as an output. */ | ||
174 | asm("cpuid" | ||
175 | : "=a" (*eax), | ||
176 | "=b" (*ebx), | ||
177 | "=c" (*ecx), | ||
178 | "=d" (*edx) | ||
179 | : "0" (*eax), "2" (*ecx)); | ||
180 | } | ||
181 | |||
182 | static inline void load_cr3(pgd_t *pgdir) | ||
183 | { | ||
184 | write_cr3(__pa(pgdir)); | ||
185 | } | ||
186 | |||
187 | #ifdef CONFIG_X86_32 | ||
188 | /* This is the TSS defined by the hardware. */ | ||
189 | struct x86_hw_tss { | ||
190 | unsigned short back_link, __blh; | ||
191 | unsigned long sp0; | ||
192 | unsigned short ss0, __ss0h; | ||
193 | unsigned long sp1; | ||
194 | /* ss1 caches MSR_IA32_SYSENTER_CS: */ | ||
195 | unsigned short ss1, __ss1h; | ||
196 | unsigned long sp2; | ||
197 | unsigned short ss2, __ss2h; | ||
198 | unsigned long __cr3; | ||
199 | unsigned long ip; | ||
200 | unsigned long flags; | ||
201 | unsigned long ax; | ||
202 | unsigned long cx; | ||
203 | unsigned long dx; | ||
204 | unsigned long bx; | ||
205 | unsigned long sp; | ||
206 | unsigned long bp; | ||
207 | unsigned long si; | ||
208 | unsigned long di; | ||
209 | unsigned short es, __esh; | ||
210 | unsigned short cs, __csh; | ||
211 | unsigned short ss, __ssh; | ||
212 | unsigned short ds, __dsh; | ||
213 | unsigned short fs, __fsh; | ||
214 | unsigned short gs, __gsh; | ||
215 | unsigned short ldt, __ldth; | ||
216 | unsigned short trace; | ||
217 | unsigned short io_bitmap_base; | ||
218 | |||
219 | } __attribute__((packed)); | ||
220 | #else | ||
221 | struct x86_hw_tss { | ||
222 | u32 reserved1; | ||
223 | u64 sp0; | ||
224 | u64 sp1; | ||
225 | u64 sp2; | ||
226 | u64 reserved2; | ||
227 | u64 ist[7]; | ||
228 | u32 reserved3; | ||
229 | u32 reserved4; | ||
230 | u16 reserved5; | ||
231 | u16 io_bitmap_base; | ||
232 | |||
233 | } __attribute__((packed)) ____cacheline_aligned; | ||
234 | #endif | ||
235 | |||
236 | /* | ||
237 | * IO-bitmap sizes: | ||
238 | */ | ||
239 | #define IO_BITMAP_BITS 65536 | ||
240 | #define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) | ||
241 | #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) | ||
242 | #define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap) | ||
243 | #define INVALID_IO_BITMAP_OFFSET 0x8000 | ||
244 | #define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000 | ||
245 | |||
246 | struct tss_struct { | ||
247 | /* | ||
248 | * The hardware state: | ||
249 | */ | ||
250 | struct x86_hw_tss x86_tss; | ||
251 | |||
252 | /* | ||
253 | * The extra 1 is there because the CPU will access an | ||
254 | * additional byte beyond the end of the IO permission | ||
255 | * bitmap. The extra byte must be all 1 bits, and must | ||
256 | * be within the limit. | ||
257 | */ | ||
258 | unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; | ||
259 | /* | ||
260 | * Cache the current maximum and the last task that used the bitmap: | ||
261 | */ | ||
262 | unsigned long io_bitmap_max; | ||
263 | struct thread_struct *io_bitmap_owner; | ||
264 | |||
265 | /* | ||
266 | * .. and then another 0x100 bytes for the emergency kernel stack: | ||
267 | */ | ||
268 | unsigned long stack[64]; | ||
269 | |||
270 | } ____cacheline_aligned; | ||
271 | |||
272 | DECLARE_PER_CPU(struct tss_struct, init_tss); | ||
273 | |||
274 | /* | ||
275 | * Save the original ist values for checking stack pointers during debugging | ||
276 | */ | ||
277 | struct orig_ist { | ||
278 | unsigned long ist[7]; | ||
279 | }; | ||
280 | |||
281 | #define MXCSR_DEFAULT 0x1f80 | ||
282 | |||
283 | struct i387_fsave_struct { | ||
284 | u32 cwd; /* FPU Control Word */ | ||
285 | u32 swd; /* FPU Status Word */ | ||
286 | u32 twd; /* FPU Tag Word */ | ||
287 | u32 fip; /* FPU IP Offset */ | ||
288 | u32 fcs; /* FPU IP Selector */ | ||
289 | u32 foo; /* FPU Operand Pointer Offset */ | ||
290 | u32 fos; /* FPU Operand Pointer Selector */ | ||
291 | |||
292 | /* 8*10 bytes for each FP-reg = 80 bytes: */ | ||
293 | u32 st_space[20]; | ||
294 | |||
295 | /* Software status information [not touched by FSAVE ]: */ | ||
296 | u32 status; | ||
297 | }; | ||
298 | |||
299 | struct i387_fxsave_struct { | ||
300 | u16 cwd; /* Control Word */ | ||
301 | u16 swd; /* Status Word */ | ||
302 | u16 twd; /* Tag Word */ | ||
303 | u16 fop; /* Last Instruction Opcode */ | ||
304 | union { | ||
305 | struct { | ||
306 | u64 rip; /* Instruction Pointer */ | ||
307 | u64 rdp; /* Data Pointer */ | ||
308 | }; | ||
309 | struct { | ||
310 | u32 fip; /* FPU IP Offset */ | ||
311 | u32 fcs; /* FPU IP Selector */ | ||
312 | u32 foo; /* FPU Operand Offset */ | ||
313 | u32 fos; /* FPU Operand Selector */ | ||
314 | }; | ||
315 | }; | ||
316 | u32 mxcsr; /* MXCSR Register State */ | ||
317 | u32 mxcsr_mask; /* MXCSR Mask */ | ||
318 | |||
319 | /* 8*16 bytes for each FP-reg = 128 bytes: */ | ||
320 | u32 st_space[32]; | ||
321 | |||
322 | /* 16*16 bytes for each XMM-reg = 256 bytes: */ | ||
323 | u32 xmm_space[64]; | ||
324 | |||
325 | u32 padding[24]; | ||
326 | |||
327 | } __attribute__((aligned(16))); | ||
328 | |||
329 | struct i387_soft_struct { | ||
330 | u32 cwd; | ||
331 | u32 swd; | ||
332 | u32 twd; | ||
333 | u32 fip; | ||
334 | u32 fcs; | ||
335 | u32 foo; | ||
336 | u32 fos; | ||
337 | /* 8*10 bytes for each FP-reg = 80 bytes: */ | ||
338 | u32 st_space[20]; | ||
339 | u8 ftop; | ||
340 | u8 changed; | ||
341 | u8 lookahead; | ||
342 | u8 no_update; | ||
343 | u8 rm; | ||
344 | u8 alimit; | ||
345 | struct info *info; | ||
346 | u32 entry_eip; | ||
347 | }; | ||
348 | |||
349 | union thread_xstate { | ||
350 | struct i387_fsave_struct fsave; | ||
351 | struct i387_fxsave_struct fxsave; | ||
352 | struct i387_soft_struct soft; | ||
353 | }; | ||
354 | |||
355 | #ifdef CONFIG_X86_64 | ||
356 | DECLARE_PER_CPU(struct orig_ist, orig_ist); | ||
357 | #endif | ||
358 | |||
359 | extern void print_cpu_info(struct cpuinfo_x86 *); | ||
360 | extern unsigned int xstate_size; | ||
361 | extern void free_thread_xstate(struct task_struct *); | ||
362 | extern struct kmem_cache *task_xstate_cachep; | ||
363 | extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); | ||
364 | extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); | ||
365 | extern unsigned short num_cache_leaves; | ||
366 | |||
367 | struct thread_struct { | ||
368 | /* Cached TLS descriptors: */ | ||
369 | struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; | ||
370 | unsigned long sp0; | ||
371 | unsigned long sp; | ||
372 | #ifdef CONFIG_X86_32 | ||
373 | unsigned long sysenter_cs; | ||
374 | #else | ||
375 | unsigned long usersp; /* Copy from PDA */ | ||
376 | unsigned short es; | ||
377 | unsigned short ds; | ||
378 | unsigned short fsindex; | ||
379 | unsigned short gsindex; | ||
380 | #endif | ||
381 | unsigned long ip; | ||
382 | unsigned long fs; | ||
383 | unsigned long gs; | ||
384 | /* Hardware debugging registers: */ | ||
385 | unsigned long debugreg0; | ||
386 | unsigned long debugreg1; | ||
387 | unsigned long debugreg2; | ||
388 | unsigned long debugreg3; | ||
389 | unsigned long debugreg6; | ||
390 | unsigned long debugreg7; | ||
391 | /* Fault info: */ | ||
392 | unsigned long cr2; | ||
393 | unsigned long trap_no; | ||
394 | unsigned long error_code; | ||
395 | /* floating point and extended processor state */ | ||
396 | union thread_xstate *xstate; | ||
397 | #ifdef CONFIG_X86_32 | ||
398 | /* Virtual 86 mode info */ | ||
399 | struct vm86_struct __user *vm86_info; | ||
400 | unsigned long screen_bitmap; | ||
401 | unsigned long v86flags; | ||
402 | unsigned long v86mask; | ||
403 | unsigned long saved_sp0; | ||
404 | unsigned int saved_fs; | ||
405 | unsigned int saved_gs; | ||
406 | #endif | ||
407 | /* IO permissions: */ | ||
408 | unsigned long *io_bitmap_ptr; | ||
409 | unsigned long iopl; | ||
410 | /* Max allowed port in the bitmap, in bytes: */ | ||
411 | unsigned io_bitmap_max; | ||
412 | /* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */ | ||
413 | unsigned long debugctlmsr; | ||
414 | /* Debug Store - if not 0, points to a DS Save Area configuration; | ||
415 | * goes into MSR_IA32_DS_AREA */ | ||
416 | unsigned long ds_area_msr; | ||
417 | }; | ||
418 | |||
419 | static inline unsigned long native_get_debugreg(int regno) | ||
420 | { | ||
421 | unsigned long val = 0; /* Damn you, gcc! */ | ||
422 | |||
423 | switch (regno) { | ||
424 | case 0: | ||
425 | asm("mov %%db0, %0" :"=r" (val)); | ||
426 | break; | ||
427 | case 1: | ||
428 | asm("mov %%db1, %0" :"=r" (val)); | ||
429 | break; | ||
430 | case 2: | ||
431 | asm("mov %%db2, %0" :"=r" (val)); | ||
432 | break; | ||
433 | case 3: | ||
434 | asm("mov %%db3, %0" :"=r" (val)); | ||
435 | break; | ||
436 | case 6: | ||
437 | asm("mov %%db6, %0" :"=r" (val)); | ||
438 | break; | ||
439 | case 7: | ||
440 | asm("mov %%db7, %0" :"=r" (val)); | ||
441 | break; | ||
442 | default: | ||
443 | BUG(); | ||
444 | } | ||
445 | return val; | ||
446 | } | ||
447 | |||
448 | static inline void native_set_debugreg(int regno, unsigned long value) | ||
449 | { | ||
450 | switch (regno) { | ||
451 | case 0: | ||
452 | asm("mov %0, %%db0" ::"r" (value)); | ||
453 | break; | ||
454 | case 1: | ||
455 | asm("mov %0, %%db1" ::"r" (value)); | ||
456 | break; | ||
457 | case 2: | ||
458 | asm("mov %0, %%db2" ::"r" (value)); | ||
459 | break; | ||
460 | case 3: | ||
461 | asm("mov %0, %%db3" ::"r" (value)); | ||
462 | break; | ||
463 | case 6: | ||
464 | asm("mov %0, %%db6" ::"r" (value)); | ||
465 | break; | ||
466 | case 7: | ||
467 | asm("mov %0, %%db7" ::"r" (value)); | ||
468 | break; | ||
469 | default: | ||
470 | BUG(); | ||
471 | } | ||
472 | } | ||
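A hedged sketch of arming one execution breakpoint with these helpers. The DR7 encoding relied on here (bit 0 = local enable for DR0, with the R/W0 and LEN0 fields at bits 16-19 left zero to mean "1 byte, on execution") follows the architecture manuals; the function itself is an illustrative assumption:

/* Illustrative only: break on instruction fetch at addr via DR0/DR7. */
static void arm_exec_breakpoint(unsigned long addr)
{
	native_set_debugreg(0, addr);
	native_set_debugreg(7, native_get_debugreg(7) | 0x1);
}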
473 | |||
474 | /* | ||
475 | * Set IOPL bits in EFLAGS from given mask | ||
476 | */ | ||
477 | static inline void native_set_iopl_mask(unsigned mask) | ||
478 | { | ||
479 | #ifdef CONFIG_X86_32 | ||
480 | unsigned int reg; | ||
481 | |||
482 | asm volatile ("pushfl;" | ||
483 | "popl %0;" | ||
484 | "andl %1, %0;" | ||
485 | "orl %2, %0;" | ||
486 | "pushl %0;" | ||
487 | "popfl" | ||
488 | : "=&r" (reg) | ||
489 | : "i" (~X86_EFLAGS_IOPL), "r" (mask)); | ||
490 | #endif | ||
491 | } | ||
492 | |||
493 | static inline void | ||
494 | native_load_sp0(struct tss_struct *tss, struct thread_struct *thread) | ||
495 | { | ||
496 | tss->x86_tss.sp0 = thread->sp0; | ||
497 | #ifdef CONFIG_X86_32 | ||
498 | /* Only happens when SEP is enabled, no need to test "SEP"arately: */ | ||
499 | if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) { | ||
500 | tss->x86_tss.ss1 = thread->sysenter_cs; | ||
501 | wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); | ||
502 | } | ||
503 | #endif | ||
504 | } | ||
505 | |||
506 | static inline void native_swapgs(void) | ||
507 | { | ||
508 | #ifdef CONFIG_X86_64 | ||
509 | asm volatile("swapgs" ::: "memory"); | ||
510 | #endif | ||
511 | } | ||
512 | |||
513 | #ifdef CONFIG_PARAVIRT | ||
514 | #include <asm/paravirt.h> | ||
515 | #else | ||
516 | #define __cpuid native_cpuid | ||
517 | #define paravirt_enabled() 0 | ||
518 | |||
519 | /* | ||
520 | * These special macros can be used to get or set a debugging register | ||
521 | */ | ||
522 | #define get_debugreg(var, register) \ | ||
523 | (var) = native_get_debugreg(register) | ||
524 | #define set_debugreg(value, register) \ | ||
525 | native_set_debugreg(register, value) | ||
526 | |||
527 | static inline void load_sp0(struct tss_struct *tss, | ||
528 | struct thread_struct *thread) | ||
529 | { | ||
530 | native_load_sp0(tss, thread); | ||
531 | } | ||
532 | |||
533 | #define set_iopl_mask native_set_iopl_mask | ||
534 | #endif /* CONFIG_PARAVIRT */ | ||
535 | |||
536 | /* | ||
537 | * Save the CR4 feature set we're using (i.e. | ||
538 | * Pentium 4MB enable and PPro Global page | ||
539 | * enable), so that any CPUs that boot up | ||
540 | * after us can get the correct flags. | ||
541 | */ | ||
542 | extern unsigned long mmu_cr4_features; | ||
543 | |||
544 | static inline void set_in_cr4(unsigned long mask) | ||
545 | { | ||
546 | unsigned cr4; | ||
547 | |||
548 | mmu_cr4_features |= mask; | ||
549 | cr4 = read_cr4(); | ||
550 | cr4 |= mask; | ||
551 | write_cr4(cr4); | ||
552 | } | ||
553 | |||
554 | static inline void clear_in_cr4(unsigned long mask) | ||
555 | { | ||
556 | unsigned cr4; | ||
557 | |||
558 | mmu_cr4_features &= ~mask; | ||
559 | cr4 = read_cr4(); | ||
560 | cr4 &= ~mask; | ||
561 | write_cr4(cr4); | ||
562 | } | ||
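A minimal sketch of toggling a feature bit with these helpers, assuming the X86_CR4_PGE flag from <asm/processor-flags.h> (included above); the wrapper is illustrative:

/* Illustrative only: enable global pages on this CPU. */
static void enable_global_pages(void)
{
	set_in_cr4(X86_CR4_PGE);	/* also recorded in mmu_cr4_features */
}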
563 | |||
564 | struct microcode_header { | ||
565 | unsigned int hdrver; | ||
566 | unsigned int rev; | ||
567 | unsigned int date; | ||
568 | unsigned int sig; | ||
569 | unsigned int cksum; | ||
570 | unsigned int ldrver; | ||
571 | unsigned int pf; | ||
572 | unsigned int datasize; | ||
573 | unsigned int totalsize; | ||
574 | unsigned int reserved[3]; | ||
575 | }; | ||
576 | |||
577 | struct microcode { | ||
578 | struct microcode_header hdr; | ||
579 | unsigned int bits[0]; | ||
580 | }; | ||
581 | |||
582 | typedef struct microcode microcode_t; | ||
583 | typedef struct microcode_header microcode_header_t; | ||
584 | |||
585 | /* The microcode format is extended from Prescott processors onwards */ | ||
586 | struct extended_signature { | ||
587 | unsigned int sig; | ||
588 | unsigned int pf; | ||
589 | unsigned int cksum; | ||
590 | }; | ||
591 | |||
592 | struct extended_sigtable { | ||
593 | unsigned int count; | ||
594 | unsigned int cksum; | ||
595 | unsigned int reserved[3]; | ||
596 | struct extended_signature sigs[0]; | ||
597 | }; | ||
598 | |||
599 | typedef struct { | ||
600 | unsigned long seg; | ||
601 | } mm_segment_t; | ||
602 | |||
603 | |||
604 | /* | ||
605 | * create a kernel thread without removing it from tasklists | ||
606 | */ | ||
607 | extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); | ||
608 | |||
609 | /* Free all resources held by a thread. */ | ||
610 | extern void release_thread(struct task_struct *); | ||
611 | |||
612 | /* Prepare to copy thread state - unlazy all lazy state */ | ||
613 | extern void prepare_to_copy(struct task_struct *tsk); | ||
614 | |||
615 | unsigned long get_wchan(struct task_struct *p); | ||
616 | |||
617 | /* | ||
618 | * Generic CPUID function | ||
619 | * clear %ecx, since some CPUs (Cyrix MII) do not set or clear %ecx, | ||
620 | * resulting in stale register contents being returned. | ||
621 | */ | ||
622 | static inline void cpuid(unsigned int op, | ||
623 | unsigned int *eax, unsigned int *ebx, | ||
624 | unsigned int *ecx, unsigned int *edx) | ||
625 | { | ||
626 | *eax = op; | ||
627 | *ecx = 0; | ||
628 | __cpuid(eax, ebx, ecx, edx); | ||
629 | } | ||
630 | |||
631 | /* Some CPUID calls want 'count' to be placed in ecx */ | ||
632 | static inline void cpuid_count(unsigned int op, int count, | ||
633 | unsigned int *eax, unsigned int *ebx, | ||
634 | unsigned int *ecx, unsigned int *edx) | ||
635 | { | ||
636 | *eax = op; | ||
637 | *ecx = count; | ||
638 | __cpuid(eax, ebx, ecx, edx); | ||
639 | } | ||
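A hedged sketch of the classic cpuid_count() use: walking CPUID leaf 4 (deterministic cache parameters) until the cache-type field in EAX (bits 4:0) reads 0. The leaf and field layout follow Intel's documentation; the helper is an illustrative assumption:

/* Illustrative only: count the cache levels CPUID leaf 4 reports. */
static int count_cache_leaves(void)
{
	unsigned int eax, ebx, ecx, edx;
	int i = 0;

	do {
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		i++;
	} while (eax & 0x1f);	/* type 0 = no more caches */

	return i - 1;
}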
640 | |||
641 | /* | ||
642 | * CPUID functions returning a single datum | ||
643 | */ | ||
644 | static inline unsigned int cpuid_eax(unsigned int op) | ||
645 | { | ||
646 | unsigned int eax, ebx, ecx, edx; | ||
647 | |||
648 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
649 | |||
650 | return eax; | ||
651 | } | ||
652 | |||
653 | static inline unsigned int cpuid_ebx(unsigned int op) | ||
654 | { | ||
655 | unsigned int eax, ebx, ecx, edx; | ||
656 | |||
657 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
658 | |||
659 | return ebx; | ||
660 | } | ||
661 | |||
662 | static inline unsigned int cpuid_ecx(unsigned int op) | ||
663 | { | ||
664 | unsigned int eax, ebx, ecx, edx; | ||
665 | |||
666 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
667 | |||
668 | return ecx; | ||
669 | } | ||
670 | |||
671 | static inline unsigned int cpuid_edx(unsigned int op) | ||
672 | { | ||
673 | unsigned int eax, ebx, ecx, edx; | ||
674 | |||
675 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
676 | |||
677 | return edx; | ||
678 | } | ||
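A minimal sketch tying these together: CPUID leaf 0 returns the vendor string in EBX, EDX, ECX, in that order. The buffer handling (and memcpy from <linux/string.h>) is an illustrative assumption:

/* Illustrative only: fetch the 12-byte vendor string, e.g. "GenuineIntel". */
static void get_vendor_string(char buf[13])
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(0, &eax, &ebx, &ecx, &edx);
	memcpy(buf + 0, &ebx, 4);
	memcpy(buf + 4, &edx, 4);
	memcpy(buf + 8, &ecx, 4);
	buf[12] = '\0';
}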
679 | |||
680 | /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ | ||
681 | static inline void rep_nop(void) | ||
682 | { | ||
683 | asm volatile("rep; nop" ::: "memory"); | ||
684 | } | ||
685 | |||
686 | static inline void cpu_relax(void) | ||
687 | { | ||
688 | rep_nop(); | ||
689 | } | ||
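The canonical pattern these helpers exist for, sketched under the assumption of a simple flag-polling wait:

/* Illustrative only: poll a flag politely, easing the pipeline each spin. */
static void wait_for_flag(volatile int *flag)
{
	while (!*flag)
		cpu_relax();
}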
690 | |||
691 | /* Stop speculative execution: */ | ||
692 | static inline void sync_core(void) | ||
693 | { | ||
694 | int tmp; | ||
695 | |||
696 | asm volatile("cpuid" : "=a" (tmp) : "0" (1) | ||
697 | : "ebx", "ecx", "edx", "memory"); | ||
698 | } | ||
699 | |||
700 | static inline void __monitor(const void *eax, unsigned long ecx, | ||
701 | unsigned long edx) | ||
702 | { | ||
703 | /* "monitor %eax, %ecx, %edx;" */ | ||
704 | asm volatile(".byte 0x0f, 0x01, 0xc8;" | ||
705 | :: "a" (eax), "c" (ecx), "d"(edx)); | ||
706 | } | ||
707 | |||
708 | static inline void __mwait(unsigned long eax, unsigned long ecx) | ||
709 | { | ||
710 | /* "mwait %eax, %ecx;" */ | ||
711 | asm volatile(".byte 0x0f, 0x01, 0xc9;" | ||
712 | :: "a" (eax), "c" (ecx)); | ||
713 | } | ||
714 | |||
715 | static inline void __sti_mwait(unsigned long eax, unsigned long ecx) | ||
716 | { | ||
717 | trace_hardirqs_on(); | ||
718 | /* "mwait %eax, %ecx;" */ | ||
719 | asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" | ||
720 | :: "a" (eax), "c" (ecx)); | ||
721 | } | ||
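A hedged sketch of the monitor/mwait idle idiom these wrappers support: arm the monitor on a location, re-check the wake condition, then mwait. The watched variable is an illustrative assumption; real idle code watches the thread flags:

/* Illustrative only: sleep until *wake becomes nonzero (or any wakeup). */
static void mwait_until(volatile int *wake)
{
	if (cpu_has(&current_cpu_data, X86_FEATURE_MWAIT)) {
		__monitor((const void *)(unsigned long)wake, 0, 0);
		if (!*wake)
			__mwait(0, 0);
	}
}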
722 | |||
723 | extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); | ||
724 | |||
725 | extern void select_idle_routine(const struct cpuinfo_x86 *c); | ||
726 | |||
727 | extern unsigned long boot_option_idle_override; | ||
728 | extern unsigned long idle_halt; | ||
729 | extern unsigned long idle_nomwait; | ||
730 | |||
731 | /* | ||
732 | * On systems with caches, caches must be flushed as the absolute | ||
733 | * last instruction before going into a suspended halt. Otherwise, | ||
734 | * dirty data can linger in the cache and become stale on resume, | ||
735 | * leading to strange errors. | ||
736 | * | ||
737 | * We perform a variety of operations to guarantee that the compiler | ||
738 | * will not reorder instructions. wbinvd itself is serializing, | ||
739 | * so the processor will not reorder. | ||
740 | * | ||
741 | * Systems without caches can just go into halt. | ||
742 | */ | ||
743 | static inline void wbinvd_halt(void) | ||
744 | { | ||
745 | mb(); | ||
746 | /* check for clflush to determine if wbinvd is legal */ | ||
747 | if (cpu_has_clflush) | ||
748 | asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory"); | ||
749 | else | ||
750 | while (1) | ||
751 | halt(); | ||
752 | } | ||
753 | |||
754 | extern void enable_sep_cpu(void); | ||
755 | extern int sysenter_setup(void); | ||
756 | |||
757 | /* Defined in head.S */ | ||
758 | extern struct desc_ptr early_gdt_descr; | ||
759 | |||
760 | extern void cpu_set_gdt(int); | ||
761 | extern void switch_to_new_gdt(void); | ||
762 | extern void cpu_init(void); | ||
763 | extern void init_gdt(int cpu); | ||
764 | |||
765 | static inline void update_debugctlmsr(unsigned long debugctlmsr) | ||
766 | { | ||
767 | #ifndef CONFIG_X86_DEBUGCTLMSR | ||
768 | if (boot_cpu_data.x86 < 6) | ||
769 | return; | ||
770 | #endif | ||
771 | wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr); | ||
772 | } | ||
773 | |||
774 | /* | ||
775 | * From the system description table in the BIOS. Mostly for MCA use, but | ||
776 | * others may find it useful: | ||
777 | */ | ||
778 | extern unsigned int machine_id; | ||
779 | extern unsigned int machine_submodel_id; | ||
780 | extern unsigned int BIOS_revision; | ||
781 | |||
782 | /* Boot loader type from the setup header: */ | ||
783 | extern int bootloader_type; | ||
784 | |||
785 | extern char ignore_fpu_irq; | ||
786 | |||
787 | #define HAVE_ARCH_PICK_MMAP_LAYOUT 1 | ||
788 | #define ARCH_HAS_PREFETCHW | ||
789 | #define ARCH_HAS_SPINLOCK_PREFETCH | ||
790 | |||
791 | #ifdef CONFIG_X86_32 | ||
792 | # define BASE_PREFETCH ASM_NOP4 | ||
793 | # define ARCH_HAS_PREFETCH | ||
794 | #else | ||
795 | # define BASE_PREFETCH "prefetcht0 (%1)" | ||
796 | #endif | ||
797 | |||
798 | /* | ||
799 | * Prefetch instructions for Pentium III (+) and AMD Athlon (+) | ||
800 | * | ||
801 | * It's not worth caring about 3dnow prefetches for the K6 | ||
802 | * because they are microcoded there and very slow. | ||
803 | */ | ||
804 | static inline void prefetch(const void *x) | ||
805 | { | ||
806 | alternative_input(BASE_PREFETCH, | ||
807 | "prefetchnta (%1)", | ||
808 | X86_FEATURE_XMM, | ||
809 | "r" (x)); | ||
810 | } | ||
811 | |||
812 | /* | ||
813 | * 3dnow prefetch to get an exclusive cache line. | ||
814 | * Useful for spinlocks to avoid one state transition in the | ||
815 | * cache coherency protocol: | ||
816 | */ | ||
817 | static inline void prefetchw(const void *x) | ||
818 | { | ||
819 | alternative_input(BASE_PREFETCH, | ||
820 | "prefetchw (%1)", | ||
821 | X86_FEATURE_3DNOW, | ||
822 | "r" (x)); | ||
823 | } | ||
824 | |||
825 | static inline void spin_lock_prefetch(const void *x) | ||
826 | { | ||
827 | prefetchw(x); | ||
828 | } | ||
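A minimal sketch of where prefetch() pays off: pulling the next node of a linked list into cache while the current one is processed. The node type and visit() hook are illustrative assumptions:

/* Illustrative only: overlap memory latency with per-node work. */
struct pnode { struct pnode *next; int payload; };

static void walk(struct pnode *n, void (*visit)(struct pnode *))
{
	while (n) {
		prefetch(n->next);	/* harmless even when next is NULL */
		visit(n);
		n = n->next;
	}
}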
829 | |||
830 | #ifdef CONFIG_X86_32 | ||
831 | /* | ||
832 | * User space process size: 3GB (default). | ||
833 | */ | ||
834 | #define TASK_SIZE PAGE_OFFSET | ||
835 | #define STACK_TOP TASK_SIZE | ||
836 | #define STACK_TOP_MAX STACK_TOP | ||
837 | |||
838 | #define INIT_THREAD { \ | ||
839 | .sp0 = sizeof(init_stack) + (long)&init_stack, \ | ||
840 | .vm86_info = NULL, \ | ||
841 | .sysenter_cs = __KERNEL_CS, \ | ||
842 | .io_bitmap_ptr = NULL, \ | ||
843 | .fs = __KERNEL_PERCPU, \ | ||
844 | } | ||
845 | |||
846 | /* | ||
847 | * Note that the .io_bitmap member must be extra-big. This is because | ||
848 | * the CPU will access an additional byte beyond the end of the IO | ||
849 | * permission bitmap. The extra byte must be all 1 bits, and must | ||
850 | * be within the limit. | ||
851 | */ | ||
852 | #define INIT_TSS { \ | ||
853 | .x86_tss = { \ | ||
854 | .sp0 = sizeof(init_stack) + (long)&init_stack, \ | ||
855 | .ss0 = __KERNEL_DS, \ | ||
856 | .ss1 = __KERNEL_CS, \ | ||
857 | .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ | ||
858 | }, \ | ||
859 | .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \ | ||
860 | } | ||
861 | |||
862 | extern unsigned long thread_saved_pc(struct task_struct *tsk); | ||
863 | |||
864 | #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long)) | ||
865 | #define KSTK_TOP(info) \ | ||
866 | ({ \ | ||
867 | unsigned long *__ptr = (unsigned long *)(info); \ | ||
868 | (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \ | ||
869 | }) | ||
870 | |||
871 | /* | ||
872 | * The below -8 is to reserve 8 bytes on top of the ring0 stack. | ||
873 | * This is necessary to guarantee that the entire "struct pt_regs" | ||
874 | * is accessible even if the CPU hasn't stored the SS/ESP registers | ||
875 | * on the stack (an interrupt gate does not save these registers | ||
876 | * when switching to the same privilege ring). | ||
877 | * Therefore beware: accessing the ss/esp fields of the | ||
878 | * "struct pt_regs" is possible, but they may contain the | ||
879 | * completely wrong values. | ||
880 | */ | ||
881 | #define task_pt_regs(task) \ | ||
882 | ({ \ | ||
883 | struct pt_regs *__regs__; \ | ||
884 | __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \ | ||
885 | __regs__ - 1; \ | ||
886 | }) | ||
887 | |||
888 | #define KSTK_ESP(task) (task_pt_regs(task)->sp) | ||
889 | |||
890 | #else | ||
891 | /* | ||
892 | * User space process size: 47 bits minus one guard page. | ||
893 | */ | ||
894 | #define TASK_SIZE64 ((1UL << 47) - PAGE_SIZE) | ||
895 | |||
896 | /* This decides where the kernel will search for a free chunk of vm | ||
897 | * space during mmap(). | ||
898 | */ | ||
899 | #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \ | ||
900 | 0xc0000000 : 0xFFFFe000) | ||
901 | |||
902 | #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \ | ||
903 | IA32_PAGE_OFFSET : TASK_SIZE64) | ||
904 | #define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \ | ||
905 | IA32_PAGE_OFFSET : TASK_SIZE64) | ||
906 | |||
907 | #define STACK_TOP TASK_SIZE | ||
908 | #define STACK_TOP_MAX TASK_SIZE64 | ||
909 | |||
910 | #define INIT_THREAD { \ | ||
911 | .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ | ||
912 | } | ||
913 | |||
914 | #define INIT_TSS { \ | ||
915 | .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ | ||
916 | } | ||
917 | |||
918 | /* | ||
919 | * Return saved PC of a blocked thread. | ||
920 | * What is this good for? It will always be the scheduler or ret_from_fork. | ||
921 | */ | ||
922 | #define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8)) | ||
923 | |||
924 | #define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1) | ||
925 | #define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */ | ||
926 | #endif /* CONFIG_X86_64 */ | ||
927 | |||
928 | extern void start_thread(struct pt_regs *regs, unsigned long new_ip, | ||
929 | unsigned long new_sp); | ||
930 | |||
931 | /* | ||
932 | * This decides where the kernel will search for a free chunk of vm | ||
933 | * space during mmap(). | ||
934 | */ | ||
935 | #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) | ||
936 | |||
937 | #define KSTK_EIP(task) (task_pt_regs(task)->ip) | ||
938 | |||
939 | /* Get/set a process' ability to use the timestamp counter instruction */ | ||
940 | #define GET_TSC_CTL(adr) get_tsc_mode((adr)) | ||
941 | #define SET_TSC_CTL(val) set_tsc_mode((val)) | ||
942 | |||
943 | extern int get_tsc_mode(unsigned long adr); | ||
944 | extern int set_tsc_mode(unsigned int val); | ||
945 | |||
946 | #endif | ||