author		Ingo Molnar <mingo@elte.hu>	2008-02-20 22:24:40 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-04-17 11:40:49 -0400
commit		4d46a89e7c867718020b2d5fd8f9e775293304be (patch)
tree		d594c7d816ce30aa0f7c9622069f6366419ee73d /include
parent		eb19067d160416cd61fc92a8913ccfb3497b20b7 (diff)
x86: clean up include/asm-x86/processor.h
basic style cleanup to flush out years of neglect:

 - consistent indentation
 - whitespace fixes
 - consistent comments

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include')
-rw-r--r--	include/asm-x86/processor.h	638

1 file changed, 358 insertions(+), 280 deletions(-)
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 45a2f0ab33d0..43d2cc829a94 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -24,6 +24,7 @@ struct mm_struct;
 #include <asm/msr.h>
 #include <asm/desc_defs.h>
 #include <asm/nops.h>
+
 #include <linux/personality.h>
 #include <linux/cpumask.h>
 #include <linux/cache.h>
@@ -37,16 +38,18 @@ struct mm_struct;
 static inline void *current_text_addr(void)
 {
 	void *pc;
-	asm volatile("mov $1f,%0\n1:":"=r" (pc));
+
+	asm volatile("mov $1f, %0; 1:":"=r" (pc));
+
 	return pc;
 }
 
 #ifdef CONFIG_X86_VSMP
-#define ARCH_MIN_TASKALIGN	(1 << INTERNODE_CACHE_SHIFT)
-#define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
+# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
+# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
 #else
-#define ARCH_MIN_TASKALIGN	16
-#define ARCH_MIN_MMSTRUCT_ALIGN	0
+# define ARCH_MIN_TASKALIGN		16
+# define ARCH_MIN_MMSTRUCT_ALIGN	0
 #endif
 
 /*
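The rewritten asm in current_text_addr() loads the address of the local label 1: into a register as an immediate, so the function returns (roughly) the address of the currently executing code. A minimal stand-alone sketch of the same technique, assuming gcc on x86 (the helper and program are ours, not the kernel's):

#include <stdio.h>

/* Sketch of the current_text_addr() trick: take the address of a
 * local asm label as a mov immediate. */
static inline void *text_addr(void)
{
	void *pc;

	asm volatile("mov $1f, %0; 1:" : "=r" (pc));
	return pc;
}

int main(void)
{
	printf("executing near %p\n", text_addr());
	return 0;
}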
@@ -56,69 +59,81 @@ static inline void *current_text_addr(void)
  */
 
 struct cpuinfo_x86 {
 	__u8	x86;		/* CPU family */
 	__u8	x86_vendor;	/* CPU vendor */
 	__u8	x86_model;
 	__u8	x86_mask;
 #ifdef CONFIG_X86_32
 	char	wp_works_ok;	/* It doesn't on 386's */
-	char	hlt_works_ok;	/* Problems on some 486Dx4's and old 386's */
-	char	hard_math;
-	char	rfu;
-	char	fdiv_bug;
-	char	f00f_bug;
-	char	coma_bug;
-	char	pad0;
+
+	/* Problems on some 486Dx4's and old 386's: */
+	char	hlt_works_ok;
+	char	hard_math;
+	char	rfu;
+	char	fdiv_bug;
+	char	f00f_bug;
+	char	coma_bug;
+	char	pad0;
 #else
-	/* number of 4K pages in DTLB/ITLB combined(in pages)*/
+	/* Number of 4K pages in DTLB/ITLB combined(in pages): */
 	int	x86_tlbsize;
-	__u8	x86_virt_bits, x86_phys_bits;
-	/* cpuid returned core id bits */
-	__u8	x86_coreid_bits;
-	/* Max extended CPUID function supported */
-	__u32	extended_cpuid_level;
+	__u8	x86_virt_bits;
+	__u8	x86_phys_bits;
+	/* CPUID returned core id bits: */
+	__u8	x86_coreid_bits;
+	/* Max extended CPUID function supported: */
+	__u32	extended_cpuid_level;
 #endif
-	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
-	__u32	x86_capability[NCAPINTS];
-	char	x86_vendor_id[16];
-	char	x86_model_id[64];
-	int	x86_cache_size;	/* in KB - valid for CPUS which support this
-				   call */
-	int	x86_cache_alignment;	/* In bytes */
-	int	x86_power;
-	unsigned long	loops_per_jiffy;
+	/* Maximum supported CPUID level, -1=no CPUID: */
+	int	cpuid_level;
+	__u32	x86_capability[NCAPINTS];
+	char	x86_vendor_id[16];
+	char	x86_model_id[64];
+	/* in KB - valid for CPUS which support this call: */
+	int	x86_cache_size;
+	int	x86_cache_alignment;	/* In bytes */
+	int	x86_power;
+	unsigned long	loops_per_jiffy;
 #ifdef CONFIG_SMP
-	cpumask_t	llc_shared_map;	/* cpus sharing the last level cache */
+	/* cpus sharing the last level cache: */
+	cpumask_t	llc_shared_map;
 #endif
-	u16	x86_max_cores;	/* cpuid returned max cores value */
-	u16	apicid;
-	u16	x86_clflush_size;
+	/* cpuid returned max cores value: */
+	u16	x86_max_cores;
+	u16	apicid;
+	u16	x86_clflush_size;
 #ifdef CONFIG_SMP
-	u16	booted_cores;	/* number of cores as seen by OS */
-	u16	phys_proc_id;	/* Physical processor id. */
-	u16	cpu_core_id;	/* Core id */
-	u16	cpu_index;	/* index into per_cpu list */
+	/* number of cores as seen by the OS: */
+	u16	booted_cores;
+	/* Physical processor id: */
+	u16	phys_proc_id;
+	/* Core id: */
+	u16	cpu_core_id;
+	/* Index into per_cpu list: */
+	u16	cpu_index;
 #endif
 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
 
 #define X86_VENDOR_INTEL	0
 #define X86_VENDOR_CYRIX	1
 #define X86_VENDOR_AMD		2
 #define X86_VENDOR_UMC		3
 #define X86_VENDOR_NEXGEN	4
 #define X86_VENDOR_CENTAUR	5
 #define X86_VENDOR_TRANSMETA	7
 #define X86_VENDOR_NSC		8
 #define X86_VENDOR_NUM		9
-#define X86_VENDOR_UNKNOWN	0xff
+
+#define X86_VENDOR_UNKNOWN	0xff
 
 /*
  * capabilities of CPUs
  */
 extern struct cpuinfo_x86 boot_cpu_data;
 extern struct cpuinfo_x86 new_cpu_data;
+
 extern struct tss_struct doublefault_tss;
 extern __u32 cleared_cpu_caps[NCAPINTS];
 
 #ifdef CONFIG_SMP
 DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
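Fields like x86_vendor_id are filled from CPUID leaf 0, which returns the vendor string in EBX, EDX, ECX order. A user-space sketch of that step, assuming gcc's <cpuid.h> helper (the program itself is ours):

#include <cpuid.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	char vendor[16] = { 0 };	/* same size as x86_vendor_id */

	if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
		return 1;
	/* CPUID leaf 0: vendor string comes back in EBX, EDX, ECX order */
	memcpy(vendor + 0, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	printf("max basic leaf %u, vendor \"%s\"\n", eax, vendor);
	return 0;
}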
@@ -129,7 +144,9 @@ DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
 #define current_cpu_data	boot_cpu_data
 #endif
 
-void cpu_detect(struct cpuinfo_x86 *c);
+#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)
+
+extern void cpu_detect(struct cpuinfo_x86 *c);
 
 extern void identify_cpu(struct cpuinfo_x86 *);
 extern void identify_boot_cpu(void);
@@ -146,7 +163,7 @@ static inline void detect_ht(struct cpuinfo_x86 *c) {}
 #endif
 
 static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
 				unsigned int *ecx, unsigned int *edx)
 {
 	/* ecx is often an input as well as an output. */
 	__asm__("cpuid"
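The constraint string is the interesting part of native_cpuid(): all four registers are outputs, while EAX and ECX are also inputs via the matching constraints "0" and "2" (ECX selects the sub-leaf on newer CPUs). A self-contained sketch of the same pattern, under a name of our own:

/* Same constraint pattern as native_cpuid(): "0" and "2" tie the
 * inputs to the first and third outputs (EAX and ECX). */
static inline void cpuid4(unsigned int *eax, unsigned int *ebx,
			  unsigned int *ecx, unsigned int *edx)
{
	__asm__("cpuid"
		: "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
		: "0" (*eax), "2" (*ecx));
}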
@@ -165,54 +182,67 @@ static inline void load_cr3(pgd_t *pgdir)
 #ifdef CONFIG_X86_32
 /* This is the TSS defined by the hardware. */
 struct x86_hw_tss {
 	unsigned short	back_link, __blh;
 	unsigned long	sp0;
 	unsigned short	ss0, __ss0h;
 	unsigned long	sp1;
-	unsigned short	ss1, __ss1h;	/* ss1 caches MSR_IA32_SYSENTER_CS */
-	unsigned long	sp2;
-	unsigned short	ss2, __ss2h;
-	unsigned long	__cr3;
-	unsigned long	ip;
-	unsigned long	flags;
-	unsigned long	ax, cx, dx, bx;
-	unsigned long	sp, bp, si, di;
-	unsigned short	es, __esh;
-	unsigned short	cs, __csh;
-	unsigned short	ss, __ssh;
-	unsigned short	ds, __dsh;
-	unsigned short	fs, __fsh;
-	unsigned short	gs, __gsh;
-	unsigned short	ldt, __ldth;
-	unsigned short	trace, io_bitmap_base;
+	/* ss1 caches MSR_IA32_SYSENTER_CS: */
+	unsigned short	ss1, __ss1h;
+	unsigned long	sp2;
+	unsigned short	ss2, __ss2h;
+	unsigned long	__cr3;
+	unsigned long	ip;
+	unsigned long	flags;
+	unsigned long	ax;
+	unsigned long	cx;
+	unsigned long	dx;
+	unsigned long	bx;
+	unsigned long	sp;
+	unsigned long	bp;
+	unsigned long	si;
+	unsigned long	di;
+	unsigned short	es, __esh;
+	unsigned short	cs, __csh;
+	unsigned short	ss, __ssh;
+	unsigned short	ds, __dsh;
+	unsigned short	fs, __fsh;
+	unsigned short	gs, __gsh;
+	unsigned short	ldt, __ldth;
+	unsigned short	trace;
+	unsigned short	io_bitmap_base;
+
 } __attribute__((packed));
 #else
 struct x86_hw_tss {
 	u32	reserved1;
 	u64	sp0;
 	u64	sp1;
 	u64	sp2;
 	u64	reserved2;
 	u64	ist[7];
 	u32	reserved3;
 	u32	reserved4;
 	u16	reserved5;
 	u16	io_bitmap_base;
+
 } __attribute__((packed)) ____cacheline_aligned;
 #endif
 
 /*
- * Size of io_bitmap.
+ * IO-bitmap sizes:
  */
 #define IO_BITMAP_BITS			65536
 #define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
 #define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
 #define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
 #define INVALID_IO_BITMAP_OFFSET	0x8000
 #define INVALID_IO_BITMAP_OFFSET_LAZY	0x9000
 
 struct tss_struct {
-	struct x86_hw_tss x86_tss;
+	/*
+	 * The hardware state:
+	 */
+	struct x86_hw_tss	x86_tss;
 
 	/*
 	 * The extra 1 is there because the CPU will access an
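Two sanity checks worth spelling out: the packed 64-bit x86_hw_tss must match the architectural TSS, which is 104 bytes, and the io_bitmap arithmetic gives one bit per I/O port, 65536 ports, hence 8 KB. A compile-time sketch (C11; 64-bit long assumed for the last check):

#include <stdint.h>

struct hw_tss64 {			/* mirror of the 64-bit layout above */
	uint32_t reserved1;
	uint64_t sp0, sp1, sp2;
	uint64_t reserved2;
	uint64_t ist[7];
	uint32_t reserved3, reserved4;
	uint16_t reserved5;
	uint16_t io_bitmap_base;
} __attribute__((packed));

_Static_assert(sizeof(struct hw_tss64) == 104, "hardware TSS is 104 bytes");
_Static_assert(65536 / 8 == 8192, "IO_BITMAP_BYTES");
_Static_assert(8192 / sizeof(long) == 1024, "IO_BITMAP_LONGS with 64-bit long");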
@@ -220,48 +250,54 @@ struct tss_struct {
 	 * bitmap. The extra byte must be all 1 bits, and must
 	 * be within the limit.
 	 */
 	unsigned long	io_bitmap[IO_BITMAP_LONGS + 1];
 	/*
 	 * Cache the current maximum and the last task that used the bitmap:
 	 */
 	unsigned long	io_bitmap_max;
 	struct thread_struct *io_bitmap_owner;
+
 	/*
-	 * pads the TSS to be cacheline-aligned (size is 0x100)
+	 * Pad the TSS to be cacheline-aligned (size is 0x100):
 	 */
 	unsigned long	__cacheline_filler[35];
 	/*
-	 * .. and then another 0x100 bytes for emergency kernel stack
+	 * .. and then another 0x100 bytes for the emergency kernel stack:
 	 */
 	unsigned long	stack[64];
+
 } __attribute__((packed));
 
 DECLARE_PER_CPU(struct tss_struct, init_tss);
 
-/* Save the original ist values for checking stack pointers during debugging */
+/*
+ * Save the original ist values for checking stack pointers during debugging
+ */
 struct orig_ist {
 	unsigned long	ist[7];
 };
 
 #define MXCSR_DEFAULT	0x1f80
 
 struct i387_fsave_struct {
 	u32	cwd;
 	u32	swd;
 	u32	twd;
 	u32	fip;
 	u32	fcs;
 	u32	foo;
 	u32	fos;
-	u32	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
-	u32	status;		/* software status information */
+	/* 8*10 bytes for each FP-reg = 80 bytes: */
+	u32	st_space[20];
+	/* Software status information: */
+	u32	status;
 };
 
 struct i387_fxsave_struct {
 	u16	cwd;
 	u16	swd;
 	u16	twd;
 	u16	fop;
 	union {
 		struct {
 			u64	rip;
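The "extra 1" long in io_bitmap above is a sentinel: the CPU may read one byte past the last bitmap byte it checks, so the tail must be all 1 bits (a set bit means "port denied"), which is also why INIT_TSS later in this file initializes the array with [0 ... IO_BITMAP_LONGS] = ~0. A user-space sketch of the lookup semantics, assuming 64-bit longs:

#include <stdio.h>

#define BITMAP_LONGS	1024		/* IO_BITMAP_LONGS on 64-bit */

static unsigned long io_bitmap[BITMAP_LONGS + 1] = {
	/* GNU range initializer, as in INIT_TSS: all denied + sentinel */
	[0 ... BITMAP_LONGS] = ~0UL,
};

static int port_denied(unsigned int port)
{
	return (io_bitmap[port / 64] >> (port % 64)) & 1;
}

int main(void)
{
	io_bitmap[0x3f8 / 64] &= ~(1UL << (0x3f8 % 64));	/* allow 0x3f8 */
	printf("port 0x3f8 %s\n", port_denied(0x3f8) ? "denied" : "allowed");
	return 0;
}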
@@ -274,31 +310,40 @@ struct i387_fxsave_struct {
 			u32	fos;
 		};
 	};
 	u32	mxcsr;
 	u32	mxcsr_mask;
-	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
-	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
-	u32	padding[24];
+	/* 8*16 bytes for each FP-reg = 128 bytes: */
+	u32	st_space[32];
+	/* 16*16 bytes for each XMM-reg = 256 bytes: */
+	u32	xmm_space[64];
+	u32	padding[24];
+
 } __attribute__((aligned(16)));
 
 struct i387_soft_struct {
 	u32	cwd;
 	u32	swd;
 	u32	twd;
 	u32	fip;
 	u32	fcs;
 	u32	foo;
 	u32	fos;
-	u32	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
-	u8	ftop, changed, lookahead, no_update, rm, alimit;
-	struct info	*info;
-	u32	entry_eip;
+	/* 8*10 bytes for each FP-reg = 80 bytes: */
+	u32	st_space[20];
+	u8	ftop;
+	u8	changed;
+	u8	lookahead;
+	u8	no_update;
+	u8	rm;
+	u8	alimit;
+	struct info	*info;
+	u32	entry_eip;
 };
 
 union i387_union {
 	struct i387_fsave_struct	fsave;
 	struct i387_fxsave_struct	fxsave;
 	struct i387_soft_struct		soft;
 };
 
 #ifdef CONFIG_X86_32
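struct i387_fxsave_struct mirrors the FXSAVE area, whose size is fixed by the ISA at 512 bytes. A reconstruction with a compile-time check, using the 64-bit rip/rdp arm of the union (names are ours):

#include <stdint.h>

struct fxsave_area {
	uint16_t cwd, swd, twd, fop;
	uint64_t rip, rdp;		/* 64-bit form of the union above */
	uint32_t mxcsr, mxcsr_mask;
	uint32_t st_space[32];		/* 8 regs x 16 bytes */
	uint32_t xmm_space[64];		/* 16 regs x 16 bytes */
	uint32_t padding[24];
} __attribute__((aligned(16)));

_Static_assert(sizeof(struct fxsave_area) == 512, "FXSAVE area is 512 bytes");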
@@ -313,42 +358,50 @@ extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
 extern unsigned short num_cache_leaves;
 
 struct thread_struct {
-/* cached TLS descriptors. */
+	/* Cached TLS descriptors: */
 	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
 	unsigned long		sp0;
 	unsigned long		sp;
 #ifdef CONFIG_X86_32
 	unsigned long		sysenter_cs;
 #else
 	unsigned long		usersp;	/* Copy from PDA */
-	unsigned short		es, ds, fsindex, gsindex;
+	unsigned short		es;
+	unsigned short		ds;
+	unsigned short		fsindex;
+	unsigned short		gsindex;
 #endif
 	unsigned long		ip;
 	unsigned long		fs;
 	unsigned long		gs;
-/* Hardware debugging registers */
+	/* Hardware debugging registers: */
 	unsigned long		debugreg0;
 	unsigned long		debugreg1;
 	unsigned long		debugreg2;
 	unsigned long		debugreg3;
 	unsigned long		debugreg6;
 	unsigned long		debugreg7;
-/* fault info */
-	unsigned long		cr2, trap_no, error_code;
-/* floating point info */
+	/* Fault info: */
+	unsigned long		cr2;
+	unsigned long		trap_no;
+	unsigned long		error_code;
+	/* Floating point info: */
 	union i387_union	i387 __attribute__((aligned(16)));;
 #ifdef CONFIG_X86_32
-/* virtual 86 mode info */
+	/* Virtual 86 mode info */
 	struct vm86_struct __user *vm86_info;
 	unsigned long		screen_bitmap;
-	unsigned long		v86flags, v86mask, saved_sp0;
-	unsigned int		saved_fs, saved_gs;
+	unsigned long		v86flags;
+	unsigned long		v86mask;
+	unsigned long		saved_sp0;
+	unsigned int		saved_fs;
+	unsigned int		saved_gs;
 #endif
-/* IO permissions */
+	/* IO permissions: */
 	unsigned long	*io_bitmap_ptr;
 	unsigned long	iopl;
-/* max allowed port in the bitmap, in bytes: */
+	/* Max allowed port in the bitmap, in bytes: */
 	unsigned	io_bitmap_max;
 /* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
 	unsigned long	debugctlmsr;
 /* Debug Store - if not 0 points to a DS Save Area configuration;
@@ -358,7 +411,7 @@ struct thread_struct {
 
 static inline unsigned long native_get_debugreg(int regno)
 {
 	unsigned long val = 0;	/* Damn you, gcc! */
 
 	switch (regno) {
 	case 0:
@@ -383,22 +436,22 @@ static inline void native_set_debugreg(int regno, unsigned long value)
 {
 	switch (regno) {
 	case 0:
-		asm("mov %0,%%db0" : /* no output */ :"r" (value));
+		asm("mov %0, %%db0" ::"r" (value));
 		break;
 	case 1:
-		asm("mov %0,%%db1" : /* no output */ :"r" (value));
+		asm("mov %0, %%db1" ::"r" (value));
 		break;
 	case 2:
-		asm("mov %0,%%db2" : /* no output */ :"r" (value));
+		asm("mov %0, %%db2" ::"r" (value));
 		break;
 	case 3:
-		asm("mov %0,%%db3" : /* no output */ :"r" (value));
+		asm("mov %0, %%db3" ::"r" (value));
 		break;
 	case 6:
-		asm("mov %0,%%db6" : /* no output */ :"r" (value));
+		asm("mov %0, %%db6" ::"r" (value));
 		break;
 	case 7:
-		asm("mov %0,%%db7" : /* no output */ :"r" (value));
+		asm("mov %0, %%db7" ::"r" (value));
 		break;
 	default:
 		BUG();
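The switch exists because %db0..%db7 cannot be indexed at runtime; each register needs its own mov, and regno is validated by the BUG() default. From user space the analogous operation goes through ptrace on the u_debugreg slots of struct user. A sketch for a tracer, assuming Linux on x86-64 where the slots are long-sized (this is our illustration, not a kernel interface shown above):

#include <stddef.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>

/* Write debug register regno (0..7) of a traced, stopped child. */
static long set_dr(pid_t pid, int regno, unsigned long value)
{
	size_t off = offsetof(struct user, u_debugreg) +
		     regno * sizeof(long);

	return ptrace(PTRACE_POKEUSER, pid, (void *)off, (void *)value);
}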
@@ -412,6 +465,7 @@ static inline void native_set_iopl_mask(unsigned mask)
 {
 #ifdef CONFIG_X86_32
 	unsigned int reg;
+
 	__asm__ __volatile__ ("pushfl;"
 			      "popl %0;"
 			      "andl %1, %0;"
@@ -423,12 +477,12 @@ static inline void native_set_iopl_mask(unsigned mask)
 #endif
 }
 
-static inline void native_load_sp0(struct tss_struct *tss,
-				   struct thread_struct *thread)
+static inline void
+native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
 {
 	tss->x86_tss.sp0 = thread->sp0;
 #ifdef CONFIG_X86_32
-	/* Only happens when SEP is enabled, no need to test "SEP"arately */
+	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
 	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
 		tss->x86_tss.ss1 = thread->sysenter_cs;
 		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
@@ -446,8 +500,8 @@ static inline void native_swapgs(void)
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
 #define __cpuid			native_cpuid
 #define paravirt_enabled()	0
 
 /*
  * These special macros can be used to get or set a debugging register
@@ -457,8 +511,8 @@ static inline void native_swapgs(void)
 #define set_debugreg(value, register)				\
 	native_set_debugreg(register, value)
 
-static inline void load_sp0(struct tss_struct *tss,
-			    struct thread_struct *thread)
+static inline void
+load_sp0(struct tss_struct *tss, struct thread_struct *thread)
 {
 	native_load_sp0(tss, thread);
 }
@@ -473,11 +527,12 @@ static inline void load_sp0(struct tss_struct *tss,
  * enable), so that any CPU's that boot up
  * after us can get the correct flags.
  */
 extern unsigned long mmu_cr4_features;
 
 static inline void set_in_cr4(unsigned long mask)
 {
 	unsigned cr4;
+
 	mmu_cr4_features |= mask;
 	cr4 = read_cr4();
 	cr4 |= mask;
@@ -487,6 +542,7 @@ static inline void set_in_cr4(unsigned long mask)
 static inline void clear_in_cr4(unsigned long mask)
 {
 	unsigned cr4;
+
 	mmu_cr4_features &= ~mask;
 	cr4 = read_cr4();
 	cr4 &= ~mask;
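set_in_cr4()/clear_in_cr4() follow a mirror-register pattern: update the software copy (mmu_cr4_features) first, so CPUs that boot later can inherit the same flags, then read-modify-write the live register. A hardware-free sketch of the pattern (all names are ours; read_reg/write_reg stand in for read_cr4/write_cr4):

static unsigned long feature_mirror;	/* plays mmu_cr4_features */
static unsigned long hw_reg;		/* plays CR4 */

static unsigned long read_reg(void)	{ return hw_reg; }
static void write_reg(unsigned long v)	{ hw_reg = v; }

static void set_in_reg(unsigned long mask)
{
	feature_mirror |= mask;		/* remembered for later CPUs */
	write_reg(read_reg() | mask);	/* applied to this CPU */
}

static void clear_in_reg(unsigned long mask)
{
	feature_mirror &= ~mask;
	write_reg(read_reg() & ~mask);
}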
@@ -494,42 +550,42 @@ static inline void clear_in_cr4(unsigned long mask)
 }
 
 struct microcode_header {
 	unsigned int		hdrver;
 	unsigned int		rev;
 	unsigned int		date;
 	unsigned int		sig;
 	unsigned int		cksum;
 	unsigned int		ldrver;
 	unsigned int		pf;
 	unsigned int		datasize;
 	unsigned int		totalsize;
 	unsigned int		reserved[3];
 };
 
 struct microcode {
 	struct microcode_header	hdr;
 	unsigned int		bits[0];
 };
 
 typedef struct microcode	microcode_t;
 typedef struct microcode_header	microcode_header_t;
 
 /* microcode format is extended from prescott processors */
 struct extended_signature {
 	unsigned int		sig;
 	unsigned int		pf;
 	unsigned int		cksum;
 };
 
 struct extended_sigtable {
 	unsigned int		count;
 	unsigned int		cksum;
 	unsigned int		reserved[3];
 	struct extended_signature sigs[0];
 };
 
 typedef struct {
 	unsigned long		seg;
 } mm_segment_t;
 
 
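A consumer of these structures walks a blob header by header, using totalsize to find the next update. One convention of the Intel microcode format is worth noting, and is an assumption here rather than something visible in the struct itself: a datasize of 0 means the pre-Prescott default of 2000 data bytes and 2048 total. A sketch:

#include <stddef.h>
#include <stdint.h>

struct ucode_header {			/* same layout as microcode_header */
	uint32_t hdrver, rev, date, sig, cksum, ldrver, pf;
	uint32_t datasize, totalsize;
	uint32_t reserved[3];
};

static size_t ucode_total_size(const struct ucode_header *hdr)
{
	return hdr->totalsize ? hdr->totalsize : 2048;
}

static size_t ucode_data_size(const struct ucode_header *hdr)
{
	return hdr->datasize ? hdr->datasize : 2000;
}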
@@ -541,7 +597,7 @@ extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
 
-/* Prepare to copy thread state - unlazy all lazy status */
+/* Prepare to copy thread state - unlazy all lazy state */
 extern void prepare_to_copy(struct task_struct *tsk);
 
 unsigned long get_wchan(struct task_struct *p);
@@ -578,118 +634,131 @@ static inline unsigned int cpuid_eax(unsigned int op)
 	unsigned int eax, ebx, ecx, edx;
 
 	cpuid(op, &eax, &ebx, &ecx, &edx);
+
 	return eax;
 }
 
 static inline unsigned int cpuid_ebx(unsigned int op)
 {
 	unsigned int eax, ebx, ecx, edx;
 
 	cpuid(op, &eax, &ebx, &ecx, &edx);
+
 	return ebx;
 }
 
 static inline unsigned int cpuid_ecx(unsigned int op)
 {
 	unsigned int eax, ebx, ecx, edx;
 
 	cpuid(op, &eax, &ebx, &ecx, &edx);
+
 	return ecx;
 }
 
 static inline unsigned int cpuid_edx(unsigned int op)
 {
 	unsigned int eax, ebx, ecx, edx;
 
 	cpuid(op, &eax, &ebx, &ecx, &edx);
+
 	return edx;
 }
 
 /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
 static inline void rep_nop(void)
 {
-	__asm__ __volatile__("rep;nop": : :"memory");
+	__asm__ __volatile__("rep; nop" ::: "memory");
 }
 
-/* Stop speculative execution */
+static inline void cpu_relax(void)
+{
+	rep_nop();
+}
+
+/* Stop speculative execution: */
 static inline void sync_core(void)
 {
 	int tmp;
+
 	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
 		     : "ebx", "ecx", "edx", "memory");
 }
 
-#define cpu_relax()	rep_nop()
-
-static inline void __monitor(const void *eax, unsigned long ecx,
-			     unsigned long edx)
+static inline void
+__monitor(const void *eax, unsigned long ecx, unsigned long edx)
 {
-	/* "monitor %eax,%ecx,%edx;" */
+	/* "monitor %eax, %ecx, %edx;" */
 	asm volatile(
-		".byte 0x0f,0x01,0xc8;"
-		: :"a" (eax), "c" (ecx), "d"(edx));
+		".byte 0x0f, 0x01, 0xc8;"
+		:: "a" (eax), "c" (ecx), "d"(edx));
 }
 
 static inline void __mwait(unsigned long eax, unsigned long ecx)
 {
-	/* "mwait %eax,%ecx;" */
+	/* "mwait %eax, %ecx;" */
 	asm volatile(
-		".byte 0x0f,0x01,0xc9;"
-		: :"a" (eax), "c" (ecx));
+		".byte 0x0f, 0x01, 0xc9;"
+		:: "a" (eax), "c" (ecx));
 }
 
 static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
 {
-	/* "mwait %eax,%ecx;" */
+	/* "mwait %eax, %ecx;" */
 	asm volatile(
-		"sti; .byte 0x0f,0x01,0xc9;"
-		: :"a" (eax), "c" (ecx));
+		"sti; .byte 0x0f, 0x01, 0xc9;"
+		:: "a" (eax), "c" (ecx));
 }
 
 extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
 
 extern int force_mwait;
 
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
 
 extern unsigned long boot_option_idle_override;
 
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
 
 /* Defined in head.S */
 extern struct desc_ptr early_gdt_descr;
 
 extern void cpu_set_gdt(int);
 extern void switch_to_new_gdt(void);
 extern void cpu_init(void);
 extern void init_gdt(int cpu);
 
-/* from system description table in BIOS. Mostly for MCA use, but
- * others may find it useful. */
-extern unsigned int machine_id;
-extern unsigned int machine_submodel_id;
-extern unsigned int BIOS_revision;
+/*
+ * from system description table in BIOS. Mostly for MCA use, but
+ * others may find it useful:
+ */
+extern unsigned int		machine_id;
+extern unsigned int		machine_submodel_id;
+extern unsigned int		BIOS_revision;
 
-/* Boot loader type from the setup header */
+/* Boot loader type from the setup header: */
 extern int bootloader_type;
 
 extern char ignore_fpu_irq;
-#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
 
 #define HAVE_ARCH_PICK_MMAP_LAYOUT 1
 #define ARCH_HAS_PREFETCHW
 #define ARCH_HAS_SPINLOCK_PREFETCH
 
 #ifdef CONFIG_X86_32
-#define BASE_PREFETCH	ASM_NOP4
-#define ARCH_HAS_PREFETCH
+# define BASE_PREFETCH		ASM_NOP4
+# define ARCH_HAS_PREFETCH
 #else
-#define BASE_PREFETCH	"prefetcht0 (%1)"
+# define BASE_PREFETCH		"prefetcht0 (%1)"
 #endif
 
-/* Prefetch instructions for Pentium III and AMD Athlon */
-/* It's not worth to care about 3dnow! prefetches for the K6
-   because they are microcoded there and very slow.
-   However we don't do prefetches for pre XP Athlons currently
-   That should be fixed. */
+/*
+ * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
+ *
+ * It's not worth to care about 3dnow prefetches for the K6
+ * because they are microcoded there and very slow.
+ */
 static inline void prefetch(const void *x)
 {
 	alternative_input(BASE_PREFETCH,
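cpu_relax() is now a proper inline wrapping rep_nop(); the canonical use is a PAUSE inside a busy-wait loop, which eases the memory pipeline and yields resources to an SMT sibling. A stand-alone sketch using C11 atomics (the loop is our illustration):

#include <stdatomic.h>

static inline void cpu_relax(void)
{
	__asm__ __volatile__("rep; nop" ::: "memory");	/* PAUSE */
}

static void spin_until_set(atomic_int *flag)
{
	while (!atomic_load_explicit(flag, memory_order_acquire))
		cpu_relax();
}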
@@ -698,8 +767,11 @@ static inline void prefetch(const void *x)
 			  "r" (x));
 }
 
-/* 3dnow! prefetch to get an exclusive cache line. Useful for
-   spinlocks to avoid one state transition in the cache coherency protocol. */
+/*
+ * 3dnow prefetch to get an exclusive cache line.
+ * Useful for spinlocks to avoid one state transition in the
+ * cache coherency protocol:
+ */
 static inline void prefetchw(const void *x)
 {
 	alternative_input(BASE_PREFETCH,
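prefetch()/prefetchw() patch in a real prefetch instruction via alternative_input() only on CPUs that have one. In ordinary code the same idea looks like the sketch below, issuing the prefetch a few iterations ahead of use (the distance of 8 is an arbitrary illustration, and __builtin_prefetch is the gcc builtin, not the kernel helper):

static long sum(const long *a, long n)
{
	long i, s = 0;

	for (i = 0; i < n; i++) {
		if (i + 8 < n)
			__builtin_prefetch(&a[i + 8]);	/* read prefetch */
		s += a[i];
	}
	return s;
}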
@@ -708,21 +780,25 @@ static inline void prefetchw(const void *x)
 			  "r" (x));
 }
 
-#define spin_lock_prefetch(x)	prefetchw(x)
+static inline void spin_lock_prefetch(const void *x)
+{
+	prefetchw(x);
+}
+
 #ifdef CONFIG_X86_32
 /*
  * User space process size: 3GB (default).
  */
-#define TASK_SIZE	(PAGE_OFFSET)
+#define TASK_SIZE	PAGE_OFFSET
 #define STACK_TOP	TASK_SIZE
 #define STACK_TOP_MAX	STACK_TOP
 
 #define INIT_THREAD  {							\
 	.sp0 = sizeof(init_stack) + (long)&init_stack,			\
 	.vm86_info = NULL,						\
 	.sysenter_cs = __KERNEL_CS,					\
 	.io_bitmap_ptr = NULL,						\
 	.fs = __KERNEL_PERCPU,						\
 }
 
@@ -731,26 +807,27 @@ static inline void prefetchw(const void *x)
  * permission bitmap. The extra byte must be all 1 bits, and must
  * be within the limit.
  */
 #define INIT_TSS  {							\
 	.x86_tss = {							\
 		.sp0		= sizeof(init_stack) + (long)&init_stack, \
 		.ss0		= __KERNEL_DS,				\
 		.ss1		= __KERNEL_CS,				\
 		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		\
 	},								\
 	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },	\
 }
 
-#define start_thread(regs, new_eip, new_esp) do {		\
+#define start_thread(regs, new_eip, new_esp)			\
+do {								\
 	__asm__("movl %0,%%gs": :"r" (0));			\
 	regs->fs = 0;						\
 	set_fs(USER_DS);					\
 	regs->ds = __USER_DS;					\
 	regs->es = __USER_DS;					\
 	regs->ss = __USER_DS;					\
 	regs->cs = __USER_CS;					\
 	regs->ip = new_eip;					\
 	regs->sp = new_esp;					\
 } while (0)
 
 
@@ -780,24 +857,24 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 	__regs__ - 1;						\
 })
 
 #define KSTK_ESP(task)		(task_pt_regs(task)->sp)
 
 #else
 /*
  * User space process size. 47bits minus one guard page.
  */
 #define TASK_SIZE64	(0x800000000000UL - 4096)
 
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
 #define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
 					0xc0000000 : 0xFFFFe000)
 
 #define TASK_SIZE		(test_thread_flag(TIF_IA32) ? \
 					IA32_PAGE_OFFSET : TASK_SIZE64)
 #define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? \
 					IA32_PAGE_OFFSET : TASK_SIZE64)
 
 #define STACK_TOP		TASK_SIZE
 #define STACK_TOP_MAX		TASK_SIZE64
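The constants are easier to read in decimal: 0x800000000000 is 2^47 (128 TiB), and TASK_SIZE64 carves one 4 KB guard page off the top, matching the "47bits minus one guard page" comment. A quick check:

#include <stdio.h>

int main(void)
{
	unsigned long top = 0x800000000000UL;		/* 2^47 */

	printf("2^47 = %lu TiB\n", top >> 40);		/* prints 128 */
	printf("TASK_SIZE64 = %#lx\n", top - 4096);	/* 0x7ffffffff000 */
	return 0;
}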
@@ -813,12 +890,12 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 #define start_thread(regs, new_rip, new_rsp) do {			     \
 	asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0));  \
 	load_gs_index(0);						     \
 	(regs)->ip = (new_rip);						     \
 	(regs)->sp = (new_rsp);						     \
 	write_pda(oldrsp, (new_rsp));					     \
 	(regs)->cs = __USER_CS;						     \
 	(regs)->ss = __USER_DS;						     \
 	(regs)->flags = 0x200;						     \
 	set_fs(USER_DS);						     \
 } while (0)
 
@@ -826,17 +903,18 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
  * Return saved PC of a blocked thread.
  * What is this good for? it will be always the scheduler or ret_from_fork.
  */
 #define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))
 
 #define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
 #define KSTK_ESP(tsk)		-1 /* sorry. doesn't work for syscall. */
 #endif /* CONFIG_X86_64 */
 
-/* This decides where the kernel will search for a free chunk of vm
+/*
+ * This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
 #define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
 
 #define KSTK_EIP(task)		(task_pt_regs(task)->ip)
 
 #endif