Diffstat (limited to 'include/asm-x86/processor.h')
-rw-r--r--  include/asm-x86/processor.h | 743
1 file changed, 416 insertions(+), 327 deletions(-)
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 45a2f0ab33d0..e6bf92ddeb21 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -3,8 +3,7 @@
 
 #include <asm/processor-flags.h>
 
-/* migration helpers, for KVM - will be removed in 2.6.25: */
-#include <asm/vm86.h>
+/* migration helper, for KVM - will be removed in 2.6.25: */
 #define Xgt_desc_struct	desc_ptr
 
 /* Forward declaration, a strange C thing */
@@ -24,6 +23,7 @@ struct mm_struct;
 #include <asm/msr.h>
 #include <asm/desc_defs.h>
 #include <asm/nops.h>
+
 #include <linux/personality.h>
 #include <linux/cpumask.h>
 #include <linux/cache.h>
@@ -37,16 +37,18 @@ struct mm_struct;
 static inline void *current_text_addr(void)
 {
 	void *pc;
-	asm volatile("mov $1f,%0\n1:":"=r" (pc));
+
+	asm volatile("mov $1f, %0; 1:":"=r" (pc));
+
 	return pc;
 }
 
 #ifdef CONFIG_X86_VSMP
-#define ARCH_MIN_TASKALIGN	(1 << INTERNODE_CACHE_SHIFT)
-#define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
+# define ARCH_MIN_TASKALIGN	(1 << INTERNODE_CACHE_SHIFT)
+# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
 #else
-#define ARCH_MIN_TASKALIGN	16
-#define ARCH_MIN_MMSTRUCT_ALIGN	0
+# define ARCH_MIN_TASKALIGN	16
+# define ARCH_MIN_MMSTRUCT_ALIGN	0
 #endif
 
 /*
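The reworked current_text_addr() is worth a close look: the inline asm stores the address of the local label "1:" (the instruction right after the mov) into pc. A minimal stand-alone sketch of the same trick, assuming an x86 target and a non-PIE build (the absolute "mov $1f" needs a link-time text address):

#include <stdio.h>

static void *current_text_addr(void)
{
	void *pc;

	/* Store the address of local label "1:" into pc. */
	asm volatile("mov $1f, %0; 1:" : "=r" (pc));

	return pc;
}

int main(void)
{
	/* Prints an address inside current_text_addr()'s own body. */
	printf("executing near %p\n", current_text_addr());
	return 0;
}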
@@ -56,69 +58,82 @@ static inline void *current_text_addr(void)
  */
 
 struct cpuinfo_x86 {
 	__u8 x86;		/* CPU family */
 	__u8 x86_vendor;	/* CPU vendor */
 	__u8 x86_model;
 	__u8 x86_mask;
 #ifdef CONFIG_X86_32
 	char wp_works_ok;	/* It doesn't on 386's */
-	char hlt_works_ok;	/* Problems on some 486Dx4's and old 386's */
+
+	/* Problems on some 486Dx4's and old 386's: */
+	char hlt_works_ok;
 	char hard_math;
 	char rfu;
 	char fdiv_bug;
 	char f00f_bug;
 	char coma_bug;
 	char pad0;
 #else
-	/* number of 4K pages in DTLB/ITLB combined(in pages)*/
+	/* Number of 4K pages in DTLB/ITLB combined(in pages): */
 	int x86_tlbsize;
-	__u8 x86_virt_bits, x86_phys_bits;
-	/* cpuid returned core id bits */
+	__u8 x86_virt_bits;
+	__u8 x86_phys_bits;
+	/* CPUID returned core id bits: */
 	__u8 x86_coreid_bits;
-	/* Max extended CPUID function supported */
+	/* Max extended CPUID function supported: */
 	__u32 extended_cpuid_level;
 #endif
-	int cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
+	/* Maximum supported CPUID level, -1=no CPUID: */
+	int cpuid_level;
 	__u32 x86_capability[NCAPINTS];
 	char x86_vendor_id[16];
 	char x86_model_id[64];
-	int x86_cache_size;	/* in KB - valid for CPUS which support this
-				   call */
+	/* in KB - valid for CPUS which support this call: */
+	int x86_cache_size;
 	int x86_cache_alignment;	/* In bytes */
 	int x86_power;
 	unsigned long loops_per_jiffy;
 #ifdef CONFIG_SMP
-	cpumask_t llc_shared_map;	/* cpus sharing the last level cache */
+	/* cpus sharing the last level cache: */
+	cpumask_t llc_shared_map;
 #endif
-	u16 x86_max_cores;	/* cpuid returned max cores value */
+	/* cpuid returned max cores value: */
+	u16 x86_max_cores;
 	u16 apicid;
+	u16 initial_apicid;
 	u16 x86_clflush_size;
 #ifdef CONFIG_SMP
-	u16 booted_cores;	/* number of cores as seen by OS */
-	u16 phys_proc_id;	/* Physical processor id. */
-	u16 cpu_core_id;	/* Core id */
-	u16 cpu_index;		/* index into per_cpu list */
+	/* number of cores as seen by the OS: */
+	u16 booted_cores;
+	/* Physical processor id: */
+	u16 phys_proc_id;
+	/* Core id: */
+	u16 cpu_core_id;
+	/* Index into per_cpu list: */
+	u16 cpu_index;
 #endif
 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
 
 #define X86_VENDOR_INTEL	0
 #define X86_VENDOR_CYRIX	1
 #define X86_VENDOR_AMD		2
 #define X86_VENDOR_UMC		3
 #define X86_VENDOR_NEXGEN	4
 #define X86_VENDOR_CENTAUR	5
 #define X86_VENDOR_TRANSMETA	7
 #define X86_VENDOR_NSC		8
 #define X86_VENDOR_NUM		9
-#define X86_VENDOR_UNKNOWN	0xff
+
+#define X86_VENDOR_UNKNOWN	0xff
 
 /*
  * capabilities of CPUs
  */
 extern struct cpuinfo_x86 boot_cpu_data;
 extern struct cpuinfo_x86 new_cpu_data;
-extern struct tss_struct doublefault_tss;
-extern __u32 cleared_cpu_caps[NCAPINTS];
+
+extern struct tss_struct doublefault_tss;
+extern __u32 cleared_cpu_caps[NCAPINTS];
 
 #ifdef CONFIG_SMP
 DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
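For orientation, this is roughly how the fields carried by struct cpuinfo_x86 get consumed elsewhere in the tree. The helper below is a hypothetical sketch (kernel context assumed); only boot_cpu_data and the X86_VENDOR_* constants come from this header:

static void report_boot_cpu(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	printk(KERN_INFO "CPU: vendor %u, family %u, model %u, stepping %u\n",
	       c->x86_vendor, c->x86, c->x86_model, c->x86_mask);

	/* Vendor checks compare against the X86_VENDOR_* constants: */
	if (c->x86_vendor == X86_VENDOR_UNKNOWN)
		printk(KERN_WARNING "CPU: unrecognized vendor id '%s'\n",
		       c->x86_vendor_id);
}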
@@ -129,7 +144,18 @@ DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
 #define current_cpu_data	boot_cpu_data
 #endif
 
-void cpu_detect(struct cpuinfo_x86 *c);
+static inline int hlt_works(int cpu)
+{
+#ifdef CONFIG_X86_32
+	return cpu_data(cpu).hlt_works_ok;
+#else
+	return 1;
+#endif
+}
+
+#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)
+
+extern void cpu_detect(struct cpuinfo_x86 *c);
 
 extern void identify_cpu(struct cpuinfo_x86 *);
 extern void identify_boot_cpu(void);
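The new hlt_works() predicate folds the old hlt_works_ok quirk flag behind one interface (always true on 64-bit). An illustrative caller only, assuming the usual safe_halt()/cpu_relax() helpers from the surrounding headers:

static void idle_step(int cpu)
{
	if (hlt_works(cpu))
		safe_halt();	/* sti; hlt - sleep until the next interrupt */
	else
		cpu_relax();	/* rep; nop - busy-wait politely instead */
}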
@@ -146,15 +172,15 @@ static inline void detect_ht(struct cpuinfo_x86 *c) {}
 #endif
 
 static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
 				unsigned int *ecx, unsigned int *edx)
 {
 	/* ecx is often an input as well as an output. */
-	__asm__("cpuid"
+	asm("cpuid"
 	    : "=a" (*eax),
 	      "=b" (*ebx),
 	      "=c" (*ecx),
 	      "=d" (*edx)
 	    : "0" (*eax), "2" (*ecx));
 }
 
 static inline void load_cr3(pgd_t *pgdir)
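Note the constraint string: "0" (*eax), "2" (*ecx) feeds EAX and ECX in as inputs, which is why callers must initialize both. A hedged sketch of reading the vendor string through this helper (leaf 0 returns it in EBX, EDX, ECX, in that order):

static void show_cpu_vendor(void)
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
	char vendor[13];

	native_cpuid(&eax, &ebx, &ecx, &edx);

	memcpy(vendor + 0, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';	/* e.g. "GenuineIntel" or "AuthenticAMD" */

	/* eax now holds the highest supported standard CPUID leaf. */
}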
@@ -165,54 +191,67 @@ static inline void load_cr3(pgd_t *pgdir)
 #ifdef CONFIG_X86_32
 /* This is the TSS defined by the hardware. */
 struct x86_hw_tss {
 	unsigned short back_link, __blh;
 	unsigned long sp0;
 	unsigned short ss0, __ss0h;
 	unsigned long sp1;
-	unsigned short ss1, __ss1h;	/* ss1 caches MSR_IA32_SYSENTER_CS */
+	/* ss1 caches MSR_IA32_SYSENTER_CS: */
+	unsigned short ss1, __ss1h;
 	unsigned long sp2;
 	unsigned short ss2, __ss2h;
 	unsigned long __cr3;
 	unsigned long ip;
 	unsigned long flags;
-	unsigned long ax, cx, dx, bx;
-	unsigned long sp, bp, si, di;
+	unsigned long ax;
+	unsigned long cx;
+	unsigned long dx;
+	unsigned long bx;
+	unsigned long sp;
+	unsigned long bp;
+	unsigned long si;
+	unsigned long di;
 	unsigned short es, __esh;
 	unsigned short cs, __csh;
 	unsigned short ss, __ssh;
 	unsigned short ds, __dsh;
 	unsigned short fs, __fsh;
 	unsigned short gs, __gsh;
 	unsigned short ldt, __ldth;
-	unsigned short trace, io_bitmap_base;
+	unsigned short trace;
+	unsigned short io_bitmap_base;
+
 } __attribute__((packed));
 #else
 struct x86_hw_tss {
 	u32 reserved1;
 	u64 sp0;
 	u64 sp1;
 	u64 sp2;
 	u64 reserved2;
 	u64 ist[7];
 	u32 reserved3;
 	u32 reserved4;
 	u16 reserved5;
 	u16 io_bitmap_base;
+
 } __attribute__((packed)) ____cacheline_aligned;
 #endif
 
 /*
- * Size of io_bitmap.
+ * IO-bitmap sizes:
  */
 #define IO_BITMAP_BITS			65536
 #define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
 #define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
 #define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
 #define INVALID_IO_BITMAP_OFFSET	0x8000
 #define INVALID_IO_BITMAP_OFFSET_LAZY	0x9000
 
 struct tss_struct {
-	struct x86_hw_tss x86_tss;
+	/*
+	 * The hardware state:
+	 */
+	struct x86_hw_tss x86_tss;
 
 	/*
 	 * The extra 1 is there because the CPU will access an
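The io_bitmap constants are worth a quick sanity check; a user-space sketch of the arithmetic (values for an LP64 build shown in the comment):

#include <stdio.h>

#define IO_BITMAP_BITS	65536
#define IO_BITMAP_BYTES	(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS	(IO_BITMAP_BYTES/sizeof(long))

int main(void)
{
	/* One bit per I/O port: 65536 bits -> 8192 bytes. */
	printf("bytes=%d longs=%zu\n", IO_BITMAP_BYTES, IO_BITMAP_LONGS);
	return 0;	/* prints "bytes=8192 longs=1024" when sizeof(long) == 8 */
}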
@@ -220,135 +259,164 @@ struct tss_struct {
 	 * bitmap. The extra byte must be all 1 bits, and must
 	 * be within the limit.
 	 */
 	unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
 	/*
 	 * Cache the current maximum and the last task that used the bitmap:
 	 */
 	unsigned long io_bitmap_max;
 	struct thread_struct *io_bitmap_owner;
+
 	/*
-	 * pads the TSS to be cacheline-aligned (size is 0x100)
+	 * Pad the TSS to be cacheline-aligned (size is 0x100):
 	 */
 	unsigned long __cacheline_filler[35];
 	/*
-	 * .. and then another 0x100 bytes for emergency kernel stack
+	 * .. and then another 0x100 bytes for the emergency kernel stack:
 	 */
 	unsigned long stack[64];
+
 } __attribute__((packed));
 
 DECLARE_PER_CPU(struct tss_struct, init_tss);
 
-/* Save the original ist values for checking stack pointers during debugging */
+/*
+ * Save the original ist values for checking stack pointers during debugging
+ */
 struct orig_ist {
 	unsigned long ist[7];
 };
 
 #define MXCSR_DEFAULT	0x1f80
 
 struct i387_fsave_struct {
-	u32 cwd;
-	u32 swd;
-	u32 twd;
-	u32 fip;
-	u32 fcs;
-	u32 foo;
-	u32 fos;
-	u32 st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
-	u32 status;		/* software status information */
+	u32 cwd;	/* FPU Control Word */
+	u32 swd;	/* FPU Status Word */
+	u32 twd;	/* FPU Tag Word */
+	u32 fip;	/* FPU IP Offset */
+	u32 fcs;	/* FPU IP Selector */
+	u32 foo;	/* FPU Operand Pointer Offset */
+	u32 fos;	/* FPU Operand Pointer Selector */
+
+	/* 8*10 bytes for each FP-reg = 80 bytes: */
+	u32 st_space[20];
+
+	/* Software status information [not touched by FSAVE ]: */
+	u32 status;
 };
 
 struct i387_fxsave_struct {
-	u16 cwd;
-	u16 swd;
-	u16 twd;
-	u16 fop;
+	u16 cwd;	/* Control Word */
+	u16 swd;	/* Status Word */
+	u16 twd;	/* Tag Word */
+	u16 fop;	/* Last Instruction Opcode */
 	union {
 		struct {
-			u64 rip;
-			u64 rdp;
+			u64 rip;	/* Instruction Pointer */
+			u64 rdp;	/* Data Pointer */
 		};
 		struct {
-			u32 fip;
-			u32 fcs;
-			u32 foo;
-			u32 fos;
+			u32 fip;	/* FPU IP Offset */
+			u32 fcs;	/* FPU IP Selector */
+			u32 foo;	/* FPU Operand Offset */
+			u32 fos;	/* FPU Operand Selector */
 		};
 	};
-	u32 mxcsr;
-	u32 mxcsr_mask;
-	u32 st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
-	u32 xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
-	u32 padding[24];
+	u32 mxcsr;	/* MXCSR Register State */
+	u32 mxcsr_mask;	/* MXCSR Mask */
+
+	/* 8*16 bytes for each FP-reg = 128 bytes: */
+	u32 st_space[32];
+
+	/* 16*16 bytes for each XMM-reg = 256 bytes: */
+	u32 xmm_space[64];
+
+	u32 padding[24];
+
 } __attribute__((aligned(16)));
 
 struct i387_soft_struct {
 	u32 cwd;
 	u32 swd;
 	u32 twd;
 	u32 fip;
 	u32 fcs;
 	u32 foo;
 	u32 fos;
-	u32 st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
-	u8 ftop, changed, lookahead, no_update, rm, alimit;
+	/* 8*10 bytes for each FP-reg = 80 bytes: */
+	u32 st_space[20];
+	u8 ftop;
+	u8 changed;
+	u8 lookahead;
+	u8 no_update;
+	u8 rm;
+	u8 alimit;
 	struct info *info;
 	u32 entry_eip;
 };
 
-union i387_union {
+union thread_xstate {
 	struct i387_fsave_struct fsave;
 	struct i387_fxsave_struct fxsave;
 	struct i387_soft_struct soft;
 };
 
-#ifdef CONFIG_X86_32
-DECLARE_PER_CPU(u8, cpu_llc_id);
-#else
+#ifdef CONFIG_X86_64
 DECLARE_PER_CPU(struct orig_ist, orig_ist);
 #endif
 
 extern void print_cpu_info(struct cpuinfo_x86 *);
+extern unsigned int xstate_size;
+extern void free_thread_xstate(struct task_struct *);
+extern struct kmem_cache *task_xstate_cachep;
 extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
 extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
 extern unsigned short num_cache_leaves;
 
 struct thread_struct {
-/* cached TLS descriptors. */
+	/* Cached TLS descriptors: */
 	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
 	unsigned long sp0;
 	unsigned long sp;
 #ifdef CONFIG_X86_32
 	unsigned long sysenter_cs;
 #else
 	unsigned long usersp;	/* Copy from PDA */
-	unsigned short es, ds, fsindex, gsindex;
+	unsigned short es;
+	unsigned short ds;
+	unsigned short fsindex;
+	unsigned short gsindex;
 #endif
 	unsigned long ip;
 	unsigned long fs;
 	unsigned long gs;
-/* Hardware debugging registers */
+	/* Hardware debugging registers: */
 	unsigned long debugreg0;
 	unsigned long debugreg1;
 	unsigned long debugreg2;
 	unsigned long debugreg3;
 	unsigned long debugreg6;
 	unsigned long debugreg7;
-/* fault info */
-	unsigned long cr2, trap_no, error_code;
-/* floating point info */
-	union i387_union i387 __attribute__((aligned(16)));;
+	/* Fault info: */
+	unsigned long cr2;
+	unsigned long trap_no;
+	unsigned long error_code;
+	/* floating point and extended processor state */
+	union thread_xstate *xstate;
 #ifdef CONFIG_X86_32
-/* virtual 86 mode info */
+	/* Virtual 86 mode info */
 	struct vm86_struct __user *vm86_info;
 	unsigned long screen_bitmap;
-	unsigned long v86flags, v86mask, saved_sp0;
-	unsigned int saved_fs, saved_gs;
+	unsigned long v86flags;
+	unsigned long v86mask;
+	unsigned long saved_sp0;
+	unsigned int saved_fs;
+	unsigned int saved_gs;
 #endif
-/* IO permissions */
+	/* IO permissions: */
 	unsigned long *io_bitmap_ptr;
 	unsigned long iopl;
-/* max allowed port in the bitmap, in bytes: */
+	/* Max allowed port in the bitmap, in bytes: */
 	unsigned io_bitmap_max;
 /* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
 	unsigned long debugctlmsr;
 /* Debug Store - if not 0 points to a DS Save Area configuration;
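The switch from an embedded union i387_union to a union thread_xstate pointer means the FPU state is now allocated out of line, from task_xstate_cachep. A hypothetical allocation path for illustration (the real code lives in arch/x86/kernel/process*.c; only the externs above come from this header):

static int alloc_thread_xstate(struct task_struct *tsk)
{
	/* xstate_size is fxsave- or fsave-sized, chosen during CPU setup. */
	tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!tsk->thread.xstate)
		return -ENOMEM;

	return 0;
}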
@@ -358,21 +426,27 @@ struct thread_struct {
 
 static inline unsigned long native_get_debugreg(int regno)
 {
 	unsigned long val = 0;	/* Damn you, gcc! */
 
 	switch (regno) {
 	case 0:
-		asm("mov %%db0, %0" :"=r" (val)); break;
+		asm("mov %%db0, %0" :"=r" (val));
+		break;
 	case 1:
-		asm("mov %%db1, %0" :"=r" (val)); break;
+		asm("mov %%db1, %0" :"=r" (val));
+		break;
 	case 2:
-		asm("mov %%db2, %0" :"=r" (val)); break;
+		asm("mov %%db2, %0" :"=r" (val));
+		break;
 	case 3:
-		asm("mov %%db3, %0" :"=r" (val)); break;
+		asm("mov %%db3, %0" :"=r" (val));
+		break;
 	case 6:
-		asm("mov %%db6, %0" :"=r" (val)); break;
+		asm("mov %%db6, %0" :"=r" (val));
+		break;
 	case 7:
-		asm("mov %%db7, %0" :"=r" (val)); break;
+		asm("mov %%db7, %0" :"=r" (val));
+		break;
 	default:
 		BUG();
 	}
@@ -383,22 +457,22 @@ static inline void native_set_debugreg(int regno, unsigned long value)
 {
 	switch (regno) {
 	case 0:
-		asm("mov %0,%%db0"	: /* no output */ :"r" (value));
+		asm("mov %0, %%db0"	::"r" (value));
 		break;
 	case 1:
-		asm("mov %0,%%db1"	: /* no output */ :"r" (value));
+		asm("mov %0, %%db1"	::"r" (value));
 		break;
 	case 2:
-		asm("mov %0,%%db2"	: /* no output */ :"r" (value));
+		asm("mov %0, %%db2"	::"r" (value));
 		break;
 	case 3:
-		asm("mov %0,%%db3"	: /* no output */ :"r" (value));
+		asm("mov %0, %%db3"	::"r" (value));
 		break;
 	case 6:
-		asm("mov %0,%%db6"	: /* no output */ :"r" (value));
+		asm("mov %0, %%db6"	::"r" (value));
 		break;
 	case 7:
-		asm("mov %0,%%db7"	: /* no output */ :"r" (value));
+		asm("mov %0, %%db7"	::"r" (value));
 		break;
 	default:
 		BUG();
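Taken together, the two accessors give a read-modify-write pattern for the debug registers. An illustrative (hypothetical) helper that arms hardware breakpoint 0, assuming only the functions shown in the two hunks above:

static void arm_hw_breakpoint0(unsigned long addr)
{
	unsigned long dr7;

	native_set_debugreg(0, addr);	/* DR0 = linear address to watch */

	dr7 = native_get_debugreg(7);
	dr7 |= 0x1;			/* L0 bit: enable breakpoint 0 locally */
	native_set_debugreg(7, dr7);
}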
@@ -412,23 +486,24 @@ static inline void native_set_iopl_mask(unsigned mask)
 {
 #ifdef CONFIG_X86_32
 	unsigned int reg;
-	__asm__ __volatile__ ("pushfl;"
-			      "popl %0;"
-			      "andl %1, %0;"
-			      "orl %2, %0;"
-			      "pushl %0;"
-			      "popfl"
-			      : "=&r" (reg)
-			      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
+
+	asm volatile ("pushfl;"
+		      "popl %0;"
+		      "andl %1, %0;"
+		      "orl %2, %0;"
+		      "pushl %0;"
+		      "popfl"
+		      : "=&r" (reg)
+		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
 #endif
 }
 
-static inline void native_load_sp0(struct tss_struct *tss,
-				   struct thread_struct *thread)
+static inline void
+native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
 {
 	tss->x86_tss.sp0 = thread->sp0;
 #ifdef CONFIG_X86_32
-	/* Only happens when SEP is enabled, no need to test "SEP"arately */
+	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
 	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
 		tss->x86_tss.ss1 = thread->sysenter_cs;
 		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
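The pushfl/popfl sequence in native_set_iopl_mask() is just a masked insert on the EFLAGS image; the same computation as plain C, for illustration (X86_EFLAGS_IOPL is the two-bit 0x3000 field):

static unsigned long apply_iopl(unsigned long flags, unsigned long mask)
{
	flags &= ~0x3000UL;	/* clear IOPL, bits 12-13 of EFLAGS */
	flags |= mask;		/* insert the caller's IOPL value */

	return flags;		/* the asm writes this back via popfl */
}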
@@ -446,8 +521,8 @@ static inline void native_swapgs(void)
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
 #define __cpuid			native_cpuid
 #define paravirt_enabled()	0
 
 /*
  * These special macros can be used to get or set a debugging register
@@ -473,11 +548,12 @@ static inline void load_sp0(struct tss_struct *tss,
  * enable), so that any CPU's that boot up
  * after us can get the correct flags.
  */
 extern unsigned long mmu_cr4_features;
 
 static inline void set_in_cr4(unsigned long mask)
 {
 	unsigned cr4;
+
 	mmu_cr4_features |= mask;
 	cr4 = read_cr4();
 	cr4 |= mask;
@@ -487,6 +563,7 @@ static inline void set_in_cr4(unsigned long mask)
 static inline void clear_in_cr4(unsigned long mask)
 {
 	unsigned cr4;
+
 	mmu_cr4_features &= ~mask;
 	cr4 = read_cr4();
 	cr4 &= ~mask;
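Typical call pattern for the set_in_cr4()/clear_in_cr4() pair, as an illustrative sketch: mmu_cr4_features records the bit so CPUs that boot later can re-apply it (X86_CR4_PGE comes from <asm/processor-flags.h>; the function itself is hypothetical):

static void __init enable_global_pages(void)
{
	/* Turn on global pages if the boot CPU advertises PGE. */
	if (cpu_has_pge)
		set_in_cr4(X86_CR4_PGE);
}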
@@ -494,42 +571,42 @@ static inline void clear_in_cr4(unsigned long mask)
 }
 
 struct microcode_header {
 	unsigned int hdrver;
 	unsigned int rev;
 	unsigned int date;
 	unsigned int sig;
 	unsigned int cksum;
 	unsigned int ldrver;
 	unsigned int pf;
 	unsigned int datasize;
 	unsigned int totalsize;
 	unsigned int reserved[3];
 };
 
 struct microcode {
 	struct microcode_header hdr;
 	unsigned int bits[0];
 };
 
 typedef struct microcode microcode_t;
 typedef struct microcode_header microcode_header_t;
 
 /* microcode format is extended from prescott processors */
 struct extended_signature {
 	unsigned int sig;
 	unsigned int pf;
 	unsigned int cksum;
 };
 
 struct extended_sigtable {
 	unsigned int count;
 	unsigned int cksum;
 	unsigned int reserved[3];
 	struct extended_signature sigs[0];
 };
 
 typedef struct {
 	unsigned long seg;
 } mm_segment_t;
 
 
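A short, hypothetical consumer of the microcode structures above, matching a CPU signature against an extended signature table (the real loader is in arch/x86/kernel/microcode*.c):

static int ext_table_has_sig(struct extended_sigtable *ext, unsigned int sig)
{
	unsigned int i;

	for (i = 0; i < ext->count; i++)
		if (ext->sigs[i].sig == sig)
			return 1;	/* update applies to this CPU */

	return 0;
}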
@@ -541,7 +618,7 @@ extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
 
-/* Prepare to copy thread state - unlazy all lazy status */
+/* Prepare to copy thread state - unlazy all lazy state */
 extern void prepare_to_copy(struct task_struct *tsk);
 
 unsigned long get_wchan(struct task_struct *p);
@@ -578,118 +655,137 @@ static inline unsigned int cpuid_eax(unsigned int op)
 	unsigned int eax, ebx, ecx, edx;
 
 	cpuid(op, &eax, &ebx, &ecx, &edx);
+
 	return eax;
 }
+
 static inline unsigned int cpuid_ebx(unsigned int op)
 {
 	unsigned int eax, ebx, ecx, edx;
 
 	cpuid(op, &eax, &ebx, &ecx, &edx);
+
 	return ebx;
 }
+
 static inline unsigned int cpuid_ecx(unsigned int op)
 {
 	unsigned int eax, ebx, ecx, edx;
 
 	cpuid(op, &eax, &ebx, &ecx, &edx);
+
 	return ecx;
 }
+
 static inline unsigned int cpuid_edx(unsigned int op)
 {
 	unsigned int eax, ebx, ecx, edx;
 
 	cpuid(op, &eax, &ebx, &ecx, &edx);
+
 	return edx;
 }
 
 /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
 static inline void rep_nop(void)
 {
-	__asm__ __volatile__("rep;nop": : :"memory");
+	asm volatile("rep; nop" ::: "memory");
 }
 
+static inline void cpu_relax(void)
+{
+	rep_nop();
+}
+
-/* Stop speculative execution */
+/* Stop speculative execution: */
 static inline void sync_core(void)
 {
 	int tmp;
+
 	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
 		     : "ebx", "ecx", "edx", "memory");
 }
 
-#define cpu_relax()	rep_nop()
-
 static inline void __monitor(const void *eax, unsigned long ecx,
 			     unsigned long edx)
 {
-	/* "monitor %eax,%ecx,%edx;" */
-	asm volatile(
-		".byte 0x0f,0x01,0xc8;"
-		: :"a" (eax), "c" (ecx), "d"(edx));
+	/* "monitor %eax, %ecx, %edx;" */
+	asm volatile(".byte 0x0f, 0x01, 0xc8;"
+		     :: "a" (eax), "c" (ecx), "d"(edx));
 }
 
 static inline void __mwait(unsigned long eax, unsigned long ecx)
 {
-	/* "mwait %eax,%ecx;" */
-	asm volatile(
-		".byte 0x0f,0x01,0xc9;"
-		: :"a" (eax), "c" (ecx));
+	/* "mwait %eax, %ecx;" */
+	asm volatile(".byte 0x0f, 0x01, 0xc9;"
+		     :: "a" (eax), "c" (ecx));
 }
 
 static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
 {
-	/* "mwait %eax,%ecx;" */
-	asm volatile(
-		"sti; .byte 0x0f,0x01,0xc9;"
-		: :"a" (eax), "c" (ecx));
+	/* "mwait %eax, %ecx;" */
+	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
+		     :: "a" (eax), "c" (ecx));
 }
 
 extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
 
 extern int force_mwait;
 
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
 
 extern unsigned long boot_option_idle_override;
 
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
 
 /* Defined in head.S */
 extern struct desc_ptr early_gdt_descr;
 
 extern void cpu_set_gdt(int);
 extern void switch_to_new_gdt(void);
 extern void cpu_init(void);
 extern void init_gdt(int cpu);
 
-/* from system description table in BIOS. Mostly for MCA use, but
- * others may find it useful. */
+static inline void update_debugctlmsr(unsigned long debugctlmsr)
+{
+#ifndef CONFIG_X86_DEBUGCTLMSR
+	if (boot_cpu_data.x86 < 6)
+		return;
+#endif
+	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
+}
+
+/*
+ * from system description table in BIOS. Mostly for MCA use, but
+ * others may find it useful:
+ */
 extern unsigned int machine_id;
 extern unsigned int machine_submodel_id;
 extern unsigned int BIOS_revision;
 
-/* Boot loader type from the setup header */
+/* Boot loader type from the setup header: */
 extern int bootloader_type;
 
 extern char ignore_fpu_irq;
-#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
 
 #define HAVE_ARCH_PICK_MMAP_LAYOUT 1
 #define ARCH_HAS_PREFETCHW
 #define ARCH_HAS_SPINLOCK_PREFETCH
 
 #ifdef CONFIG_X86_32
-#define BASE_PREFETCH	ASM_NOP4
-#define ARCH_HAS_PREFETCH
+# define BASE_PREFETCH	ASM_NOP4
+# define ARCH_HAS_PREFETCH
 #else
-#define BASE_PREFETCH	"prefetcht0 (%1)"
+# define BASE_PREFETCH	"prefetcht0 (%1)"
 #endif
 
-/* Prefetch instructions for Pentium III and AMD Athlon */
-/* It's not worth to care about 3dnow! prefetches for the K6
-   because they are microcoded there and very slow.
-   However we don't do prefetches for pre XP Athlons currently
-   That should be fixed. */
+/*
+ * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
+ *
+ * It's not worth to care about 3dnow prefetches for the K6
+ * because they are microcoded there and very slow.
+ */
 static inline void prefetch(const void *x)
 {
 	alternative_input(BASE_PREFETCH,
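The cpuid_eax()/cpuid_ebx() convenience wrappers earlier in this hunk read like this in practice; a hedged sketch probing the physical address width (CPUID leaf 0x80000008, EAX bits 7:0):

static unsigned int phys_addr_bits(void)
{
	/* Extended leaves must be gated on leaf 0x80000000 first. */
	if (cpuid_eax(0x80000000) >= 0x80000008)
		return cpuid_eax(0x80000008) & 0xff;

	return 32;	/* conservative default for very old CPUs */
}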
@@ -698,8 +794,11 @@ static inline void prefetch(const void *x)
 			  "r" (x));
 }
 
-/* 3dnow! prefetch to get an exclusive cache line. Useful for
-   spinlocks to avoid one state transition in the cache coherency protocol. */
+/*
+ * 3dnow prefetch to get an exclusive cache line.
+ * Useful for spinlocks to avoid one state transition in the
+ * cache coherency protocol:
+ */
 static inline void prefetchw(const void *x)
 {
 	alternative_input(BASE_PREFETCH,
708 "r" (x)); 807 "r" (x));
709} 808}
710 809
711#define spin_lock_prefetch(x) prefetchw(x) 810static inline void spin_lock_prefetch(const void *x)
811{
812 prefetchw(x);
813}
814
712#ifdef CONFIG_X86_32 815#ifdef CONFIG_X86_32
713/* 816/*
714 * User space process size: 3GB (default). 817 * User space process size: 3GB (default).
715 */ 818 */
716#define TASK_SIZE (PAGE_OFFSET) 819#define TASK_SIZE PAGE_OFFSET
717#define STACK_TOP TASK_SIZE 820#define STACK_TOP TASK_SIZE
718#define STACK_TOP_MAX STACK_TOP 821#define STACK_TOP_MAX STACK_TOP
719 822
720#define INIT_THREAD { \ 823#define INIT_THREAD { \
721 .sp0 = sizeof(init_stack) + (long)&init_stack, \ 824 .sp0 = sizeof(init_stack) + (long)&init_stack, \
722 .vm86_info = NULL, \ 825 .vm86_info = NULL, \
723 .sysenter_cs = __KERNEL_CS, \ 826 .sysenter_cs = __KERNEL_CS, \
724 .io_bitmap_ptr = NULL, \ 827 .io_bitmap_ptr = NULL, \
725 .fs = __KERNEL_PERCPU, \ 828 .fs = __KERNEL_PERCPU, \
726} 829}
727 830
728/* 831/*
@@ -731,28 +834,15 @@ static inline void prefetchw(const void *x)
  * permission bitmap. The extra byte must be all 1 bits, and must
  * be within the limit.
  */
 #define INIT_TSS  { \
 	.x86_tss = { \
 		.sp0 = sizeof(init_stack) + (long)&init_stack, \
 		.ss0 = __KERNEL_DS, \
 		.ss1 = __KERNEL_CS, \
 		.io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
 	 }, \
 	.io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \
 }
-
-#define start_thread(regs, new_eip, new_esp) do { \
-	__asm__("movl %0,%%gs": :"r" (0)); \
-	regs->fs = 0; \
-	set_fs(USER_DS); \
-	regs->ds = __USER_DS; \
-	regs->es = __USER_DS; \
-	regs->ss = __USER_DS; \
-	regs->cs = __USER_CS; \
-	regs->ip = new_eip; \
-	regs->sp = new_esp; \
-} while (0)
-
 
 extern unsigned long thread_saved_pc(struct task_struct *tsk);
@@ -780,24 +870,24 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 	__regs__ - 1; \
 })
 
 #define KSTK_ESP(task)	(task_pt_regs(task)->sp)
 
 #else
 /*
  * User space process size. 47bits minus one guard page.
  */
-#define TASK_SIZE64	(0x800000000000UL - 4096)
+#define TASK_SIZE64	((1UL << 47) - PAGE_SIZE)
 
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
 #define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
 				 0xc0000000 : 0xFFFFe000)
 
 #define TASK_SIZE	(test_thread_flag(TIF_IA32) ? \
 			 IA32_PAGE_OFFSET : TASK_SIZE64)
 #define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? \
 				 IA32_PAGE_OFFSET : TASK_SIZE64)
 
 #define STACK_TOP	TASK_SIZE
 #define STACK_TOP_MAX	TASK_SIZE64
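The TASK_SIZE64 rewrite is value-preserving; a quick user-space check of the arithmetic, assuming PAGE_SIZE is 4096 and a 64-bit host:

#include <stdio.h>

int main(void)
{
	unsigned long old_val = 0x800000000000UL - 4096;
	unsigned long new_val = (1UL << 47) - 4096;	/* PAGE_SIZE assumed 4096 */

	printf("old=%#lx new=%#lx %s\n", old_val, new_val,
	       old_val == new_val ? "equal" : "differ");
	return 0;
}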
@@ -810,33 +900,32 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
 }
 
-#define start_thread(regs, new_rip, new_rsp) do { \
-	asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
-	load_gs_index(0); \
-	(regs)->ip = (new_rip); \
-	(regs)->sp = (new_rsp); \
-	write_pda(oldrsp, (new_rsp)); \
-	(regs)->cs = __USER_CS; \
-	(regs)->ss = __USER_DS; \
-	(regs)->flags = 0x200; \
-	set_fs(USER_DS); \
-} while (0)
-
 /*
  * Return saved PC of a blocked thread.
  * What is this good for? it will be always the scheduler or ret_from_fork.
  */
 #define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))
 
 #define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
 #define KSTK_ESP(tsk)	-1	/* sorry. doesn't work for syscall. */
 #endif /* CONFIG_X86_64 */
 
-/* This decides where the kernel will search for a free chunk of vm
+extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
+			 unsigned long new_sp);
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
 #define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
 
 #define KSTK_EIP(task)	(task_pt_regs(task)->ip)
+
+/* Get/set a process' ability to use the timestamp counter instruction */
+#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
+#define SET_TSC_CTL(val)	set_tsc_mode((val))
+
+extern int get_tsc_mode(unsigned long adr);
+extern int set_tsc_mode(unsigned int val);
 
 #endif
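The new GET_TSC_CTL/SET_TSC_CTL hooks surface to user space through prctl(). A hedged user-space sketch; the PR_* constants ship in <linux/prctl.h> on kernels carrying this feature, hence the fallback defines:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_GET_TSC
# define PR_GET_TSC	25
# define PR_SET_TSC	26
# define PR_TSC_ENABLE	1
# define PR_TSC_SIGSEGV	2
#endif

int main(void)
{
	int mode = 0;

	if (prctl(PR_GET_TSC, &mode) == 0)
		printf("rdtsc is %s\n", mode == PR_TSC_ENABLE ?
		       "allowed" : "trapping with SIGSEGV");

	return 0;
}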