Diffstat (limited to 'include/asm-x86/processor.h')
-rw-r--r--  include/asm-x86/processor.h | 729
1 file changed, 404 insertions(+), 325 deletions(-)
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 45a2f0ab33d0..6e26c7c717a2 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -3,8 +3,7 @@
 
 #include <asm/processor-flags.h>
 
-/* migration helpers, for KVM - will be removed in 2.6.25: */
-#include <asm/vm86.h>
+/* migration helper, for KVM - will be removed in 2.6.25: */
 #define Xgt_desc_struct desc_ptr
 
 /* Forward declaration, a strange C thing */
@@ -24,6 +23,7 @@ struct mm_struct;
 #include <asm/msr.h>
 #include <asm/desc_defs.h>
 #include <asm/nops.h>
+
 #include <linux/personality.h>
 #include <linux/cpumask.h>
 #include <linux/cache.h>
@@ -37,16 +37,18 @@ struct mm_struct;
 static inline void *current_text_addr(void)
 {
 	void *pc;
-	asm volatile("mov $1f,%0\n1:":"=r" (pc));
+
+	asm volatile("mov $1f, %0; 1:":"=r" (pc));
+
 	return pc;
 }
 
 #ifdef CONFIG_X86_VSMP
-#define ARCH_MIN_TASKALIGN	(1 << INTERNODE_CACHE_SHIFT)
-#define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
+# define ARCH_MIN_TASKALIGN	(1 << INTERNODE_CACHE_SHIFT)
+# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
 #else
-#define ARCH_MIN_TASKALIGN	16
-#define ARCH_MIN_MMSTRUCT_ALIGN	0
+# define ARCH_MIN_TASKALIGN	16
+# define ARCH_MIN_MMSTRUCT_ALIGN	0
 #endif
 
 /*
@@ -56,69 +58,82 @@ static inline void *current_text_addr(void)
  */
 
 struct cpuinfo_x86 {
 	__u8 x86; /* CPU family */
 	__u8 x86_vendor; /* CPU vendor */
 	__u8 x86_model;
 	__u8 x86_mask;
 #ifdef CONFIG_X86_32
 	char wp_works_ok; /* It doesn't on 386's */
-	char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */
-	char hard_math;
-	char rfu;
-	char fdiv_bug;
-	char f00f_bug;
-	char coma_bug;
-	char pad0;
+
+	/* Problems on some 486Dx4's and old 386's: */
+	char hlt_works_ok;
+	char hard_math;
+	char rfu;
+	char fdiv_bug;
+	char f00f_bug;
+	char coma_bug;
+	char pad0;
 #else
-	/* number of 4K pages in DTLB/ITLB combined(in pages)*/
+	/* Number of 4K pages in DTLB/ITLB combined(in pages): */
 	int x86_tlbsize;
-	__u8 x86_virt_bits, x86_phys_bits;
-	/* cpuid returned core id bits */
-	__u8 x86_coreid_bits;
-	/* Max extended CPUID function supported */
-	__u32 extended_cpuid_level;
+	__u8 x86_virt_bits;
+	__u8 x86_phys_bits;
+	/* CPUID returned core id bits: */
+	__u8 x86_coreid_bits;
+	/* Max extended CPUID function supported: */
+	__u32 extended_cpuid_level;
 #endif
-	int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
-	__u32 x86_capability[NCAPINTS];
-	char x86_vendor_id[16];
-	char x86_model_id[64];
-	int x86_cache_size; /* in KB - valid for CPUS which support this
-			       call */
-	int x86_cache_alignment; /* In bytes */
-	int x86_power;
-	unsigned long loops_per_jiffy;
+	/* Maximum supported CPUID level, -1=no CPUID: */
+	int cpuid_level;
+	__u32 x86_capability[NCAPINTS];
+	char x86_vendor_id[16];
+	char x86_model_id[64];
+	/* in KB - valid for CPUS which support this call: */
+	int x86_cache_size;
+	int x86_cache_alignment; /* In bytes */
+	int x86_power;
+	unsigned long loops_per_jiffy;
 #ifdef CONFIG_SMP
-	cpumask_t llc_shared_map; /* cpus sharing the last level cache */
+	/* cpus sharing the last level cache: */
+	cpumask_t llc_shared_map;
 #endif
-	u16 x86_max_cores; /* cpuid returned max cores value */
-	u16 apicid;
-	u16 x86_clflush_size;
+	/* cpuid returned max cores value: */
+	u16 x86_max_cores;
+	u16 apicid;
+	u16 initial_apicid;
+	u16 x86_clflush_size;
 #ifdef CONFIG_SMP
-	u16 booted_cores; /* number of cores as seen by OS */
-	u16 phys_proc_id; /* Physical processor id. */
-	u16 cpu_core_id; /* Core id */
-	u16 cpu_index; /* index into per_cpu list */
+	/* number of cores as seen by the OS: */
+	u16 booted_cores;
+	/* Physical processor id: */
+	u16 phys_proc_id;
+	/* Core id: */
+	u16 cpu_core_id;
+	/* Index into per_cpu list: */
+	u16 cpu_index;
 #endif
 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
 
 #define X86_VENDOR_INTEL	0
 #define X86_VENDOR_CYRIX	1
 #define X86_VENDOR_AMD		2
 #define X86_VENDOR_UMC		3
 #define X86_VENDOR_NEXGEN	4
 #define X86_VENDOR_CENTAUR	5
 #define X86_VENDOR_TRANSMETA	7
 #define X86_VENDOR_NSC		8
 #define X86_VENDOR_NUM		9
-#define X86_VENDOR_UNKNOWN	0xff
+
+#define X86_VENDOR_UNKNOWN	0xff
 
 /*
  * capabilities of CPUs
  */
 extern struct cpuinfo_x86 boot_cpu_data;
 extern struct cpuinfo_x86 new_cpu_data;
-extern struct tss_struct doublefault_tss;
-extern __u32 cleared_cpu_caps[NCAPINTS];
+
+extern struct tss_struct doublefault_tss;
+extern __u32 cleared_cpu_caps[NCAPINTS];
 
 #ifdef CONFIG_SMP
 DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
@@ -129,7 +144,18 @@ DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
 #define current_cpu_data	boot_cpu_data
 #endif
 
-void cpu_detect(struct cpuinfo_x86 *c);
+static inline int hlt_works(int cpu)
+{
+#ifdef CONFIG_X86_32
+	return cpu_data(cpu).hlt_works_ok;
+#else
+	return 1;
+#endif
+}
+
+#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)
+
+extern void cpu_detect(struct cpuinfo_x86 *c);
 
 extern void identify_cpu(struct cpuinfo_x86 *);
 extern void identify_boot_cpu(void);
@@ -146,15 +172,15 @@ static inline void detect_ht(struct cpuinfo_x86 *c) {}
 #endif
 
 static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
 				unsigned int *ecx, unsigned int *edx)
 {
 	/* ecx is often an input as well as an output. */
-	__asm__("cpuid"
+	asm("cpuid"
 	    : "=a" (*eax),
 	      "=b" (*ebx),
 	      "=c" (*ecx),
 	      "=d" (*edx)
 	    : "0" (*eax), "2" (*ecx));
 }
 
 static inline void load_cr3(pgd_t *pgdir)
@@ -165,54 +191,67 @@ static inline void load_cr3(pgd_t *pgdir)
 #ifdef CONFIG_X86_32
 /* This is the TSS defined by the hardware. */
 struct x86_hw_tss {
 	unsigned short back_link, __blh;
 	unsigned long sp0;
 	unsigned short ss0, __ss0h;
 	unsigned long sp1;
-	unsigned short ss1, __ss1h; /* ss1 caches MSR_IA32_SYSENTER_CS */
-	unsigned long sp2;
-	unsigned short ss2, __ss2h;
-	unsigned long __cr3;
-	unsigned long ip;
-	unsigned long flags;
-	unsigned long ax, cx, dx, bx;
-	unsigned long sp, bp, si, di;
-	unsigned short es, __esh;
-	unsigned short cs, __csh;
-	unsigned short ss, __ssh;
-	unsigned short ds, __dsh;
-	unsigned short fs, __fsh;
-	unsigned short gs, __gsh;
-	unsigned short ldt, __ldth;
-	unsigned short trace, io_bitmap_base;
+	/* ss1 caches MSR_IA32_SYSENTER_CS: */
+	unsigned short ss1, __ss1h;
+	unsigned long sp2;
+	unsigned short ss2, __ss2h;
+	unsigned long __cr3;
+	unsigned long ip;
+	unsigned long flags;
+	unsigned long ax;
+	unsigned long cx;
+	unsigned long dx;
+	unsigned long bx;
+	unsigned long sp;
+	unsigned long bp;
+	unsigned long si;
+	unsigned long di;
+	unsigned short es, __esh;
+	unsigned short cs, __csh;
+	unsigned short ss, __ssh;
+	unsigned short ds, __dsh;
+	unsigned short fs, __fsh;
+	unsigned short gs, __gsh;
+	unsigned short ldt, __ldth;
+	unsigned short trace;
+	unsigned short io_bitmap_base;
+
 } __attribute__((packed));
 #else
 struct x86_hw_tss {
 	u32 reserved1;
 	u64 sp0;
 	u64 sp1;
 	u64 sp2;
 	u64 reserved2;
 	u64 ist[7];
 	u32 reserved3;
 	u32 reserved4;
 	u16 reserved5;
 	u16 io_bitmap_base;
+
 } __attribute__((packed)) ____cacheline_aligned;
 #endif
 
 /*
- * Size of io_bitmap.
+ * IO-bitmap sizes:
  */
 #define IO_BITMAP_BITS	65536
 #define IO_BITMAP_BYTES	(IO_BITMAP_BITS/8)
 #define IO_BITMAP_LONGS	(IO_BITMAP_BYTES/sizeof(long))
 #define IO_BITMAP_OFFSET	offsetof(struct tss_struct, io_bitmap)
 #define INVALID_IO_BITMAP_OFFSET	0x8000
 #define INVALID_IO_BITMAP_OFFSET_LAZY	0x9000
 
 struct tss_struct {
-	struct x86_hw_tss x86_tss;
+	/*
+	 * The hardware state:
+	 */
+	struct x86_hw_tss x86_tss;
 
 	/*
 	 * The extra 1 is there because the CPU will access an
@@ -220,90 +259,108 @@ struct tss_struct {
 	 * bitmap. The extra byte must be all 1 bits, and must
 	 * be within the limit.
 	 */
 	unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
 	/*
 	 * Cache the current maximum and the last task that used the bitmap:
 	 */
 	unsigned long io_bitmap_max;
 	struct thread_struct *io_bitmap_owner;
+
 	/*
-	 * pads the TSS to be cacheline-aligned (size is 0x100)
+	 * Pad the TSS to be cacheline-aligned (size is 0x100):
 	 */
 	unsigned long __cacheline_filler[35];
 	/*
-	 * .. and then another 0x100 bytes for emergency kernel stack
+	 * .. and then another 0x100 bytes for the emergency kernel stack:
 	 */
 	unsigned long stack[64];
+
 } __attribute__((packed));
 
 DECLARE_PER_CPU(struct tss_struct, init_tss);
 
-/* Save the original ist values for checking stack pointers during debugging */
+/*
+ * Save the original ist values for checking stack pointers during debugging
+ */
 struct orig_ist {
 	unsigned long ist[7];
 };
 
 #define MXCSR_DEFAULT	0x1f80
 
 struct i387_fsave_struct {
-	u32 cwd;
-	u32 swd;
-	u32 twd;
-	u32 fip;
-	u32 fcs;
-	u32 foo;
-	u32 fos;
-	u32 st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
-	u32 status; /* software status information */
+	u32 cwd; /* FPU Control Word */
+	u32 swd; /* FPU Status Word */
+	u32 twd; /* FPU Tag Word */
+	u32 fip; /* FPU IP Offset */
+	u32 fcs; /* FPU IP Selector */
+	u32 foo; /* FPU Operand Pointer Offset */
+	u32 fos; /* FPU Operand Pointer Selector */
+
+	/* 8*10 bytes for each FP-reg = 80 bytes: */
+	u32 st_space[20];
+
+	/* Software status information [not touched by FSAVE ]: */
+	u32 status;
 };
 
 struct i387_fxsave_struct {
-	u16 cwd;
-	u16 swd;
-	u16 twd;
-	u16 fop;
+	u16 cwd; /* Control Word */
+	u16 swd; /* Status Word */
+	u16 twd; /* Tag Word */
+	u16 fop; /* Last Instruction Opcode */
 	union {
 		struct {
-			u64 rip;
-			u64 rdp;
+			u64 rip; /* Instruction Pointer */
+			u64 rdp; /* Data Pointer */
 		};
 		struct {
-			u32 fip;
-			u32 fcs;
-			u32 foo;
-			u32 fos;
+			u32 fip; /* FPU IP Offset */
+			u32 fcs; /* FPU IP Selector */
+			u32 foo; /* FPU Operand Offset */
+			u32 fos; /* FPU Operand Selector */
 		};
 	};
-	u32 mxcsr;
-	u32 mxcsr_mask;
-	u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
-	u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
-	u32 padding[24];
+	u32 mxcsr; /* MXCSR Register State */
+	u32 mxcsr_mask; /* MXCSR Mask */
+
+	/* 8*16 bytes for each FP-reg = 128 bytes: */
+	u32 st_space[32];
+
+	/* 16*16 bytes for each XMM-reg = 256 bytes: */
+	u32 xmm_space[64];
+
+	u32 padding[24];
+
 } __attribute__((aligned(16)));
 
 struct i387_soft_struct {
 	u32 cwd;
 	u32 swd;
 	u32 twd;
 	u32 fip;
 	u32 fcs;
 	u32 foo;
 	u32 fos;
-	u32 st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
-	u8 ftop, changed, lookahead, no_update, rm, alimit;
-	struct info *info;
-	u32 entry_eip;
+	/* 8*10 bytes for each FP-reg = 80 bytes: */
+	u32 st_space[20];
+	u8 ftop;
+	u8 changed;
+	u8 lookahead;
+	u8 no_update;
+	u8 rm;
+	u8 alimit;
+	struct info *info;
+	u32 entry_eip;
 };
 
 union i387_union {
 	struct i387_fsave_struct fsave;
 	struct i387_fxsave_struct fxsave;
 	struct i387_soft_struct soft;
 };
 
-#ifdef CONFIG_X86_32
-DECLARE_PER_CPU(u8, cpu_llc_id);
-#else
+#ifdef CONFIG_X86_64
 DECLARE_PER_CPU(struct orig_ist, orig_ist);
 #endif
 
@@ -313,42 +370,50 @@ extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
 extern unsigned short num_cache_leaves;
 
 struct thread_struct {
-/* cached TLS descriptors. */
+	/* Cached TLS descriptors: */
 	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
 	unsigned long sp0;
 	unsigned long sp;
 #ifdef CONFIG_X86_32
 	unsigned long sysenter_cs;
 #else
 	unsigned long usersp; /* Copy from PDA */
-	unsigned short es, ds, fsindex, gsindex;
+	unsigned short es;
+	unsigned short ds;
+	unsigned short fsindex;
+	unsigned short gsindex;
 #endif
 	unsigned long ip;
 	unsigned long fs;
 	unsigned long gs;
-/* Hardware debugging registers */
+	/* Hardware debugging registers: */
 	unsigned long debugreg0;
 	unsigned long debugreg1;
 	unsigned long debugreg2;
 	unsigned long debugreg3;
 	unsigned long debugreg6;
 	unsigned long debugreg7;
-/* fault info */
-	unsigned long cr2, trap_no, error_code;
-/* floating point info */
+	/* Fault info: */
+	unsigned long cr2;
+	unsigned long trap_no;
+	unsigned long error_code;
+	/* Floating point info: */
 	union i387_union i387 __attribute__((aligned(16)));;
 #ifdef CONFIG_X86_32
-/* virtual 86 mode info */
+	/* Virtual 86 mode info */
 	struct vm86_struct __user *vm86_info;
 	unsigned long screen_bitmap;
-	unsigned long v86flags, v86mask, saved_sp0;
-	unsigned int saved_fs, saved_gs;
+	unsigned long v86flags;
+	unsigned long v86mask;
+	unsigned long saved_sp0;
+	unsigned int saved_fs;
+	unsigned int saved_gs;
 #endif
-/* IO permissions */
+	/* IO permissions: */
 	unsigned long *io_bitmap_ptr;
 	unsigned long iopl;
-/* max allowed port in the bitmap, in bytes: */
+	/* Max allowed port in the bitmap, in bytes: */
 	unsigned io_bitmap_max;
 /* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
 	unsigned long debugctlmsr;
 /* Debug Store - if not 0 points to a DS Save Area configuration;
@@ -358,21 +423,27 @@ struct thread_struct {
 
 static inline unsigned long native_get_debugreg(int regno)
 {
 	unsigned long val = 0; /* Damn you, gcc! */
 
 	switch (regno) {
 	case 0:
-		asm("mov %%db0, %0" :"=r" (val)); break;
+		asm("mov %%db0, %0" :"=r" (val));
+		break;
 	case 1:
-		asm("mov %%db1, %0" :"=r" (val)); break;
+		asm("mov %%db1, %0" :"=r" (val));
+		break;
 	case 2:
-		asm("mov %%db2, %0" :"=r" (val)); break;
+		asm("mov %%db2, %0" :"=r" (val));
+		break;
 	case 3:
-		asm("mov %%db3, %0" :"=r" (val)); break;
+		asm("mov %%db3, %0" :"=r" (val));
+		break;
 	case 6:
-		asm("mov %%db6, %0" :"=r" (val)); break;
+		asm("mov %%db6, %0" :"=r" (val));
+		break;
 	case 7:
-		asm("mov %%db7, %0" :"=r" (val)); break;
+		asm("mov %%db7, %0" :"=r" (val));
+		break;
 	default:
 		BUG();
 	}
@@ -383,22 +454,22 @@ static inline void native_set_debugreg(int regno, unsigned long value)
 {
 	switch (regno) {
 	case 0:
-		asm("mov %0,%%db0" : /* no output */ :"r" (value));
+		asm("mov %0, %%db0" ::"r" (value));
 		break;
 	case 1:
-		asm("mov %0,%%db1" : /* no output */ :"r" (value));
+		asm("mov %0, %%db1" ::"r" (value));
 		break;
 	case 2:
-		asm("mov %0,%%db2" : /* no output */ :"r" (value));
+		asm("mov %0, %%db2" ::"r" (value));
 		break;
 	case 3:
-		asm("mov %0,%%db3" : /* no output */ :"r" (value));
+		asm("mov %0, %%db3" ::"r" (value));
 		break;
 	case 6:
-		asm("mov %0,%%db6" : /* no output */ :"r" (value));
+		asm("mov %0, %%db6" ::"r" (value));
 		break;
 	case 7:
-		asm("mov %0,%%db7" : /* no output */ :"r" (value));
+		asm("mov %0, %%db7" ::"r" (value));
 		break;
 	default:
 		BUG();
@@ -412,23 +483,24 @@ static inline void native_set_iopl_mask(unsigned mask)
 {
 #ifdef CONFIG_X86_32
 	unsigned int reg;
-	__asm__ __volatile__ ("pushfl;"
-			      "popl %0;"
-			      "andl %1, %0;"
-			      "orl %2, %0;"
-			      "pushl %0;"
-			      "popfl"
-			      : "=&r" (reg)
-			      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
+
+	asm volatile ("pushfl;"
+		      "popl %0;"
+		      "andl %1, %0;"
+		      "orl %2, %0;"
+		      "pushl %0;"
+		      "popfl"
+		      : "=&r" (reg)
+		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
 #endif
 }
 
-static inline void native_load_sp0(struct tss_struct *tss,
-				   struct thread_struct *thread)
+static inline void
+native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
 {
 	tss->x86_tss.sp0 = thread->sp0;
 #ifdef CONFIG_X86_32
-	/* Only happens when SEP is enabled, no need to test "SEP"arately */
+	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
 	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
 		tss->x86_tss.ss1 = thread->sysenter_cs;
 		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
@@ -446,8 +518,8 @@ static inline void native_swapgs(void)
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
 #define __cpuid		native_cpuid
 #define paravirt_enabled()	0
 
 /*
  * These special macros can be used to get or set a debugging register
@@ -473,11 +545,12 @@ static inline void load_sp0(struct tss_struct *tss,
  * enable), so that any CPU's that boot up
  * after us can get the correct flags.
  */
 extern unsigned long mmu_cr4_features;
 
 static inline void set_in_cr4(unsigned long mask)
 {
 	unsigned cr4;
+
 	mmu_cr4_features |= mask;
 	cr4 = read_cr4();
 	cr4 |= mask;
@@ -487,6 +560,7 @@ static inline void set_in_cr4(unsigned long mask)
 static inline void clear_in_cr4(unsigned long mask)
 {
 	unsigned cr4;
+
 	mmu_cr4_features &= ~mask;
 	cr4 = read_cr4();
 	cr4 &= ~mask;
@@ -494,42 +568,42 @@ static inline void clear_in_cr4(unsigned long mask)
 }
 
 struct microcode_header {
 	unsigned int hdrver;
 	unsigned int rev;
 	unsigned int date;
 	unsigned int sig;
 	unsigned int cksum;
 	unsigned int ldrver;
 	unsigned int pf;
 	unsigned int datasize;
 	unsigned int totalsize;
 	unsigned int reserved[3];
 };
 
 struct microcode {
 	struct microcode_header hdr;
 	unsigned int bits[0];
 };
 
 typedef struct microcode microcode_t;
 typedef struct microcode_header microcode_header_t;
 
 /* microcode format is extended from prescott processors */
 struct extended_signature {
 	unsigned int sig;
 	unsigned int pf;
 	unsigned int cksum;
 };
 
 struct extended_sigtable {
 	unsigned int count;
 	unsigned int cksum;
 	unsigned int reserved[3];
 	struct extended_signature sigs[0];
 };
 
 typedef struct {
 	unsigned long seg;
 } mm_segment_t;
 
 
@@ -541,7 +615,7 @@ extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
 
-/* Prepare to copy thread state - unlazy all lazy status */
+/* Prepare to copy thread state - unlazy all lazy state */
 extern void prepare_to_copy(struct task_struct *tsk);
 
 unsigned long get_wchan(struct task_struct *p);
@@ -578,118 +652,137 @@ static inline unsigned int cpuid_eax(unsigned int op)
 	unsigned int eax, ebx, ecx, edx;
 
 	cpuid(op, &eax, &ebx, &ecx, &edx);
+
 	return eax;
 }
+
 static inline unsigned int cpuid_ebx(unsigned int op)
 {
 	unsigned int eax, ebx, ecx, edx;
 
 	cpuid(op, &eax, &ebx, &ecx, &edx);
+
 	return ebx;
 }
+
 static inline unsigned int cpuid_ecx(unsigned int op)
 {
 	unsigned int eax, ebx, ecx, edx;
 
 	cpuid(op, &eax, &ebx, &ecx, &edx);
+
 	return ecx;
 }
+
 static inline unsigned int cpuid_edx(unsigned int op)
 {
 	unsigned int eax, ebx, ecx, edx;
 
 	cpuid(op, &eax, &ebx, &ecx, &edx);
+
 	return edx;
 }
 
 /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
 static inline void rep_nop(void)
 {
-	__asm__ __volatile__("rep;nop": : :"memory");
+	asm volatile("rep; nop" ::: "memory");
 }
 
-/* Stop speculative execution */
+static inline void cpu_relax(void)
+{
+	rep_nop();
+}
+
+/* Stop speculative execution: */
 static inline void sync_core(void)
 {
 	int tmp;
+
 	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
 		     : "ebx", "ecx", "edx", "memory");
 }
 
-#define cpu_relax()	rep_nop()
-
 static inline void __monitor(const void *eax, unsigned long ecx,
 			     unsigned long edx)
 {
-	/* "monitor %eax,%ecx,%edx;" */
-	asm volatile(
-		".byte 0x0f,0x01,0xc8;"
-		: :"a" (eax), "c" (ecx), "d"(edx));
+	/* "monitor %eax, %ecx, %edx;" */
+	asm volatile(".byte 0x0f, 0x01, 0xc8;"
+		     :: "a" (eax), "c" (ecx), "d"(edx));
 }
 
 static inline void __mwait(unsigned long eax, unsigned long ecx)
 {
-	/* "mwait %eax,%ecx;" */
-	asm volatile(
-		".byte 0x0f,0x01,0xc9;"
-		: :"a" (eax), "c" (ecx));
+	/* "mwait %eax, %ecx;" */
+	asm volatile(".byte 0x0f, 0x01, 0xc9;"
+		     :: "a" (eax), "c" (ecx));
 }
 
 static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
 {
-	/* "mwait %eax,%ecx;" */
-	asm volatile(
-		"sti; .byte 0x0f,0x01,0xc9;"
-		: :"a" (eax), "c" (ecx));
+	/* "mwait %eax, %ecx;" */
+	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
+		     :: "a" (eax), "c" (ecx));
 }
 
 extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
 
 extern int force_mwait;
 
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
 
 extern unsigned long boot_option_idle_override;
 
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
 
 /* Defined in head.S */
 extern struct desc_ptr early_gdt_descr;
 
 extern void cpu_set_gdt(int);
 extern void switch_to_new_gdt(void);
 extern void cpu_init(void);
 extern void init_gdt(int cpu);
 
-/* from system description table in BIOS. Mostly for MCA use, but
- * others may find it useful. */
-extern unsigned int machine_id;
-extern unsigned int machine_submodel_id;
-extern unsigned int BIOS_revision;
+static inline void update_debugctlmsr(unsigned long debugctlmsr)
+{
+#ifndef CONFIG_X86_DEBUGCTLMSR
+	if (boot_cpu_data.x86 < 6)
+		return;
+#endif
+	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
+}
 
-/* Boot loader type from the setup header */
-extern int bootloader_type;
+/*
+ * from system description table in BIOS. Mostly for MCA use, but
+ * others may find it useful:
+ */
+extern unsigned int machine_id;
+extern unsigned int machine_submodel_id;
+extern unsigned int BIOS_revision;
+
+/* Boot loader type from the setup header: */
+extern int bootloader_type;
 
 extern char ignore_fpu_irq;
-#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
 
 #define HAVE_ARCH_PICK_MMAP_LAYOUT 1
 #define ARCH_HAS_PREFETCHW
 #define ARCH_HAS_SPINLOCK_PREFETCH
 
 #ifdef CONFIG_X86_32
-#define BASE_PREFETCH	ASM_NOP4
-#define ARCH_HAS_PREFETCH
+# define BASE_PREFETCH	ASM_NOP4
+# define ARCH_HAS_PREFETCH
 #else
-#define BASE_PREFETCH	"prefetcht0 (%1)"
+# define BASE_PREFETCH	"prefetcht0 (%1)"
 #endif
 
-/* Prefetch instructions for Pentium III and AMD Athlon */
-/* It's not worth to care about 3dnow! prefetches for the K6
-   because they are microcoded there and very slow.
-   However we don't do prefetches for pre XP Athlons currently
-   That should be fixed. */
+/*
+ * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
+ *
+ * It's not worth to care about 3dnow prefetches for the K6
+ * because they are microcoded there and very slow.
+ */
 static inline void prefetch(const void *x)
 {
 	alternative_input(BASE_PREFETCH,
@@ -698,8 +791,11 @@ static inline void prefetch(const void *x)
698 "r" (x)); 791 "r" (x));
699} 792}
700 793
701/* 3dnow! prefetch to get an exclusive cache line. Useful for 794/*
702 spinlocks to avoid one state transition in the cache coherency protocol. */ 795 * 3dnow prefetch to get an exclusive cache line.
796 * Useful for spinlocks to avoid one state transition in the
797 * cache coherency protocol:
798 */
703static inline void prefetchw(const void *x) 799static inline void prefetchw(const void *x)
704{ 800{
705 alternative_input(BASE_PREFETCH, 801 alternative_input(BASE_PREFETCH,
@@ -708,21 +804,25 @@ static inline void prefetchw(const void *x)
708 "r" (x)); 804 "r" (x));
709} 805}
710 806
711#define spin_lock_prefetch(x) prefetchw(x) 807static inline void spin_lock_prefetch(const void *x)
808{
809 prefetchw(x);
810}
811
712#ifdef CONFIG_X86_32 812#ifdef CONFIG_X86_32
713/* 813/*
714 * User space process size: 3GB (default). 814 * User space process size: 3GB (default).
715 */ 815 */
716#define TASK_SIZE (PAGE_OFFSET) 816#define TASK_SIZE PAGE_OFFSET
717#define STACK_TOP TASK_SIZE 817#define STACK_TOP TASK_SIZE
718#define STACK_TOP_MAX STACK_TOP 818#define STACK_TOP_MAX STACK_TOP
719 819
720#define INIT_THREAD { \ 820#define INIT_THREAD { \
721 .sp0 = sizeof(init_stack) + (long)&init_stack, \ 821 .sp0 = sizeof(init_stack) + (long)&init_stack, \
722 .vm86_info = NULL, \ 822 .vm86_info = NULL, \
723 .sysenter_cs = __KERNEL_CS, \ 823 .sysenter_cs = __KERNEL_CS, \
724 .io_bitmap_ptr = NULL, \ 824 .io_bitmap_ptr = NULL, \
725 .fs = __KERNEL_PERCPU, \ 825 .fs = __KERNEL_PERCPU, \
726} 826}
727 827
728/* 828/*
@@ -731,28 +831,15 @@ static inline void prefetchw(const void *x)
  * permission bitmap. The extra byte must be all 1 bits, and must
  * be within the limit.
  */
 #define INIT_TSS { \
 	.x86_tss = { \
 		.sp0 = sizeof(init_stack) + (long)&init_stack, \
 		.ss0 = __KERNEL_DS, \
 		.ss1 = __KERNEL_CS, \
 		.io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
 	}, \
 	.io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \
 }
-
-#define start_thread(regs, new_eip, new_esp) do { \
-	__asm__("movl %0,%%gs": :"r" (0)); \
-	regs->fs = 0; \
-	set_fs(USER_DS); \
-	regs->ds = __USER_DS; \
-	regs->es = __USER_DS; \
-	regs->ss = __USER_DS; \
-	regs->cs = __USER_CS; \
-	regs->ip = new_eip; \
-	regs->sp = new_esp; \
-} while (0)
-
 
 extern unsigned long thread_saved_pc(struct task_struct *tsk);
 
@@ -780,24 +867,24 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 	__regs__ - 1; \
 })
 
 #define KSTK_ESP(task)	(task_pt_regs(task)->sp)
 
 #else
 /*
  * User space process size. 47bits minus one guard page.
  */
-#define TASK_SIZE64	(0x800000000000UL - 4096)
+#define TASK_SIZE64	((1UL << 47) - PAGE_SIZE)
 
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
 #define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
 				 0xc0000000 : 0xFFFFe000)
 
 #define TASK_SIZE	(test_thread_flag(TIF_IA32) ? \
 			 IA32_PAGE_OFFSET : TASK_SIZE64)
 #define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? \
 				 IA32_PAGE_OFFSET : TASK_SIZE64)
 
 #define STACK_TOP	TASK_SIZE
 #define STACK_TOP_MAX	TASK_SIZE64
@@ -810,33 +897,25 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
 }
 
-#define start_thread(regs, new_rip, new_rsp) do { \
-	asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
-	load_gs_index(0); \
-	(regs)->ip = (new_rip); \
-	(regs)->sp = (new_rsp); \
-	write_pda(oldrsp, (new_rsp)); \
-	(regs)->cs = __USER_CS; \
-	(regs)->ss = __USER_DS; \
-	(regs)->flags = 0x200; \
-	set_fs(USER_DS); \
-} while (0)
-
 /*
  * Return saved PC of a blocked thread.
  * What is this good for? it will be always the scheduler or ret_from_fork.
  */
 #define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))
 
 #define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
 #define KSTK_ESP(tsk)	-1 /* sorry. doesn't work for syscall. */
 #endif /* CONFIG_X86_64 */
 
-/* This decides where the kernel will search for a free chunk of vm
+extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
+			 unsigned long new_sp);
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
 #define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
 
 #define KSTK_EIP(task)	(task_pt_regs(task)->ip)
 
 #endif