aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRusty Russell <rusty@rustcorp.com.au>2007-05-02 13:27:13 -0400
committerAndi Kleen <andi@basil.nowhere.org>2007-05-02 13:27:13 -0400
commita75c54f933bd8db9f4a609bd128663c179b3e6a1 (patch)
tree8b7dd866185bec34146eb537f057b6b496c78443
parent82d1bb725e128c97b362a4b33fcbfff08fdaaa5a (diff)
[PATCH] i386: i386 separate hardware-defined TSS from Linux additions
On Thu, 2007-03-29 at 13:16 +0200, Andi Kleen wrote: > Please clean it up properly with two structs. Not sure about this, now I've done it. Running it here. If you like it, I can do x86-64 as well. == lguest defines its own TSS struct because the "struct tss_struct" contains linux-specific additions. Andi asked me to split the struct in processor.h. Unfortunately it makes usage a little awkward. Signed-off-by: Rusty Russell <rusty@rustcorp.com.au> Signed-off-by: Andi Kleen <ak@suse.de>
-rw-r--r--arch/i386/kernel/asm-offsets.c2
-rw-r--r--arch/i386/kernel/doublefault.c29
-rw-r--r--arch/i386/kernel/ioport.c2
-rw-r--r--arch/i386/kernel/process.c8
-rw-r--r--arch/i386/kernel/sysenter.c6
-rw-r--r--arch/i386/kernel/traps.c4
-rw-r--r--arch/i386/kernel/vmi.c8
-rw-r--r--include/asm-i386/processor.h24
8 files changed, 47 insertions, 36 deletions
diff --git a/arch/i386/kernel/asm-offsets.c b/arch/i386/kernel/asm-offsets.c
index 655cc8d4c745..d558adfc293c 100644
--- a/arch/i386/kernel/asm-offsets.c
+++ b/arch/i386/kernel/asm-offsets.c
@@ -93,7 +93,7 @@ void foo(void)
 	OFFSET(pbe_next, pbe, next);
 
 	/* Offset from the sysenter stack to tss.esp0 */
-	DEFINE(TSS_sysenter_esp0, offsetof(struct tss_struct, esp0) -
+	DEFINE(TSS_sysenter_esp0, offsetof(struct tss_struct, x86_tss.esp0) -
 		 sizeof(struct tss_struct));
 
 	DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
diff --git a/arch/i386/kernel/doublefault.c b/arch/i386/kernel/doublefault.c
index b4d14c2eb345..265c5597efb0 100644
--- a/arch/i386/kernel/doublefault.c
+++ b/arch/i386/kernel/doublefault.c
@@ -33,7 +33,7 @@ static void doublefault_fn(void)
 	printk("double fault, tss at %08lx\n", tss);
 
 	if (ptr_ok(tss)) {
-		struct tss_struct *t = (struct tss_struct *)tss;
+		struct i386_hw_tss *t = (struct i386_hw_tss *)tss;
 
 		printk("eip = %08lx, esp = %08lx\n", t->eip, t->esp);
 
@@ -49,18 +49,21 @@ static void doublefault_fn(void)
 }
 
 struct tss_struct doublefault_tss __cacheline_aligned = {
-	.esp0		= STACK_START,
-	.ss0		= __KERNEL_DS,
-	.ldt		= 0,
-	.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,
+	.x86_tss = {
+		.esp0		= STACK_START,
+		.ss0		= __KERNEL_DS,
+		.ldt		= 0,
+		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,
 
-	.eip		= (unsigned long) doublefault_fn,
-	.eflags		= X86_EFLAGS_SF | 0x2,	/* 0x2 bit is always set */
-	.esp		= STACK_START,
-	.es		= __USER_DS,
-	.cs		= __KERNEL_CS,
-	.ss		= __KERNEL_DS,
-	.ds		= __USER_DS,
+		.eip		= (unsigned long) doublefault_fn,
+		/* 0x2 bit is always set */
+		.eflags		= X86_EFLAGS_SF | 0x2,
+		.esp		= STACK_START,
+		.es		= __USER_DS,
+		.cs		= __KERNEL_CS,
+		.ss		= __KERNEL_DS,
+		.ds		= __USER_DS,
 
-	.__cr3		= __pa(swapper_pg_dir)
+		.__cr3		= __pa(swapper_pg_dir)
+	}
 };
diff --git a/arch/i386/kernel/ioport.c b/arch/i386/kernel/ioport.c
index 1b4530e6cd82..d1e42e0dbe67 100644
--- a/arch/i386/kernel/ioport.c
+++ b/arch/i386/kernel/ioport.c
@@ -114,7 +114,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
 	 * Reset the owner so that a process switch will not set
 	 * tss->io_bitmap_base to IO_BITMAP_OFFSET.
 	 */
-	tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
+	tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
 	tss->io_bitmap_owner = NULL;
 
 	put_cpu();
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 7e8e129b3d7d..5fb9524c6f4b 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -375,7 +375,7 @@ void exit_thread(void)
 		t->io_bitmap_max = 0;
 		tss->io_bitmap_owner = NULL;
 		tss->io_bitmap_max = 0;
-		tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
+		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
 		put_cpu();
 	}
 }
@@ -554,7 +554,7 @@ static noinline void __switch_to_xtra(struct task_struct *next_p,
 		 * Disable the bitmap via an invalid offset. We still cache
 		 * the previous bitmap owner and the IO bitmap contents:
 		 */
-		tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
+		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
 		return;
 	}
 
@@ -564,7 +564,7 @@ static noinline void __switch_to_xtra(struct task_struct *next_p,
 		 * matches the next task, we dont have to do anything but
 		 * to set a valid offset in the TSS:
 		 */
-		tss->io_bitmap_base = IO_BITMAP_OFFSET;
+		tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
 		return;
 	}
 	/*
@@ -576,7 +576,7 @@ static noinline void __switch_to_xtra(struct task_struct *next_p,
 	 * redundant copies when the currently switched task does not
 	 * perform any I/O during its timeslice.
 	 */
-	tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
+	tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
 }
 
 /*
diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c
index 0b9768ee1e8d..94defac6fc3d 100644
--- a/arch/i386/kernel/sysenter.c
+++ b/arch/i386/kernel/sysenter.c
@@ -183,10 +183,10 @@ void enable_sep_cpu(void)
 		return;
 	}
 
-	tss->ss1 = __KERNEL_CS;
-	tss->esp1 = sizeof(struct tss_struct) + (unsigned long) tss;
+	tss->x86_tss.ss1 = __KERNEL_CS;
+	tss->x86_tss.esp1 = sizeof(struct tss_struct) + (unsigned long) tss;
 	wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
-	wrmsr(MSR_IA32_SYSENTER_ESP, tss->esp1, 0);
+	wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.esp1, 0);
 	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) sysenter_entry, 0);
 	put_cpu();
 }
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 8722444cacaa..e0a23bee6967 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -596,7 +596,7 @@ fastcall void __kprobes do_general_protection(struct pt_regs * regs,
 	 * and we set the offset field correctly. Then we let the CPU to
 	 * restart the faulting instruction.
 	 */
-	if (tss->io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
+	if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
 	    thread->io_bitmap_ptr) {
 		memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
 		       thread->io_bitmap_max);
@@ -609,7 +609,7 @@ fastcall void __kprobes do_general_protection(struct pt_regs * regs,
 			       thread->io_bitmap_max, 0xff,
 			       tss->io_bitmap_max - thread->io_bitmap_max);
 		tss->io_bitmap_max = thread->io_bitmap_max;
-		tss->io_bitmap_base = IO_BITMAP_OFFSET;
+		tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
 		tss->io_bitmap_owner = thread;
 		put_cpu();
 		return;
diff --git a/arch/i386/kernel/vmi.c b/arch/i386/kernel/vmi.c
index 626c82063d19..8f3bac473450 100644
--- a/arch/i386/kernel/vmi.c
+++ b/arch/i386/kernel/vmi.c
@@ -230,14 +230,14 @@ static void vmi_set_tr(void)
 static void vmi_load_esp0(struct tss_struct *tss,
 			   struct thread_struct *thread)
 {
-	tss->esp0 = thread->esp0;
+	tss->x86_tss.esp0 = thread->esp0;
 
 	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
-	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
-		tss->ss1 = thread->sysenter_cs;
+	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
+		tss->x86_tss.ss1 = thread->sysenter_cs;
 		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
 	}
-	vmi_ops.set_kernel_stack(__KERNEL_DS, tss->esp0);
+	vmi_ops.set_kernel_stack(__KERNEL_DS, tss->x86_tss.esp0);
 }
 
 static void vmi_flush_tlb_user(void)
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 77e263267aa6..922260474646 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -291,7 +291,8 @@ typedef struct {
 
 struct thread_struct;
 
-struct tss_struct {
+/* This is the TSS defined by the hardware. */
+struct i386_hw_tss {
 	unsigned short	back_link,__blh;
 	unsigned long	esp0;
 	unsigned short	ss0,__ss0h;
@@ -315,6 +316,11 @@ struct tss_struct {
 	unsigned short	gs, __gsh;
 	unsigned short	ldt, __ldth;
 	unsigned short	trace, io_bitmap_base;
+} __attribute__((packed));
+
+struct tss_struct {
+	struct i386_hw_tss x86_tss;
+
 	/*
 	 * The extra 1 is there because the CPU will access an
 	 * additional byte beyond the end of the IO permission
@@ -381,10 +387,12 @@ struct thread_struct {
 	 * be within the limit.
 	 */
 #define INIT_TSS  {							\
-	.esp0		= sizeof(init_stack) + (long)&init_stack,	\
-	.ss0		= __KERNEL_DS,					\
-	.ss1		= __KERNEL_CS,					\
-	.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,			\
+	.x86_tss = {							\
+		.esp0		= sizeof(init_stack) + (long)&init_stack, \
+		.ss0		= __KERNEL_DS,				\
+		.ss1		= __KERNEL_CS,				\
+		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		\
+	},								\
 	.io_bitmap	= { [ 0 ... IO_BITMAP_LONGS] = ~0 },		\
 }
 
390 398
@@ -493,10 +501,10 @@ static inline void rep_nop(void)
 
 static inline void native_load_esp0(struct tss_struct *tss, struct thread_struct *thread)
 {
-	tss->esp0 = thread->esp0;
+	tss->x86_tss.esp0 = thread->esp0;
 	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
-	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
-		tss->ss1 = thread->sysenter_cs;
+	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
+		tss->x86_tss.ss1 = thread->sysenter_cs;
 		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
 	}
 }