author     Rusty Russell <rusty@rustcorp.com.au>    2006-09-26 04:52:35 -0400
committer  Andi Kleen <andi@basil.nowhere.org>      2006-09-26 04:52:35 -0400
commit     522e93e3fcdbf00ba85c72fde6df28cfc0486a65 (patch)
tree       c6d1d0fd3109a667ca4ee0c0f8dc8a2fe4767240 /include/asm-i386
parent     02ba1a32dbd3d406530a17a2643a8f0f8cbf3acc (diff)
[PATCH] i386: Descriptor and trap table cleanups.
The implementation comes from Zach's [RFC, PATCH 10/24] i386 Vmi descriptor changes:

Descriptor and trap table cleanups.  Add cleanly written accessors for
IDT and GDT gates so the subarch may override them.  Note that this
allows the hypervisor to transparently tweak the DPL of the descriptors
as well as the RPL of segments in those descriptors, with no unnecessary
kernel code modification.  It also allows the hypervisor implementation
of the VMI to tweak the gates, allowing for custom exception frames or
extra layers of indirection above the guest fault / IRQ handlers.

Signed-off-by: Zachary Amsden <zach@vmware.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andi Kleen <ak@suse.de>
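As a rough illustration (not part of this diff, and the wrapper names are made up), trap-setup code along the lines of arch/i386/kernel/traps.c could compose the new accessors like this:

/* Illustrative wrappers over the new accessors; __KERNEL_CS is the
 * kernel code segment selector from <asm/segment.h>.  The DPL lives in
 * the gate's type byte, so a userspace-callable gate is simply
 * DESCTYPE_TRAP | DESCTYPE_DPL3. */
static void example_set_intr_gate(unsigned int n, void *addr)
{
	_set_gate(n, DESCTYPE_INT, addr, __KERNEL_CS);	/* 0x8e: DPL-0 interrupt gate */
}

static void example_set_system_gate(unsigned int n, void *addr)
{
	_set_gate(n, DESCTYPE_TRAP | DESCTYPE_DPL3, addr, __KERNEL_CS);	/* 0xef: DPL-3 trap gate */
}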
Diffstat (limited to 'include/asm-i386')
-rw-r--r--  include/asm-i386/desc.h  121
1 file changed, 76 insertions(+), 45 deletions(-)
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
index 89b8b82c82b3..5db9e96e8dc1 100644
--- a/include/asm-i386/desc.h
+++ b/include/asm-i386/desc.h
@@ -33,50 +33,99 @@ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
 	return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address;
 }
 
+/*
+ * This is the ldt that every process will get unless we need
+ * something other than this.
+ */
+extern struct desc_struct default_ldt[];
+extern struct desc_struct idt_table[];
+extern void set_intr_gate(unsigned int irq, void * addr);
+
+static inline void pack_descriptor(__u32 *a, __u32 *b,
+	unsigned long base, unsigned long limit, unsigned char type, unsigned char flags)
+{
+	*a = ((base & 0xffff) << 16) | (limit & 0xffff);
+	*b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
+		((type & 0xff) << 8) | ((flags & 0xf) << 12);
+}
+
+static inline void pack_gate(__u32 *a, __u32 *b,
+	unsigned long base, unsigned short seg, unsigned char type, unsigned char flags)
+{
+	*a = (seg << 16) | (base & 0xffff);
+	*b = (base & 0xffff0000) | ((type & 0xff) << 8) | (flags & 0xff);
+}
+
+#define DESCTYPE_LDT	0x82	/* present, system, DPL-0, LDT */
+#define DESCTYPE_TSS	0x89	/* present, system, DPL-0, 32-bit TSS */
+#define DESCTYPE_TASK	0x85	/* present, system, DPL-0, task gate */
+#define DESCTYPE_INT	0x8e	/* present, system, DPL-0, interrupt gate */
+#define DESCTYPE_TRAP	0x8f	/* present, system, DPL-0, trap gate */
+#define DESCTYPE_DPL3	0x60	/* DPL-3 */
+#define DESCTYPE_S	0x10	/* !system */
+
 #define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
 #define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
 
 #define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
 #define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
-#define load_tr(tr) __asm__ __volatile("ltr %0"::"mr" (tr))
-#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"mr" (ldt))
+#define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr))
+#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt))
 
 #define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr))
 #define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr))
-#define store_tr(tr) __asm__ ("str %0":"=mr" (tr))
-#define store_ldt(ldt) __asm__ ("sldt %0":"=mr" (ldt))
+#define store_tr(tr) __asm__ ("str %0":"=m" (tr))
+#define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt))
 
-/*
- * This is the ldt that every process will get unless we need
- * something other than this.
- */
-extern struct desc_struct default_ldt[];
-extern void set_intr_gate(unsigned int irq, void * addr);
+#if TLS_SIZE != 24
+# error update this code.
+#endif
 
-#define _set_tssldt_desc(n,addr,limit,type) \
-__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
-	"movw %w1,2(%2)\n\t" \
-	"rorl $16,%1\n\t" \
-	"movb %b1,4(%2)\n\t" \
-	"movb %4,5(%2)\n\t" \
-	"movb $0,6(%2)\n\t" \
-	"movb %h1,7(%2)\n\t" \
-	"rorl $16,%1" \
-	: "=m"(*(n)) : "q" (addr), "r"(n), "ir"(limit), "i"(type))
-
-static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
+static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
 {
-	_set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
-		offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
+#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
+	C(0); C(1); C(2);
+#undef C
 }
 
-#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
+static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b)
+{
+	__u32 *lp = (__u32 *)((char *)dt + entry*8);
+	*lp = entry_a;
+	*(lp+1) = entry_b;
+}
+
+#define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+#define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+#define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+
+static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg)
+{
+	__u32 a, b;
+	pack_gate(&a, &b, (unsigned long)addr, seg, type, 0);
+	write_idt_entry(idt_table, gate, a, b);
+}
 
-static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
+static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const void *addr)
 {
-	_set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
+	__u32 a, b;
+	pack_descriptor(&a, &b, (unsigned long)addr,
+		offsetof(struct tss_struct, __cacheline_filler) - 1,
+		DESCTYPE_TSS, 0);
+	write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b);
 }
 
+static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int entries)
+{
+	__u32 a, b;
+	pack_descriptor(&a, &b, (unsigned long)addr,
+		entries * sizeof(struct desc_struct) - 1,
+		DESCTYPE_LDT, 0);
+	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b);
+}
+
+#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
+
 #define LDT_entry_a(info) \
 	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
 
@@ -102,24 +151,6 @@ static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
 	(info)->seg_not_present == 1 && \
 	(info)->useable == 0 )
 
-static inline void write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
-{
-	__u32 *lp = (__u32 *)((char *)ldt + entry*8);
-	*lp = entry_a;
-	*(lp+1) = entry_b;
-}
-
-#if TLS_SIZE != 24
-# error update this code.
-#endif
-
-static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
-{
-#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
-	C(0); C(1); C(2);
-#undef C
-}
-
 static inline void clear_LDT(void)
 {
 	int cpu = get_cpu();