author    Rusty Russell <rusty@rustcorp.com.au>    2007-05-02 13:27:10 -0400
committer Andi Kleen <andi@basil.nowhere.org>      2007-05-02 13:27:10 -0400
commit    90a0a06aa81692028864c21f981905fda46b1208
tree      516528b328d5288ee057d1eff5491e2ba1b49af1 /include/asm-i386/desc.h
parent    52de74dd3994e165ef1b35c33d54655a6400e30c
[PATCH] i386: rationalize paravirt wrappers
paravirt.c used to implement native versions of all low-level functions. Far cleaner is to have the native versions exposed in the headers as inline native_XXX functions, and if !CONFIG_PARAVIRT, simply #define XXX native_XXX.

There are several nice side effects:

1) write_dt_entry() now takes the correct "struct desc_struct *", not "void *".

2) load_TLS() is reintroduced as a for loop, rather than manually unrolled with a #error in case the bounds ever change.

3) Macros become inlines, with type checking.

4) Access to the native versions is trivial for KVM, lguest, Xen and others who might want it.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Andi Kleen <ak@muc.de>
Cc: Avi Kivity <avi@qumranet.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
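In outline, the pattern looks like this (a minimal sketch; example_op() and its body are illustrative, not code from this patch):

    /* The native version is always visible as an inline ... */
    static inline void native_example_op(void)
    {
    	asm volatile("nop");	/* stand-in for the real privileged instruction */
    }

    /* ... and the generic name either maps straight to it, or is
     * supplied by the paravirt layer when CONFIG_PARAVIRT is set. */
    #ifdef CONFIG_PARAVIRT
    #include <asm/paravirt.h>
    #else
    #define example_op() native_example_op()
    #endif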
Diffstat (limited to 'include/asm-i386/desc.h')
-rw-r--r--  include/asm-i386/desc.h | 82
1 file changed, 56 insertions(+), 26 deletions(-)
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
index a75ae6b97860..13f701ea9a8f 100644
--- a/include/asm-i386/desc.h
+++ b/include/asm-i386/desc.h
@@ -57,45 +57,33 @@ static inline void pack_gate(__u32 *a, __u32 *b,
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
-#define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
-
-#define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
-#define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
+#define load_TR_desc() native_load_tr_desc()
+#define load_gdt(dtr) native_load_gdt(dtr)
+#define load_idt(dtr) native_load_idt(dtr)
 #define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr))
 #define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt))
 
-#define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr))
-#define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr))
-#define store_tr(tr) __asm__ ("str %0":"=m" (tr))
+#define store_gdt(dtr) native_store_gdt(dtr)
+#define store_idt(dtr) native_store_idt(dtr)
+#define store_tr(tr) (tr = native_store_tr())
 #define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt))
 
-#if TLS_SIZE != 24
-# error update this code.
-#endif
-
-static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
-{
-#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
-	C(0); C(1); C(2);
-#undef C
-}
+#define load_TLS(t, cpu) native_load_tls(t, cpu)
+#define set_ldt native_set_ldt
 
 #define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
 #define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
 #define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+#endif
 
-static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b)
+static inline void write_dt_entry(struct desc_struct *dt,
+				  int entry, u32 entry_low, u32 entry_high)
 {
-	__u32 *lp = (__u32 *)((char *)dt + entry*8);
-	*lp = entry_a;
-	*(lp+1) = entry_b;
+	dt[entry].a = entry_low;
+	dt[entry].b = entry_high;
 }
 
-#define set_ldt native_set_ldt
-#endif /* CONFIG_PARAVIRT */
-
-static inline fastcall void native_set_ldt(const void *addr,
-			unsigned int entries)
+static inline void native_set_ldt(const void *addr, unsigned int entries)
 {
 	if (likely(entries == 0))
 		__asm__ __volatile__("lldt %w0"::"q" (0));
@@ -111,6 +99,48 @@ static inline fastcall void native_set_ldt(const void *addr,
 	}
 }
 
+
+static inline void native_load_tr_desc(void)
+{
+	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
+}
+
+static inline void native_load_gdt(const struct Xgt_desc_struct *dtr)
+{
+	asm volatile("lgdt %0"::"m" (*dtr));
+}
+
+static inline void native_load_idt(const struct Xgt_desc_struct *dtr)
+{
+	asm volatile("lidt %0"::"m" (*dtr));
+}
+
+static inline void native_store_gdt(struct Xgt_desc_struct *dtr)
+{
+	asm ("sgdt %0":"=m" (*dtr));
+}
+
+static inline void native_store_idt(struct Xgt_desc_struct *dtr)
+{
+	asm ("sidt %0":"=m" (*dtr));
+}
+
+static inline unsigned long native_store_tr(void)
+{
+	unsigned long tr;
+	asm ("str %0":"=r" (tr));
+	return tr;
+}
+
+static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
+{
+	unsigned int i;
+	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+
+	for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
+		gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
+}
+
 static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg)
 {
 	__u32 a, b;
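Side effect 4 is the payoff of exposing these native_* inlines: a paravirt backend only has to supply replacements for the operations it actually intercepts, and can point everything else at the native versions. A minimal sketch, assuming a hypothetical ops table (struct desc_ops, its field layout, and lguest_load_gdt are illustrative, not the kernel's real paravirt_ops):

    /* Hypothetical ops table; the field layout is illustrative only. */
    struct desc_ops {
    	void (*load_gdt)(const struct Xgt_desc_struct *dtr);
    	void (*load_idt)(const struct Xgt_desc_struct *dtr);
    	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
    };

    /* Hypothetical hypercall-based replacement a guest backend would provide. */
    void lguest_load_gdt(const struct Xgt_desc_struct *dtr);

    /* The backend overrides only what it must trap to the hypervisor,
     * reusing the exposed native inlines everywhere else. */
    static const struct desc_ops lguest_desc_ops = {
    	.load_gdt = lguest_load_gdt,
    	.load_idt = native_load_idt,	/* no interception needed */
    	.load_tls = native_load_tls,
    };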