Diffstat (limited to 'include')
-rw-r--r--  include/asm-i386/delay.h                    |    8
-rw-r--r--  include/asm-i386/desc.h                     |    9
-rw-r--r--  include/asm-i386/io.h                       |    8
-rw-r--r--  include/asm-i386/irq.h                      |    3
-rw-r--r--  include/asm-i386/irqflags.h                 |   42
-rw-r--r--  include/asm-i386/mach-default/setup_arch.h  |    2
-rw-r--r--  include/asm-i386/msr.h                      |    5
-rw-r--r--  include/asm-i386/paravirt.h                 |  281
-rw-r--r--  include/asm-i386/processor.h                |   15
-rw-r--r--  include/asm-i386/segment.h                  |    2
-rw-r--r--  include/asm-i386/setup.h                    |    1
-rw-r--r--  include/asm-i386/spinlock.h                 |    4
-rw-r--r--  include/asm-i386/suspend.h                  |    8
-rw-r--r--  include/asm-i386/system.h                   |   16
-rw-r--r--  include/asm-i386/time.h                     |   41
15 files changed, 411 insertions, 34 deletions
diff --git a/include/asm-i386/delay.h b/include/asm-i386/delay.h
index 9ae5e3782ed8..32d6678d0bbf 100644
--- a/include/asm-i386/delay.h
+++ b/include/asm-i386/delay.h
@@ -16,6 +16,13 @@ extern void __ndelay(unsigned long nsecs);
 extern void __const_udelay(unsigned long usecs);
 extern void __delay(unsigned long loops);
 
+#if defined(CONFIG_PARAVIRT) && !defined(USE_REAL_TIME_DELAY)
+#define udelay(n) paravirt_ops.const_udelay((n) * 0x10c7ul)
+
+#define ndelay(n) paravirt_ops.const_udelay((n) * 5ul)
+
+#else /* !PARAVIRT || USE_REAL_TIME_DELAY */
+
 /* 0x10c7 is 2**32 / 1000000 (rounded up) */
 #define udelay(n) (__builtin_constant_p(n) ? \
 	((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
@@ -25,6 +32,7 @@ extern void __delay(unsigned long loops);
 #define ndelay(n) (__builtin_constant_p(n) ? \
 	((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
 	__ndelay(n))
+#endif
 
 void use_tsc_delay(void);
 
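Note (not part of the patch): the constants above follow from fixed-point arithmetic. 2^32 / 1,000,000 ≈ 4294.97, rounded up to 4295 = 0x10c7, so multiplying a microsecond count by 0x10c7 expresses it as a 2^-32 fraction of a second for __const_udelay(); likewise 2^32 / 1,000,000,000 ≈ 4.29, which rounds up to the 5ul used for ndelay().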
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
index 6cf2ac2bfde7..f19820f0834f 100644
--- a/include/asm-i386/desc.h
+++ b/include/asm-i386/desc.h
@@ -55,6 +55,9 @@ static inline void pack_gate(__u32 *a, __u32 *b,
 #define DESCTYPE_DPL3	0x60	/* DPL-3 */
 #define DESCTYPE_S	0x10	/* !system */
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
 #define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
 
 #define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
@@ -105,7 +108,11 @@ static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const vo
 	write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b);
 }
 
-static inline void set_ldt(void *addr, unsigned int entries)
+#define set_ldt native_set_ldt
+#endif /* CONFIG_PARAVIRT */
+
+static inline fastcall void native_set_ldt(const void *addr,
+					   unsigned int entries)
 {
 	if (likely(entries == 0))
 		__asm__ __volatile__("lldt %w0"::"q" (0));
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
index 68df0dc3ab8f..86ff5e83be2f 100644
--- a/include/asm-i386/io.h
+++ b/include/asm-i386/io.h
@@ -256,11 +256,11 @@ static inline void flush_write_buffers(void)
 
 #endif /* __KERNEL__ */
 
-#ifdef SLOW_IO_BY_JUMPING
-#define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:"
+#if defined(CONFIG_PARAVIRT)
+#include <asm/paravirt.h>
 #else
+
 #define __SLOW_DOWN_IO "outb %%al,$0x80;"
-#endif
 
 static inline void slow_down_io(void) {
 	__asm__ __volatile__(
@@ -271,6 +271,8 @@ static inline void slow_down_io(void) {
 		: : );
 }
 
+#endif
+
 #ifdef CONFIG_X86_NUMAQ
 extern void *xquad_portio;		/* Where the IO area was mapped */
 #define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
diff --git a/include/asm-i386/irq.h b/include/asm-i386/irq.h
index 331726b41128..9e15ce0006eb 100644
--- a/include/asm-i386/irq.h
+++ b/include/asm-i386/irq.h
@@ -41,4 +41,7 @@ extern int irqbalance_disable(char *str);
 extern void fixup_irqs(cpumask_t map);
 #endif
 
+void init_IRQ(void);
+void __init native_init_IRQ(void);
+
 #endif /* _ASM_IRQ_H */
diff --git a/include/asm-i386/irqflags.h b/include/asm-i386/irqflags.h
index e1bdb97c07fa..9ce01f3fb7bc 100644
--- a/include/asm-i386/irqflags.h
+++ b/include/asm-i386/irqflags.h
@@ -10,6 +10,9 @@
 #ifndef _ASM_IRQFLAGS_H
 #define _ASM_IRQFLAGS_H
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
 #ifndef __ASSEMBLY__
 
 static inline unsigned long __raw_local_save_flags(void)
@@ -25,9 +28,6 @@ static inline unsigned long __raw_local_save_flags(void)
 	return flags;
 }
 
-#define raw_local_save_flags(flags) \
-	do { (flags) = __raw_local_save_flags(); } while (0)
-
 static inline void raw_local_irq_restore(unsigned long flags)
 {
 	__asm__ __volatile__(
@@ -66,18 +66,6 @@ static inline void halt(void)
 	__asm__ __volatile__("hlt": : :"memory");
 }
 
-static inline int raw_irqs_disabled_flags(unsigned long flags)
-{
-	return !(flags & (1 << 9));
-}
-
-static inline int raw_irqs_disabled(void)
-{
-	unsigned long flags = __raw_local_save_flags();
-
-	return raw_irqs_disabled_flags(flags);
-}
-
 /*
  * For spinlocks, etc:
  */
@@ -90,9 +78,33 @@ static inline unsigned long __raw_local_irq_save(void)
 	return flags;
 }
 
+#else
+#define DISABLE_INTERRUPTS		cli
+#define ENABLE_INTERRUPTS		sti
+#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
+#define INTERRUPT_RETURN		iret
+#define GET_CR0_INTO_EAX		movl %cr0, %eax
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_PARAVIRT */
+
+#ifndef __ASSEMBLY__
+#define raw_local_save_flags(flags) \
+	do { (flags) = __raw_local_save_flags(); } while (0)
+
 #define raw_local_irq_save(flags) \
 	do { (flags) = __raw_local_irq_save(); } while (0)
 
+static inline int raw_irqs_disabled_flags(unsigned long flags)
+{
+	return !(flags & (1 << 9));
+}
+
+static inline int raw_irqs_disabled(void)
+{
+	unsigned long flags = __raw_local_save_flags();
+
+	return raw_irqs_disabled_flags(flags);
+}
 #endif /* __ASSEMBLY__ */
 
 /*
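Note (not part of the patch): a minimal usage sketch of the irq-flags API that this hunk rearranges. Callers are written the same way whether or not CONFIG_PARAVIRT is set; only the macro expansion changes (inline pushf/cli/sti/popf natively, calls through paravirt_ops.save_fl()/irq_disable()/restore_fl() when paravirtualized). The function below is illustrative, not from the patch.

static void example_critical_section(void)	/* hypothetical caller */
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* save EFLAGS, then disable interrupts */
	/* ... code that must not be interrupted ... */
	raw_local_irq_restore(flags);	/* put the caller's interrupt state back */
}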
diff --git a/include/asm-i386/mach-default/setup_arch.h b/include/asm-i386/mach-default/setup_arch.h
index fb42099e7bd4..605e3ccb991b 100644
--- a/include/asm-i386/mach-default/setup_arch.h
+++ b/include/asm-i386/mach-default/setup_arch.h
@@ -2,4 +2,6 @@
 
 /* no action for generic */
 
+#ifndef ARCH_SETUP
 #define ARCH_SETUP
+#endif
diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h
index 1820d9d73af3..5679d4993072 100644
--- a/include/asm-i386/msr.h
+++ b/include/asm-i386/msr.h
@@ -1,6 +1,10 @@
 #ifndef __ASM_MSR_H
 #define __ASM_MSR_H
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+
 /*
  * Access to machine-specific registers (available on 586 and better only)
  * Note: the rd* operations modify the parameters directly (without using
@@ -77,6 +81,7 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
 	__asm__ __volatile__("rdpmc" \
 			     : "=a" (low), "=d" (high) \
 			     : "c" (counter))
+#endif	/* !CONFIG_PARAVIRT */
 
 /* symbolic names for some interesting MSRs */
 /* Intel defined MSRs. */
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
new file mode 100644
index 000000000000..a7551a44686f
--- /dev/null
+++ b/include/asm-i386/paravirt.h
@@ -0,0 +1,281 @@
+#ifndef __ASM_PARAVIRT_H
+#define __ASM_PARAVIRT_H
+/* Various instructions on x86 need to be replaced for
+ * para-virtualization: those hooks are defined here. */
+#include <linux/linkage.h>
+
+#ifdef CONFIG_PARAVIRT
+#ifndef __ASSEMBLY__
+struct thread_struct;
+struct Xgt_desc_struct;
+struct tss_struct;
+struct paravirt_ops
+{
+	unsigned int kernel_rpl;
+	int paravirt_enabled;
+	const char *name;
+
+	void (*arch_setup)(void);
+	char *(*memory_setup)(void);
+	void (*init_IRQ)(void);
+
+	void (*banner)(void);
+
+	unsigned long (*get_wallclock)(void);
+	int (*set_wallclock)(unsigned long);
+	void (*time_init)(void);
+
+	/* All the function pointers here are declared as "fastcall"
+	   so that we get a specific register-based calling
+	   convention.  This makes it easier to implement inline
+	   assembler replacements. */
+
+	void (fastcall *cpuid)(unsigned int *eax, unsigned int *ebx,
+		      unsigned int *ecx, unsigned int *edx);
+
+	unsigned long (fastcall *get_debugreg)(int regno);
+	void (fastcall *set_debugreg)(int regno, unsigned long value);
+
+	void (fastcall *clts)(void);
+
+	unsigned long (fastcall *read_cr0)(void);
+	void (fastcall *write_cr0)(unsigned long);
+
+	unsigned long (fastcall *read_cr2)(void);
+	void (fastcall *write_cr2)(unsigned long);
+
+	unsigned long (fastcall *read_cr3)(void);
+	void (fastcall *write_cr3)(unsigned long);
+
+	unsigned long (fastcall *read_cr4_safe)(void);
+	unsigned long (fastcall *read_cr4)(void);
+	void (fastcall *write_cr4)(unsigned long);
+
+	unsigned long (fastcall *save_fl)(void);
+	void (fastcall *restore_fl)(unsigned long);
+	void (fastcall *irq_disable)(void);
+	void (fastcall *irq_enable)(void);
+	void (fastcall *safe_halt)(void);
+	void (fastcall *halt)(void);
+	void (fastcall *wbinvd)(void);
+
+	/* err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
+	u64 (fastcall *read_msr)(unsigned int msr, int *err);
+	int (fastcall *write_msr)(unsigned int msr, u64 val);
+
+	u64 (fastcall *read_tsc)(void);
+	u64 (fastcall *read_pmc)(void);
+
+	void (fastcall *load_tr_desc)(void);
+	void (fastcall *load_gdt)(const struct Xgt_desc_struct *);
+	void (fastcall *load_idt)(const struct Xgt_desc_struct *);
+	void (fastcall *store_gdt)(struct Xgt_desc_struct *);
+	void (fastcall *store_idt)(struct Xgt_desc_struct *);
+	void (fastcall *set_ldt)(const void *desc, unsigned entries);
+	unsigned long (fastcall *store_tr)(void);
+	void (fastcall *load_tls)(struct thread_struct *t, unsigned int cpu);
+	void (fastcall *write_ldt_entry)(void *dt, int entrynum,
+					 u32 low, u32 high);
+	void (fastcall *write_gdt_entry)(void *dt, int entrynum,
+					 u32 low, u32 high);
+	void (fastcall *write_idt_entry)(void *dt, int entrynum,
+					 u32 low, u32 high);
+	void (fastcall *load_esp0)(struct tss_struct *tss,
+				   struct thread_struct *thread);
+
+	void (fastcall *set_iopl_mask)(unsigned mask);
+
+	void (fastcall *io_delay)(void);
+	void (*const_udelay)(unsigned long loops);
+
+	/* These two are jmp to, not actually called. */
+	void (fastcall *irq_enable_sysexit)(void);
+	void (fastcall *iret)(void);
+};
+
+extern struct paravirt_ops paravirt_ops;
+
+#define paravirt_enabled() (paravirt_ops.paravirt_enabled)
+
+static inline void load_esp0(struct tss_struct *tss,
+			     struct thread_struct *thread)
+{
+	paravirt_ops.load_esp0(tss, thread);
+}
+
+#define ARCH_SETUP			paravirt_ops.arch_setup();
+static inline unsigned long get_wallclock(void)
+{
+	return paravirt_ops.get_wallclock();
+}
+
+static inline int set_wallclock(unsigned long nowtime)
+{
+	return paravirt_ops.set_wallclock(nowtime);
+}
+
+static inline void do_time_init(void)
+{
+	return paravirt_ops.time_init();
+}
+
+/* The paravirtualized CPUID instruction. */
+static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
+			   unsigned int *ecx, unsigned int *edx)
+{
+	paravirt_ops.cpuid(eax, ebx, ecx, edx);
+}
+
+/*
+ * These special macros can be used to get or set a debugging register
+ */
+#define get_debugreg(var, reg) var = paravirt_ops.get_debugreg(reg)
+#define set_debugreg(val, reg) paravirt_ops.set_debugreg(reg, val)
+
+#define clts() paravirt_ops.clts()
+
+#define read_cr0() paravirt_ops.read_cr0()
+#define write_cr0(x) paravirt_ops.write_cr0(x)
+
+#define read_cr2() paravirt_ops.read_cr2()
+#define write_cr2(x) paravirt_ops.write_cr2(x)
+
+#define read_cr3() paravirt_ops.read_cr3()
+#define write_cr3(x) paravirt_ops.write_cr3(x)
+
+#define read_cr4() paravirt_ops.read_cr4()
+#define read_cr4_safe(x) paravirt_ops.read_cr4_safe()
+#define write_cr4(x) paravirt_ops.write_cr4(x)
+
+static inline unsigned long __raw_local_save_flags(void)
+{
+	return paravirt_ops.save_fl();
+}
+
+static inline void raw_local_irq_restore(unsigned long flags)
+{
+	return paravirt_ops.restore_fl(flags);
+}
+
+static inline void raw_local_irq_disable(void)
+{
+	paravirt_ops.irq_disable();
+}
+
+static inline void raw_local_irq_enable(void)
+{
+	paravirt_ops.irq_enable();
+}
+
+static inline unsigned long __raw_local_irq_save(void)
+{
+	unsigned long flags = paravirt_ops.save_fl();
+
+	paravirt_ops.irq_disable();
+
+	return flags;
+}
+
+static inline void raw_safe_halt(void)
+{
+	paravirt_ops.safe_halt();
+}
+
+static inline void halt(void)
+{
+	paravirt_ops.safe_halt();
+}
+#define wbinvd() paravirt_ops.wbinvd()
+
+#define get_kernel_rpl()  (paravirt_ops.kernel_rpl)
+
+#define rdmsr(msr,val1,val2) do {				\
+	int _err;						\
+	u64 _l = paravirt_ops.read_msr(msr,&_err);		\
+	val1 = (u32)_l;						\
+	val2 = _l >> 32;					\
+} while(0)
+
+#define wrmsr(msr,val1,val2) do {				\
+	u64 _l = ((u64)(val2) << 32) | (val1);			\
+	paravirt_ops.write_msr((msr), _l);			\
+} while(0)
+
+#define rdmsrl(msr,val) do {					\
+	int _err;						\
+	val = paravirt_ops.read_msr((msr),&_err);		\
+} while(0)
+
+#define wrmsrl(msr,val) (paravirt_ops.write_msr((msr),(val)))
+#define wrmsr_safe(msr,a,b) ({					\
+	u64 _l = ((u64)(b) << 32) | (a);			\
+	paravirt_ops.write_msr((msr),_l);			\
+})
+
+/* rdmsr with exception handling */
+#define rdmsr_safe(msr,a,b) ({					\
+	int _err;						\
+	u64 _l = paravirt_ops.read_msr(msr,&_err);		\
+	(*a) = (u32)_l;						\
+	(*b) = _l >> 32;					\
+	_err; })
+
+#define rdtsc(low,high) do {					\
+	u64 _l = paravirt_ops.read_tsc();			\
+	low = (u32)_l;						\
+	high = _l >> 32;					\
+} while(0)
+
+#define rdtscl(low) do {					\
+	u64 _l = paravirt_ops.read_tsc();			\
+	low = (int)_l;						\
+} while(0)
+
+#define rdtscll(val) (val = paravirt_ops.read_tsc())
+
+#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
+
+#define rdpmc(counter,low,high) do {				\
+	u64 _l = paravirt_ops.read_pmc();			\
+	low = (u32)_l;						\
+	high = _l >> 32;					\
+} while(0)
+
+#define load_TR_desc() (paravirt_ops.load_tr_desc())
+#define load_gdt(dtr) (paravirt_ops.load_gdt(dtr))
+#define load_idt(dtr) (paravirt_ops.load_idt(dtr))
+#define set_ldt(addr, entries) (paravirt_ops.set_ldt((addr), (entries)))
+#define store_gdt(dtr) (paravirt_ops.store_gdt(dtr))
+#define store_idt(dtr) (paravirt_ops.store_idt(dtr))
+#define store_tr(tr) ((tr) = paravirt_ops.store_tr())
+#define load_TLS(t,cpu) (paravirt_ops.load_tls((t),(cpu)))
+#define write_ldt_entry(dt, entry, low, high)				\
+	(paravirt_ops.write_ldt_entry((dt), (entry), (low), (high)))
+#define write_gdt_entry(dt, entry, low, high)				\
+	(paravirt_ops.write_gdt_entry((dt), (entry), (low), (high)))
+#define write_idt_entry(dt, entry, low, high)				\
+	(paravirt_ops.write_idt_entry((dt), (entry), (low), (high)))
+#define set_iopl_mask(mask) (paravirt_ops.set_iopl_mask(mask))
+
+/* The paravirtualized I/O functions */
+static inline void slow_down_io(void) {
+	paravirt_ops.io_delay();
+#ifdef REALLY_SLOW_IO
+	paravirt_ops.io_delay();
+	paravirt_ops.io_delay();
+	paravirt_ops.io_delay();
+#endif
+}
+
+#define CLI_STRING "pushl %eax; pushl %ecx; pushl %edx; call *paravirt_ops+PARAVIRT_irq_disable; popl %edx; popl %ecx; popl %eax"
+#define STI_STRING "pushl %eax; pushl %ecx; pushl %edx; call *paravirt_ops+PARAVIRT_irq_enable; popl %edx; popl %ecx; popl %eax"
+#else  /* __ASSEMBLY__ */
+
+#define INTERRUPT_RETURN		jmp *%cs:paravirt_ops+PARAVIRT_iret
+#define DISABLE_INTERRUPTS		pushl %eax; pushl %ecx; pushl %edx; call *paravirt_ops+PARAVIRT_irq_disable; popl %edx; popl %ecx; popl %eax
+#define ENABLE_INTERRUPTS		pushl %eax; pushl %ecx; pushl %edx; call *%cs:paravirt_ops+PARAVIRT_irq_enable; popl %edx; popl %ecx; popl %eax
+#define ENABLE_INTERRUPTS_SYSEXIT	jmp *%cs:paravirt_ops+PARAVIRT_irq_enable_sysexit
+#define GET_CR0_INTO_EAX		call *paravirt_ops+PARAVIRT_read_cr0
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_PARAVIRT */
+#endif	/* __ASM_PARAVIRT_H */
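Note (not part of the patch): a rough sketch of how a hypervisor port might hook into the interface declared above. The struct fields and the global paravirt_ops instance come from this patch; the example_vmm_* functions and the probe routine are invented for illustration, and every hook not assigned here is assumed to keep its native default.

#include <linux/init.h>
#include <asm/paravirt.h>

static fastcall void example_vmm_irq_disable(void)
{
	/* would ask the hypervisor to mask virtual interrupts for this vcpu */
}

static fastcall unsigned long example_vmm_save_fl(void)
{
	return 0;	/* would return the guest's virtual EFLAGS */
}

static void __init example_vmm_probe(void)
{
	/* override only the hooks this hypervisor cares about */
	paravirt_ops.name = "example_vmm";
	paravirt_ops.paravirt_enabled = 1;
	paravirt_ops.kernel_rpl = 1;		/* guest kernel runs outside ring 0 */
	paravirt_ops.irq_disable = example_vmm_irq_disable;
	paravirt_ops.save_fl = example_vmm_save_fl;
}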
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 98fa73b71760..6c2c4457be0a 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -144,8 +144,8 @@ static inline void detect_ht(struct cpuinfo_x86 *c) {}
 #define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
 #define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
 
-static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
-			   unsigned int *ecx, unsigned int *edx)
+static inline fastcall void native_cpuid(unsigned int *eax, unsigned int *ebx,
+					 unsigned int *ecx, unsigned int *edx)
 {
 	/* ecx is often an input as well as an output. */
 	__asm__("cpuid"
@@ -491,6 +491,12 @@ struct thread_struct {
 	.io_bitmap	= { [ 0 ... IO_BITMAP_LONGS] = ~0 },	\
 }
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define paravirt_enabled() 0
+#define __cpuid native_cpuid
+
 static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
 {
 	tss->esp0 = thread->esp0;
@@ -524,10 +530,13 @@ static inline void load_esp0(struct tss_struct *tss, struct thread_struct *threa
 		: /* no output */ \
 		:"r" (value))
 
+#define set_iopl_mask native_set_iopl_mask
+#endif /* CONFIG_PARAVIRT */
+
 /*
  * Set IOPL bits in EFLAGS from given mask
  */
-static inline void set_iopl_mask(unsigned mask)
+static fastcall inline void native_set_iopl_mask(unsigned mask)
 {
 	unsigned int reg;
 	__asm__ __volatile__ ("pushfl;"
diff --git a/include/asm-i386/segment.h b/include/asm-i386/segment.h
index 5bdda79b6b53..3c796af33776 100644
--- a/include/asm-i386/segment.h
+++ b/include/asm-i386/segment.h
@@ -131,5 +131,7 @@
 #define SEGMENT_LDT		0x4
 #define SEGMENT_GDT		0x0
 
+#ifndef CONFIG_PARAVIRT
 #define get_kernel_rpl()  0
 #endif
+#endif
diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h
index 2734909eff84..9930c5a355fc 100644
--- a/include/asm-i386/setup.h
+++ b/include/asm-i386/setup.h
@@ -70,6 +70,7 @@ extern unsigned char boot_params[PARAM_SIZE];
 struct e820entry;
 
 char * __init machine_specific_memory_setup(void);
+char *memory_setup(void);
 
 int __init copy_e820_map(struct e820entry * biosmap, int nr_map);
 int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map);
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index c18b71fae6b3..dea60709db29 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -7,8 +7,12 @@
 #include <asm/processor.h>
 #include <linux/compiler.h>
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
 #define CLI_STRING	"cli"
 #define STI_STRING	"sti"
+#endif /* CONFIG_PARAVIRT */
 
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
diff --git a/include/asm-i386/suspend.h b/include/asm-i386/suspend.h
index 08be1e5009d4..30361526d568 100644
--- a/include/asm-i386/suspend.h
+++ b/include/asm-i386/suspend.h
@@ -23,12 +23,8 @@ arch_prepare_suspend(void)
 struct saved_context {
 	u16 es, fs, gs, ss;
 	unsigned long cr0, cr2, cr3, cr4;
-	u16 gdt_pad;
-	u16 gdt_limit;
-	unsigned long gdt_base;
-	u16 idt_pad;
-	u16 idt_limit;
-	unsigned long idt_base;
+	struct Xgt_desc_struct gdt;
+	struct Xgt_desc_struct idt;
 	u16 ldt;
 	u16 tss;
 	unsigned long tr;
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index a6dabbcd6e6a..a6d20d9a1a30 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -88,6 +88,9 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 #define savesegment(seg, value) \
 	asm volatile("mov %%" #seg ",%0":"=rm" (value))
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
 #define read_cr0() ({ \
 	unsigned int __dummy; \
 	__asm__ __volatile__( \
@@ -139,17 +142,18 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 #define write_cr4(x) \
 	__asm__ __volatile__("movl %0,%%cr4": :"r" (x))
 
-/*
- * Clear and set 'TS' bit respectively
- */
+#define wbinvd() \
+	__asm__ __volatile__ ("wbinvd": : :"memory")
+
+/* Clear the 'TS' bit */
 #define clts() __asm__ __volatile__ ("clts")
+#endif/* CONFIG_PARAVIRT */
+
+/* Set the 'TS' bit */
 #define stts() write_cr0(8 | read_cr0())
 
 #endif /* __KERNEL__ */
 
-#define wbinvd() \
-	__asm__ __volatile__ ("wbinvd": : :"memory")
-
 static inline unsigned long get_limit(unsigned long segment)
 {
 	unsigned long __limit;
diff --git a/include/asm-i386/time.h b/include/asm-i386/time.h
new file mode 100644
index 000000000000..ea8065af825a
--- /dev/null
+++ b/include/asm-i386/time.h
@@ -0,0 +1,41 @@
+#ifndef _ASMi386_TIME_H
+#define _ASMi386_TIME_H
+
+#include <linux/efi.h>
+#include "mach_time.h"
+
+static inline unsigned long native_get_wallclock(void)
+{
+	unsigned long retval;
+
+	if (efi_enabled)
+		retval = efi_get_time();
+	else
+		retval = mach_get_cmos_time();
+
+	return retval;
+}
+
+static inline int native_set_wallclock(unsigned long nowtime)
+{
+	int retval;
+
+	if (efi_enabled)
+		retval = efi_set_rtc_mmss(nowtime);
+	else
+		retval = mach_set_rtc_mmss(nowtime);
+
+	return retval;
+}
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else /* !CONFIG_PARAVIRT */
+
+#define get_wallclock() native_get_wallclock()
+#define set_wallclock(x) native_set_wallclock(x)
+#define do_time_init() time_init_hook()
+
+#endif /* CONFIG_PARAVIRT */
+
+#endif
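Note (not part of the patch): a sketch of how the wallclock hooks above might be consumed by arch setup code. The caller is illustrative only; with CONFIG_PARAVIRT unset the macros expand to the native helpers defined in this header, otherwise they dispatch through paravirt_ops.

#include <linux/init.h>
#include <asm/time.h>

static void __init example_time_setup(void)
{
	unsigned long now = get_wallclock();	/* EFI or CMOS natively, or a hypervisor hook */

	(void)now;		/* would be fed into the timekeeping core here */
	do_time_init();		/* time_init_hook() natively, paravirt_ops.time_init() otherwise */
}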