aboutsummaryrefslogtreecommitdiffstats
path: root/include/asm-i386/processor.h
diff options
context:
space:
mode:
authorRusty Russell <rusty@rustcorp.com.au>2006-12-06 20:14:07 -0500
committerAndi Kleen <andi@basil.nowhere.org>2006-12-06 20:14:07 -0500
commitd3561b7fa0fb0fc583bab0eeda32bec9e4c4056d (patch)
tree39d835965878622d052ef3b3c7b759d83b6bc327 /include/asm-i386/processor.h
parentdb91b882aabd0b3b55a87cbfb344f2798bb740b4 (diff)
[PATCH] paravirt: header and stubs for paravirtualisation
Create a paravirt.h header for all the critical operations which need to be replaced with hypervisor calls, and include that instead of defining native operations, when CONFIG_PARAVIRT. This patch does the dumbest possible replacement of paravirtualized instructions: calls through a "paravirt_ops" structure. Currently these are function implementations of native hardware: hypervisors will override the ops structure with their own variants. All the pv-ops functions are declared "fastcall" so that a specific register-based ABI is used, to make inlining assembler easier. And: From: Andy Whitcroft <apw@shadowen.org> The paravirt ops introduce a 'weak' attribute onto memory_setup(). Code ordering leads to the following warnings on x86: arch/i386/kernel/setup.c:651: warning: weak declaration of `memory_setup' after first use results in unspecified behavior Move memory_setup() to avoid this. Signed-off-by: Rusty Russell <rusty@rustcorp.com.au> Signed-off-by: Chris Wright <chrisw@sous-sol.org> Signed-off-by: Andi Kleen <ak@suse.de> Cc: Jeremy Fitzhardinge <jeremy@goop.org> Cc: Zachary Amsden <zach@vmware.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Diffstat (limited to 'include/asm-i386/processor.h')
-rw-r--r--include/asm-i386/processor.h15
1 files changed, 12 insertions, 3 deletions
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 98fa73b71760..6c2c4457be0a 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -144,8 +144,8 @@ static inline void detect_ht(struct cpuinfo_x86 *c) {}
144#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */ 144#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
145#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */ 145#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
146 146
147static inline void __cpuid(unsigned int *eax, unsigned int *ebx, 147static inline fastcall void native_cpuid(unsigned int *eax, unsigned int *ebx,
148 unsigned int *ecx, unsigned int *edx) 148 unsigned int *ecx, unsigned int *edx)
149{ 149{
150 /* ecx is often an input as well as an output. */ 150 /* ecx is often an input as well as an output. */
151 __asm__("cpuid" 151 __asm__("cpuid"
@@ -491,6 +491,12 @@ struct thread_struct {
491 .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \ 491 .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \
492} 492}
493 493
494#ifdef CONFIG_PARAVIRT
495#include <asm/paravirt.h>
496#else
497#define paravirt_enabled() 0
498#define __cpuid native_cpuid
499
494static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread) 500static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
495{ 501{
496 tss->esp0 = thread->esp0; 502 tss->esp0 = thread->esp0;
@@ -524,10 +530,13 @@ static inline void load_esp0(struct tss_struct *tss, struct thread_struct *threa
524 : /* no output */ \ 530 : /* no output */ \
525 :"r" (value)) 531 :"r" (value))
526 532
533#define set_iopl_mask native_set_iopl_mask
534#endif /* CONFIG_PARAVIRT */
535
527/* 536/*
528 * Set IOPL bits in EFLAGS from given mask 537 * Set IOPL bits in EFLAGS from given mask
529 */ 538 */
530static inline void set_iopl_mask(unsigned mask) 539static fastcall inline void native_set_iopl_mask(unsigned mask)
531{ 540{
532 unsigned int reg; 541 unsigned int reg;
533 __asm__ __volatile__ ("pushfl;" 542 __asm__ __volatile__ ("pushfl;"