author     Jeremy Fitzhardinge <jeremy@xensource.com>    2007-07-17 21:37:07 -0400
committer  Jeremy Fitzhardinge <jeremy@goop.org>         2007-07-18 11:47:45 -0400
commit     6487673b8a858f99a5348e1078b3f5aec700f9e0 (patch)
tree       2c96da8fde879561f36678691ef997f3fdb7b3f0 /arch/i386/xen
parent     60223a326fc8fa6e90e2c3fd28ae6de4a311d731 (diff)
xen: Attempt to patch inline versions of common operations
This patch adds the mechanism to allow us to patch inline versions of common operations. The implementations of the direct-access versions of save_fl, restore_fl, irq_enable and irq_disable are now in assembler, and the same code is used for both out-of-line and inline uses.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Chris Wright <chrisw@sous-sol.org>
Cc: Keir Fraser <keir@xensource.com>
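
The core of the mechanism is in xen_patch() below: if the assembler template for an operation fits at the call site, its bytes are copied in place of the usual indirect call, and any relative operand inside the copy is shifted by the distance the code moved. The following is a minimal user-space sketch of that copy-and-relocate step, not taken from the patch; all names and buffers are invented for illustration (the kernel side uses paravirt_patch_insns() and the xen_*_direct templates from xen-asm.S):

#include <stdio.h>
#include <string.h>

/* Illustrative only: mimic the copy + relocation done by xen_patch().
   'start'..'end' is the template, 'reloc' points at a relative operand
   inside it, 'insns' is the patch site of size 'len'. */
static unsigned copy_template(char *insns, unsigned len,
                              const char *start, const char *end,
                              const char *reloc)
{
        unsigned ret = end - start;

        if (ret > len)
                return 0;       /* doesn't fit; a real patcher falls back */

        memcpy(insns, start, ret);

        /* A relative operand was correct at the template's own address;
           adjust it by how far the bytes just moved. */
        if (reloc > start && reloc < end) {
                long *relocp = (long *)(insns + (reloc - start));
                *relocp += start - insns;
        }
        return ret;
}

int main(void)
{
        /* Four filler bytes standing in for real instructions, followed
           by a long-sized relative field. */
        char tmpl[4 + sizeof(long)] = { 1, 1, 1, 1 };
        char site[4 + sizeof(long)];
        long disp = 0x100;      /* pretend displacement, valid at tmpl */

        memcpy(tmpl + 4, &disp, sizeof(disp));
        printf("patched %u bytes\n",
               copy_template(site, sizeof(site), tmpl,
                             tmpl + sizeof(tmpl), tmpl + 4));
        return 0;
}
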
Diffstat (limited to 'arch/i386/xen')
-rw-r--r--  arch/i386/xen/Makefile    |   2
-rw-r--r--  arch/i386/xen/enlighten.c | 107
-rw-r--r--  arch/i386/xen/xen-asm.S   | 114
-rw-r--r--  arch/i386/xen/xen-ops.h   |  13
4 files changed, 182 insertions, 54 deletions
diff --git a/arch/i386/xen/Makefile b/arch/i386/xen/Makefile
index 7bf2ce399a2a..343df246bd3e 100644
--- a/arch/i386/xen/Makefile
+++ b/arch/i386/xen/Makefile
@@ -1,4 +1,4 @@
 obj-y		:= enlighten.o setup.o features.o multicalls.o mmu.o \
-			events.o time.o manage.o
+			events.o time.o manage.o xen-asm.o
 
 obj-$(CONFIG_SMP)	+= smp.o
diff --git a/arch/i386/xen/enlighten.c b/arch/i386/xen/enlighten.c
index e33fa0990eda..4fa62a4cb7cc 100644
--- a/arch/i386/xen/enlighten.c
+++ b/arch/i386/xen/enlighten.c
@@ -115,6 +115,7 @@ static void __init xen_vcpu_setup(int cpu)
 	/* This cpu is using the registered vcpu info, even if
 	   later ones fail to. */
 	per_cpu(xen_vcpu, cpu) = vcpup;
+
 	printk(KERN_DEBUG "cpu %d using vcpu_info at %p\n",
 	       cpu, vcpup);
 }
@@ -177,20 +178,6 @@ static unsigned long xen_save_fl(void)
 	return (-flags) & X86_EFLAGS_IF;
 }
 
-static unsigned long xen_save_fl_direct(void)
-{
-	unsigned long flags;
-
-	/* flag has opposite sense of mask */
-	flags = !x86_read_percpu(xen_vcpu_info.evtchn_upcall_mask);
-
-	/* convert to IF type flag
-	   -0 -> 0x00000000
-	   -1 -> 0xffffffff
-	*/
-	return (-flags) & X86_EFLAGS_IF;
-}
-
 static void xen_restore_fl(unsigned long flags)
 {
 	struct vcpu_info *vcpu;
@@ -217,25 +204,6 @@ static void xen_restore_fl(unsigned long flags)
 	}
 }
 
-static void xen_restore_fl_direct(unsigned long flags)
-{
-	/* convert from IF type flag */
-	flags = !(flags & X86_EFLAGS_IF);
-
-	/* This is an atomic update, so no need to worry about
-	   preemption. */
-	x86_write_percpu(xen_vcpu_info.evtchn_upcall_mask, flags);
-
-	/* If we get preempted here, then any pending event will be
-	   handled anyway. */
-
-	if (flags == 0) {
-		barrier(); /* unmask then check (avoid races) */
-		if (unlikely(x86_read_percpu(xen_vcpu_info.evtchn_upcall_pending)))
-			force_evtchn_callback();
-	}
-}
-
 static void xen_irq_disable(void)
 {
 	/* There's a one instruction preempt window here.  We need to
@@ -246,12 +214,6 @@ static void xen_irq_disable(void)
 	preempt_enable_no_resched();
 }
 
-static void xen_irq_disable_direct(void)
-{
-	/* Atomic update, so preemption not a concern. */
-	x86_write_percpu(xen_vcpu_info.evtchn_upcall_mask, 1);
-}
-
 static void xen_irq_enable(void)
 {
 	struct vcpu_info *vcpu;
@@ -272,19 +234,6 @@ static void xen_irq_enable(void)
 		force_evtchn_callback();
 }
 
-static void xen_irq_enable_direct(void)
-{
-	/* Atomic update, so preemption not a concern. */
-	x86_write_percpu(xen_vcpu_info.evtchn_upcall_mask, 0);
-
-	/* Doesn't matter if we get preempted here, because any
-	   pending event will get dealt with anyway. */
-
-	barrier(); /* unmask then check (avoid races) */
-	if (unlikely(x86_read_percpu(xen_vcpu_info.evtchn_upcall_pending)))
-		force_evtchn_callback();
-}
-
 static void xen_safe_halt(void)
 {
 	/* Blocking includes an implicit local_irq_enable(). */
@@ -892,6 +841,57 @@ void __init xen_setup_vcpu_info_placement(void)
 	}
 }
 
+static unsigned xen_patch(u8 type, u16 clobbers, void *insns, unsigned len)
+{
+	char *start, *end, *reloc;
+	unsigned ret;
+
+	start = end = reloc = NULL;
+
+#define SITE(x)							\
+	case PARAVIRT_PATCH(x):					\
+	if (have_vcpu_info_placement) {				\
+		start = (char *)xen_##x##_direct;		\
+		end = xen_##x##_direct_end;			\
+		reloc = xen_##x##_direct_reloc;			\
+	}							\
+	goto patch_site
+
+	switch (type) {
+		SITE(irq_enable);
+		SITE(irq_disable);
+		SITE(save_fl);
+		SITE(restore_fl);
+#undef SITE
+
+	patch_site:
+		if (start == NULL || (end-start) > len)
+			goto default_patch;
+
+		ret = paravirt_patch_insns(insns, len, start, end);
+
+		/* Note: because reloc is assigned from something that
+		   appears to be an array, gcc assumes it's non-null,
+		   but doesn't know its relationship with start and
+		   end. */
+		if (reloc > start && reloc < end) {
+			int reloc_off = reloc - start;
+			long *relocp = (long *)(insns + reloc_off);
+			long delta = start - (char *)insns;
+
+			*relocp += delta;
+		}
+		break;
+
+	default_patch:
+	default:
+		ret = paravirt_patch_default(type, clobbers, insns, len);
+		break;
+	}
+
+	return ret;
+}
+
 static const struct paravirt_ops xen_paravirt_ops __initdata = {
 	.paravirt_enabled = 1,
 	.shared_kernel_pmd = 0,
@@ -899,7 +899,7 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
 	.name = "Xen",
 	.banner = xen_banner,
 
-	.patch = paravirt_patch_default,
+	.patch = xen_patch,
 
 	.memory_setup = xen_memory_setup,
 	.arch_setup = xen_arch_setup,
@@ -1076,6 +1076,7 @@ static const struct machine_ops __initdata xen_machine_ops = {
 	.emergency_restart = xen_emergency_restart,
 };
 
+
 /* First C function to be called on Xen boot */
 asmlinkage void __init xen_start_kernel(void)
 {
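
The SITE() macro in xen_patch() above is only a shorthand for generating one switch case per operation. Expanded by hand (shown here purely for readability, this is not part of the patch), SITE(irq_enable); becomes roughly:

	case PARAVIRT_PATCH(irq_enable):
	if (have_vcpu_info_placement) {
		start = (char *)xen_irq_enable_direct;
		end = xen_irq_enable_direct_end;
		reloc = xen_irq_enable_direct_reloc;
	}
	goto patch_site;

If vcpu info placement is not in use, start stays NULL, so the patch_site code falls through to default_patch and paravirt_patch_default() handles the site as before.
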
diff --git a/arch/i386/xen/xen-asm.S b/arch/i386/xen/xen-asm.S
new file mode 100644
index 000000000000..dc4d36d51bc1
--- /dev/null
+++ b/arch/i386/xen/xen-asm.S
@@ -0,0 +1,114 @@
+/*
+	Asm versions of Xen pv-ops, suitable for either direct use or inlining.
+	The inline versions are the same as the direct-use versions, with the
+	pre- and post-amble chopped off.
+
+	This code is encoded for size rather than absolute efficiency,
+	with a view to being able to inline as much as possible.
+
+	We only bother with direct forms (ie, vcpu in pda) of the operations
+	here; the indirect forms are better handled in C, since they're
+	generally too large to inline anyway.
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+#include <asm/percpu.h>
+#include <asm/asm-offsets.h>
+#include <asm/processor-flags.h>
+
+#define RELOC(x, v)	.globl x##_reloc; x##_reloc=v
+#define ENDPATCH(x)	.globl x##_end; x##_end=.
+
+/*
+	Enable events.  This clears the event mask and tests the pending
+	event status with one and operation.  If there are pending
+	events, then enter the hypervisor to get them handled.
+ */
+ENTRY(xen_irq_enable_direct)
+	/* Clear mask and test pending */
+	andw $0x00ff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
+	/* Preempt here doesn't matter because that will deal with
+	   any pending interrupts.  The pending check may end up being
+	   run on the wrong CPU, but that doesn't hurt. */
+	jz 1f
+2:	call check_events
+1:
+ENDPATCH(xen_irq_enable_direct)
+	ret
+	ENDPROC(xen_irq_enable_direct)
+	RELOC(xen_irq_enable_direct, 2b+1)
+
+
+/*
+	Disabling events is simply a matter of making the event mask
+	non-zero.
+ */
+ENTRY(xen_irq_disable_direct)
+	movb $1, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
+ENDPATCH(xen_irq_disable_direct)
+	ret
+	ENDPROC(xen_irq_disable_direct)
+	RELOC(xen_irq_disable_direct, 0)
+
+/*
+	(xen_)save_fl is used to get the current interrupt enable status.
+	Callers expect the status to be in X86_EFLAGS_IF, and other bits
+	may be set in the return value.  We take advantage of this by
+	making sure that X86_EFLAGS_IF has the right value (and other bits
+	in that byte are 0), but other bits in the return value are
+	undefined.  We need to toggle the state of the bit, because
+	Xen and x86 use opposite senses (mask vs enable).
+ */
+ENTRY(xen_save_fl_direct)
+	testb $0xff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
+	setz %ah
+	addb %ah,%ah
+ENDPATCH(xen_save_fl_direct)
+	ret
+	ENDPROC(xen_save_fl_direct)
+	RELOC(xen_save_fl_direct, 0)
+
+
+/*
+	In principle the caller should be passing us a value return
+	from xen_save_fl_direct, but for robustness sake we test only
+	the X86_EFLAGS_IF flag rather than the whole byte. After
+	setting the interrupt mask state, it checks for unmasked
+	pending events and enters the hypervisor to get them delivered
+	if so.
+ */
+ENTRY(xen_restore_fl_direct)
+	testb $X86_EFLAGS_IF>>8, %ah
+	setz %al
+	movb %al, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
+	/* Preempt here doesn't matter because that will deal with
+	   any pending interrupts.  The pending check may end up being
+	   run on the wrong CPU, but that doesn't hurt. */
+
+	/* check for pending but unmasked */
+	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
+	jz 1f
+2:	call check_events
+1:
+ENDPATCH(xen_restore_fl_direct)
+	ret
+	ENDPROC(xen_restore_fl_direct)
+	RELOC(xen_restore_fl_direct, 2b+1)
+
+
+
+/*
+	Force an event check by making a hypercall,
+	but preserve regs before making the call.
+ */
+check_events:
+	push %eax
+	push %ecx
+	push %edx
+	call force_evtchn_callback
+	pop %edx
+	pop %ecx
+	pop %eax
+	ret
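
The ENDPATCH() and RELOC() macros above only define extra global symbols: x_end marks where the inlineable body stops (just before the trailing ret), and x_reloc marks the operand that must be fixed up when the bytes are copied to a call site. For the two routines that call check_events, the reloc value 2b+1 is the address one byte into the call instruction, i.e. its 32-bit displacement; 0 means there is nothing to fix up. A small sketch of how the C side can consume these symbols, assuming the DECL_ASM() declarations from xen-ops.h below (the helper name here is invented, not part of the patch):

#include "xen-ops.h"

/* How many bytes xen_patch() would copy for the save_fl template,
   i.e. everything up to (but not including) the trailing ret. */
static unsigned xen_save_fl_template_len(void)
{
        return xen_save_fl_direct_end - (char *)xen_save_fl_direct;
}
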
diff --git a/arch/i386/xen/xen-ops.h b/arch/i386/xen/xen-ops.h
index 5b56f7fecd19..33e4c8a16289 100644
--- a/arch/i386/xen/xen-ops.h
+++ b/arch/i386/xen/xen-ops.h
@@ -54,4 +54,17 @@ int xen_smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
 			       void *info, int wait);
 
+
+/* Declare an asm function, along with symbols needed to make it
+   inlineable */
+#define DECL_ASM(ret, name, ...)		\
+	ret name(__VA_ARGS__);			\
+	extern char name##_end[];		\
+	extern char name##_reloc[]		\
+
+DECL_ASM(void, xen_irq_enable_direct, void);
+DECL_ASM(void, xen_irq_disable_direct, void);
+DECL_ASM(unsigned long, xen_save_fl_direct, void);
+DECL_ASM(void, xen_restore_fl_direct, unsigned long);
+
 #endif /* XEN_OPS_H */
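
For each asm routine, DECL_ASM() declares the function itself plus the two marker symbols emitted by ENDPATCH()/RELOC() in xen-asm.S, so enlighten.c can take their addresses in xen_patch(). Expanded by hand (shown only for readability), DECL_ASM(unsigned long, xen_save_fl_direct, void); is equivalent to:

unsigned long xen_save_fl_direct(void);
extern char xen_save_fl_direct_end[];
extern char xen_save_fl_direct_reloc[];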