author     Jeremy Fitzhardinge <jeremy@xensource.com>    2007-07-17 21:37:06 -0400
committer  Jeremy Fitzhardinge <jeremy@goop.org>         2007-07-18 11:47:44 -0400
commit     f120f13ea0dbb0b0d6675683d5f6faea71277e65 (patch)
tree       6b525ab73bedfa78e43dee303ac991099377e9c5 /arch/i386/xen/enlighten.c
parent     f87e4cac4f4e940b328d3deb5b53e642e3881f43 (diff)
xen: Add support for preemption
Add Xen support for preemption. This is mostly a cleanup of existing
preempt_enable/disable calls, or just comments to explain the current
usage.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Chris Wright <chrisw@sous-sol.org>
Diffstat (limited to 'arch/i386/xen/enlighten.c')
-rw-r--r--   arch/i386/xen/enlighten.c   80
1 file changed, 49 insertions(+), 31 deletions(-)
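The recurring pattern in this patch is to disable preemption only across the short window between reading the per-CPU vcpu_info pointer and updating its event-channel mask; once the mask is written, being preempted is harmless because any pending event gets dealt with anyway. As orientation before the diff, here is that pattern in isolation. This is a minimal sketch mirroring the new xen_irq_enable() below; the function name xen_irq_enable_pattern is hypothetical, while x86_read_percpu(), force_evtchn_callback() and the vcpu_info fields are the interfaces this file already uses.

#include <linux/preempt.h>
#include <linux/hardirq.h>

/* Sketch only -- mirrors the new xen_irq_enable() in the diff below. */
static void xen_irq_enable_pattern(void)
{
	struct vcpu_info *vcpu;

	/* Don't switch CPUs between getting the per-CPU vcpu pointer
	   and clearing its upcall mask. */
	preempt_disable();
	vcpu = x86_read_percpu(xen_vcpu);
	vcpu->evtchn_upcall_mask = 0;
	preempt_enable_no_resched();

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */
	barrier();	/* unmask then check (avoid races) */
	if (unlikely(vcpu->evtchn_upcall_pending))
		force_evtchn_callback();
}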
diff --git a/arch/i386/xen/enlighten.c b/arch/i386/xen/enlighten.c
index de62d66e0893..a1124b7f1d14 100644
--- a/arch/i386/xen/enlighten.c
+++ b/arch/i386/xen/enlighten.c
@@ -15,6 +15,7 @@
 #include <linux/init.h>
 #include <linux/smp.h>
 #include <linux/preempt.h>
+#include <linux/hardirq.h>
 #include <linux/percpu.h>
 #include <linux/delay.h>
 #include <linux/start_kernel.h>
@@ -108,11 +109,10 @@ static unsigned long xen_save_fl(void)
 	struct vcpu_info *vcpu;
 	unsigned long flags;
 
-	preempt_disable();
 	vcpu = x86_read_percpu(xen_vcpu);
+
 	/* flag has opposite sense of mask */
 	flags = !vcpu->evtchn_upcall_mask;
-	preempt_enable();
 
 	/* convert to IF type flag
 	   -0 -> 0x00000000
@@ -125,32 +125,35 @@ static void xen_restore_fl(unsigned long flags)
 {
 	struct vcpu_info *vcpu;
 
-	preempt_disable();
-
 	/* convert from IF type flag */
 	flags = !(flags & X86_EFLAGS_IF);
+
+	/* There's a one instruction preempt window here. We need to
+	   make sure we're don't switch CPUs between getting the vcpu
+	   pointer and updating the mask. */
+	preempt_disable();
 	vcpu = x86_read_percpu(xen_vcpu);
 	vcpu->evtchn_upcall_mask = flags;
+	preempt_enable_no_resched();
 
-	if (flags == 0) {
-		/* Unmask then check (avoid races). We're only protecting
-		   against updates by this CPU, so there's no need for
-		   anything stronger. */
-		barrier();
+	/* Doesn't matter if we get preempted here, because any
+	   pending event will get dealt with anyway. */
 
+	if (flags == 0) {
+		preempt_check_resched();
+		barrier(); /* unmask then check (avoid races) */
 		if (unlikely(vcpu->evtchn_upcall_pending))
 			force_evtchn_callback();
-		preempt_enable();
-	} else
-		preempt_enable_no_resched();
+	}
 }
 
 static void xen_irq_disable(void)
 {
-	struct vcpu_info *vcpu;
+	/* There's a one instruction preempt window here. We need to
+	   make sure we're don't switch CPUs between getting the vcpu
+	   pointer and updating the mask. */
 	preempt_disable();
-	vcpu = x86_read_percpu(xen_vcpu);
-	vcpu->evtchn_upcall_mask = 1;
+	x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1;
 	preempt_enable_no_resched();
 }
 
@@ -158,18 +161,20 @@ static void xen_irq_enable(void)
 {
 	struct vcpu_info *vcpu;
 
+	/* There's a one instruction preempt window here. We need to
+	   make sure we're don't switch CPUs between getting the vcpu
+	   pointer and updating the mask. */
 	preempt_disable();
 	vcpu = x86_read_percpu(xen_vcpu);
 	vcpu->evtchn_upcall_mask = 0;
+	preempt_enable_no_resched();
 
-	/* Unmask then check (avoid races). We're only protecting
-	   against updates by this CPU, so there's no need for
-	   anything stronger. */
-	barrier();
+	/* Doesn't matter if we get preempted here, because any
+	   pending event will get dealt with anyway. */
 
+	barrier(); /* unmask then check (avoid races) */
 	if (unlikely(vcpu->evtchn_upcall_pending))
 		force_evtchn_callback();
-	preempt_enable();
 }
 
 static void xen_safe_halt(void)
@@ -189,6 +194,8 @@ static void xen_halt(void)
 
 static void xen_set_lazy_mode(enum paravirt_lazy_mode mode)
 {
+	BUG_ON(preemptible());
+
 	switch (mode) {
 	case PARAVIRT_LAZY_NONE:
 		BUG_ON(x86_read_percpu(xen_lazy_mode) == PARAVIRT_LAZY_NONE);
@@ -293,9 +300,13 @@ static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
 	xmaddr_t mach_lp = virt_to_machine(lp);
 	u64 entry = (u64)high << 32 | low;
 
+	preempt_disable();
+
 	xen_mc_flush();
 	if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry))
 		BUG();
+
+	preempt_enable();
 }
 
 static int cvt_gate_to_trap(int vector, u32 low, u32 high,
@@ -328,11 +339,13 @@ static DEFINE_PER_CPU(struct Xgt_desc_struct, idt_desc);
 static void xen_write_idt_entry(struct desc_struct *dt, int entrynum,
 				u32 low, u32 high)
 {
-
-	int cpu = smp_processor_id();
 	unsigned long p = (unsigned long)&dt[entrynum];
-	unsigned long start = per_cpu(idt_desc, cpu).address;
-	unsigned long end = start + per_cpu(idt_desc, cpu).size + 1;
+	unsigned long start, end;
+
+	preempt_disable();
+
+	start = __get_cpu_var(idt_desc).address;
+	end = start + __get_cpu_var(idt_desc).size + 1;
 
 	xen_mc_flush();
 
@@ -347,6 +360,8 @@ static void xen_write_idt_entry(struct desc_struct *dt, int entrynum,
 		if (HYPERVISOR_set_trap_table(info))
 			BUG();
 	}
+
+	preempt_enable();
 }
 
 static void xen_convert_trap_info(const struct Xgt_desc_struct *desc,
@@ -368,11 +383,9 @@ static void xen_convert_trap_info(const struct Xgt_desc_struct *desc,
 
 void xen_copy_trap_info(struct trap_info *traps)
 {
-	const struct Xgt_desc_struct *desc = &get_cpu_var(idt_desc);
+	const struct Xgt_desc_struct *desc = &__get_cpu_var(idt_desc);
 
 	xen_convert_trap_info(desc, traps);
-
-	put_cpu_var(idt_desc);
 }
 
 /* Load a new IDT into Xen. In principle this can be per-CPU, so we
@@ -382,12 +395,11 @@ static void xen_load_idt(const struct Xgt_desc_struct *desc)
 {
 	static DEFINE_SPINLOCK(lock);
 	static struct trap_info traps[257];
-	int cpu = smp_processor_id();
-
-	per_cpu(idt_desc, cpu) = *desc;
 
 	spin_lock(&lock);
 
+	__get_cpu_var(idt_desc) = *desc;
+
 	xen_convert_trap_info(desc, traps);
 
 	xen_mc_flush();
@@ -402,6 +414,8 @@ static void xen_load_idt(const struct Xgt_desc_struct *desc)
 static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
 				u32 low, u32 high)
 {
+	preempt_disable();
+
 	switch ((high >> 8) & 0xff) {
 	case DESCTYPE_LDT:
 	case DESCTYPE_TSS:
@@ -418,10 +432,12 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
 	}
 
 	}
+
+	preempt_enable();
 }
 
 static void xen_load_esp0(struct tss_struct *tss,
 			  struct thread_struct *thread)
 {
 	struct multicall_space mcs = xen_mc_entry(0);
 	MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->esp0);
@@ -525,6 +541,8 @@ static unsigned long xen_read_cr3(void)
 
 static void xen_write_cr3(unsigned long cr3)
 {
+	BUG_ON(preemptible());
+
 	if (cr3 == x86_read_percpu(xen_cr3)) {
 		/* just a simple tlb flush */
 		xen_flush_tlb();