author      Jeremy Fitzhardinge <jeremy@goop.org>    2008-07-07 15:07:50 -0400
committer   Ingo Molnar <mingo@elte.hu>              2008-07-16 05:15:52 -0400
commit      74d4affde8feb8d5bdebf7fba8e90e4eae3b7b1d
tree        ea70d2323c8a424e8c20389514c6c91f149cdf72 /arch
parent      094029479be8eb380447f42eff1b35362ef1a464
x86/paravirt: add hooks for spinlock operations
Ticket spinlocks have absolutely ghastly worst-case performance characteristics in a virtual environment. If there is any contention for physical CPUs (i.e., there are more runnable vcpus than cpus), then ticket locks can cause the system to spend 90+% of its time spinning.

The problem is that (v)cpus waiting on a ticket spinlock are granted the lock in the strict order in which they took their tickets. If the hypervisor scheduler doesn't give the vcpus CPU time in that order, they burn timeslices waiting for the scheduler to run the right vcpu. In the worst case it can take O(n^2) vcpu scheduler timeslices for everyone waiting on the lock to get it, not counting new cpus trying to take the lock while the log-jam is sorted out.

These hooks allow a paravirt backend to replace the spinlock implementation.

At the very least, a backend could revert to the old lock algorithm, which lets the next-scheduled vcpu take the lock and has reasonably good performance. It also allows the spinlocks to take advantage of hypervisor features to make locks more efficient (spin and block, for example).

The cost to native execution is one extra direct call per spinlock operation. There is no overhead if CONFIG_PARAVIRT is turned off.

The lock structure is fixed at a single "unsigned int", initialized to zero, but the spinlock implementation can use it as it wishes.

Thanks to Thomas Friebel's Xen Summit talk "Preventing Guests from Spinning Around" for pointing out this problem.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Christoph Lameter <clameter@linux-foundation.org>
Cc: Petr Tesarik <ptesarik@suse.cz>
Cc: Virtualization <virtualization@lists.linux-foundation.org>
Cc: Xen devel <xen-devel@lists.xensource.com>
Cc: Thomas Friebel <thomas.friebel@amd.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
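[Editor's note] The dispatch pattern the message describes is easy to see in miniature. Below is a minimal, self-contained C sketch (userspace, not kernel code): lock operations go through a table of function pointers that a paravirt backend may overwrite, the lock word is a single unsigned int initialized to zero, and native execution pays one extra call per operation. The pv_lock_ops_sketch names only mirror the pv_lock_ops table added by the patch; the test-and-set code here is a simplified stand-in for the kernel's __ticket_spin_* functions, and every *_sketch identifier is hypothetical.

#include <stdatomic.h>
#include <stdbool.h>

/* The lock word: a single unsigned int, initialized to zero; the
 * backend may use it however it wishes (the kernel's ticket code
 * splits it into head/tail tickets). */
typedef struct {
	atomic_uint slock;
} raw_spinlock_sketch;

/* Table of lock operations a paravirt backend may overwrite.
 * (spin_is_contended from the real pv_lock_ops is omitted here,
 * since a plain test-and-set word cannot express it.) */
struct pv_lock_ops_sketch {
	bool (*spin_is_locked)(raw_spinlock_sketch *lock);
	void (*spin_lock)(raw_spinlock_sketch *lock);
	bool (*spin_trylock)(raw_spinlock_sketch *lock);
	void (*spin_unlock)(raw_spinlock_sketch *lock);
};

/* Default "native" implementation: a plain test-and-set spinner,
 * standing in for the kernel's __ticket_spin_* functions. */
static bool native_is_locked(raw_spinlock_sketch *lock)
{
	return atomic_load(&lock->slock) != 0;
}

static bool native_trylock(raw_spinlock_sketch *lock)
{
	unsigned int expected = 0;
	return atomic_compare_exchange_strong(&lock->slock, &expected, 1u);
}

static void native_lock(raw_spinlock_sketch *lock)
{
	while (!native_trylock(lock))
		;	/* spin */
}

static void native_unlock(raw_spinlock_sketch *lock)
{
	atomic_store(&lock->slock, 0);
}

/* Filled with the native functions by default; a hypervisor backend
 * rewrites these pointers at boot. */
static struct pv_lock_ops_sketch pv_lock_ops_sketch = {
	.spin_is_locked	= native_is_locked,
	.spin_lock	= native_lock,
	.spin_trylock	= native_trylock,
	.spin_unlock	= native_unlock,
};

/* Callers always dispatch through the table: one extra call on
 * native hardware, full flexibility for a paravirt backend. */
static inline void spin_lock_sketch(raw_spinlock_sketch *lock)
{
	pv_lock_ops_sketch.spin_lock(lock);
}

static inline void spin_unlock_sketch(raw_spinlock_sketch *lock)
{
	pv_lock_ops_sketch.spin_unlock(lock);
}

The design choice the commit describes is visible here: the per-operation cost on native hardware is just the call through the table (which the kernel's patching machinery, touched in the first hunk below, turns into a direct call), while a hypervisor backend only has to swap the pointers.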
Diffstat (limited to 'arch')
-rw-r--r--   arch/x86/kernel/paravirt.c | 10 ++++++++++
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 2963ab5d91ee..f33816868707 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -124,6 +124,7 @@ static void *get_call_destination(u8 type)
 		.pv_irq_ops = pv_irq_ops,
 		.pv_apic_ops = pv_apic_ops,
 		.pv_mmu_ops = pv_mmu_ops,
+		.pv_lock_ops = pv_lock_ops,
 	};
 	return *((void **)&tmpl + type);
 }
@@ -450,6 +451,15 @@ struct pv_mmu_ops pv_mmu_ops = {
 	.set_fixmap = native_set_fixmap,
 };
 
+struct pv_lock_ops pv_lock_ops = {
+	.spin_is_locked = __ticket_spin_is_locked,
+	.spin_is_contended = __ticket_spin_is_contended,
+
+	.spin_lock = __ticket_spin_lock,
+	.spin_trylock = __ticket_spin_trylock,
+	.spin_unlock = __ticket_spin_unlock,
+};
+
 EXPORT_SYMBOL_GPL(pv_time_ops);
 EXPORT_SYMBOL    (pv_cpu_ops);
 EXPORT_SYMBOL    (pv_mmu_ops);
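[Editor's note] For context on how a backend might use these hooks, here is a hedged continuation of the sketch above showing a "spin briefly, then block" lock of the kind the commit message alludes to. This is not part of this patch (the Xen backend arrived separately); hypervisor_block_sketch() and hypervisor_kick_sketch() are hypothetical stand-ins for whatever blocking/wakeup primitive a hypervisor provides, stubbed out here so the sketch compiles.

/* Hypothetical hypervisor primitives, stubbed so the sketch compiles.
 * A real backend would issue hypercalls to sleep and wake vcpus. */
static void hypervisor_block_sketch(raw_spinlock_sketch *lock) { (void)lock; }
static void hypervisor_kick_sketch(raw_spinlock_sketch *lock)  { (void)lock; }

#define SPIN_THRESHOLD_SKETCH 1024

static void pv_backend_lock(raw_spinlock_sketch *lock)
{
	for (;;) {
		int i;

		for (i = 0; i < SPIN_THRESHOLD_SKETCH; i++)
			if (native_trylock(lock))
				return;
		/* Out of patience: ask the hypervisor to block this vcpu
		 * until the lock holder kicks it, instead of burning the
		 * rest of the timeslice spinning. */
		hypervisor_block_sketch(lock);
	}
}

static void pv_backend_unlock(raw_spinlock_sketch *lock)
{
	atomic_store(&lock->slock, 0);
	hypervisor_kick_sketch(lock);	/* wake one blocked waiter */
}

static void pv_backend_init(void)
{
	/* Replace the default table entries before any locks are used. */
	pv_lock_ops_sketch.spin_lock   = pv_backend_lock;
	pv_lock_ops_sketch.spin_unlock = pv_backend_unlock;
}

Callers compiled against spin_lock_sketch()/spin_unlock_sketch() need no changes; only the table contents differ between native and paravirtualized boots, which is the whole point of the hooks added here.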