-rw-r--r--   arch/x86/Kconfig                          |  2
-rw-r--r--   arch/x86/include/asm/paravirt.h           | 29
-rw-r--r--   arch/x86/include/asm/paravirt_types.h     | 10
-rw-r--r--   arch/x86/include/asm/qspinlock.h          | 25
-rw-r--r--   arch/x86/include/asm/qspinlock_paravirt.h |  6
-rw-r--r--   arch/x86/kernel/paravirt-spinlocks.c      | 24
-rw-r--r--   arch/x86/kernel/paravirt_patch_32.c       | 22
-rw-r--r--   arch/x86/kernel/paravirt_patch_64.c       | 22
8 files changed, 128 insertions(+), 12 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 90b1b54f4f38..50ec043a920d 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -667,7 +667,7 @@ config PARAVIRT_DEBUG
 config PARAVIRT_SPINLOCKS
 	bool "Paravirtualization layer for spinlocks"
 	depends on PARAVIRT && SMP
-	select UNINLINE_SPIN_UNLOCK
+	select UNINLINE_SPIN_UNLOCK if !QUEUED_SPINLOCK
 	---help---
 	  Paravirtualized spinlocks allow a pvops backend to replace the
 	  spinlock implementation with something virtualization-friendly
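With queued spinlocks the unlock path no longer has to be forced out of line: the pvops call it compiles to can be patched back into a single store (see the paravirt_patch_*.c hunks below), so UNINLINE_SPIN_UNLOCK is now selected only for the ticket-lock case. For reference, a sketch of the out-of-line form that select produces, assuming the generic kernel/locking/spinlock.c machinery of this era:

	/* Sketch: with UNINLINE_SPIN_UNLOCK, spin_unlock() becomes a real
	 * function, giving the pv ticket-lock backend a patchable call
	 * site instead of inlined store instructions. */
	void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
	{
		__raw_spin_unlock(lock);
	}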
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 8957810ad7d1..266c35381b62 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -712,6 +712,31 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
+#ifdef CONFIG_QUEUED_SPINLOCK
+
+static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
+							u32 val)
+{
+	PVOP_VCALL2(pv_lock_ops.queued_spin_lock_slowpath, lock, val);
+}
+
+static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
+{
+	PVOP_VCALLEE1(pv_lock_ops.queued_spin_unlock, lock);
+}
+
+static __always_inline void pv_wait(u8 *ptr, u8 val)
+{
+	PVOP_VCALL2(pv_lock_ops.wait, ptr, val);
+}
+
+static __always_inline void pv_kick(int cpu)
+{
+	PVOP_VCALL1(pv_lock_ops.kick, cpu);
+}
+
+#else /* !CONFIG_QUEUED_SPINLOCK */
+
 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
 							__ticket_t ticket)
 {
@@ -724,7 +749,9 @@ static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
 	PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
 }
 
-#endif
+#endif /* CONFIG_QUEUED_SPINLOCK */
+
+#endif /* SMP && PARAVIRT_SPINLOCKS */
 
 #ifdef CONFIG_X86_32
 #define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
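The PVOP_VCALL*() wrappers above compile to indirect calls through pv_lock_ops that the boot-time patcher can rewrite; PVOP_VCALLEE1() additionally uses the callee-saved thunk convention, so the call site clobbers almost no registers. A hedged sketch of what a hypervisor backend might plug into the wait/kick hooks — names are illustrative, loosely modeled on the KVM wiring that builds on this patch:

	/* Illustrative only: block this vCPU while *ptr still holds val. */
	static void example_wait(u8 *ptr, u8 val)
	{
		unsigned long flags;

		local_irq_save(flags);
		if (READ_ONCE(*ptr) == val)	/* recheck with IRQs off */
			safe_halt();		/* sleep until kicked */
		local_irq_restore(flags);
	}

	/* Illustrative only: wake the halted waiter on @cpu, e.g. via a
	 * hypercall or an IPI. */
	static void example_kick(int cpu)
	{
		smp_send_reschedule(cpu);
	}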
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index f7b0b5c112f2..76cd68426af8 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -333,9 +333,19 @@ struct arch_spinlock;
 typedef u16 __ticket_t;
 #endif
 
+struct qspinlock;
+
 struct pv_lock_ops {
+#ifdef CONFIG_QUEUED_SPINLOCK
+	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
+	struct paravirt_callee_save queued_spin_unlock;
+
+	void (*wait)(u8 *ptr, u8 val);
+	void (*kick)(int cpu);
+#else /* !CONFIG_QUEUED_SPINLOCK */
 	struct paravirt_callee_save lock_spinning;
 	void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
+#endif /* !CONFIG_QUEUED_SPINLOCK */
 };
 
 /* This contains all the paravirt structures: we get a convenient
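These ops default to the native implementations (see paravirt-spinlocks.c below); a backend overrides them at boot. A hedged sketch of that wiring, reusing the illustrative example_wait()/example_kick() stubs above and the __pv_* symbols declared in qspinlock.h (the init function name is hypothetical):

	static void __init example_pv_spinlock_init(void)
	{
		__pv_init_lock_hash();
		pv_lock_ops.queued_spin_lock_slowpath =
			__pv_queued_spin_lock_slowpath;
		pv_lock_ops.queued_spin_unlock =
			PV_CALLEE_SAVE(__pv_queued_spin_unlock);
		pv_lock_ops.wait = example_wait;
		pv_lock_ops.kick = example_kick;
	}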
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index f079b7020e3f..9d51fae1cba3 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -3,6 +3,7 @@
 
 #include <asm/cpufeature.h>
 #include <asm-generic/qspinlock_types.h>
+#include <asm/paravirt.h>
 
 #define queued_spin_unlock queued_spin_unlock
 /**
@@ -11,11 +12,33 @@
  *
  * A smp_store_release() on the least-significant byte.
  */
-static inline void queued_spin_unlock(struct qspinlock *lock)
+static inline void native_queued_spin_unlock(struct qspinlock *lock)
 {
 	smp_store_release((u8 *)lock, 0);
 }
 
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_init_lock_hash(void);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
+
+static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+	pv_queued_spin_lock_slowpath(lock, val);
+}
+
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+	pv_queued_spin_unlock(lock);
+}
+#else
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+	native_queued_spin_unlock(lock);
+}
+#endif
+
 #define virt_queued_spin_lock virt_queued_spin_lock
 
 static inline bool virt_queued_spin_lock(struct qspinlock *lock)
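For context, the slowpath hooked above is only reached on contention; the fast path in asm-generic/qspinlock.h stays a single cmpxchg either way:

	static __always_inline void queued_spin_lock(struct qspinlock *lock)
	{
		u32 val;

		val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
		if (likely(val == 0))
			return;
		queued_spin_lock_slowpath(lock, val);	/* pv hook under PARAVIRT_SPINLOCKS */
	}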
diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h
new file mode 100644
index 000000000000..b002e711ba88
--- /dev/null
+++ b/arch/x86/include/asm/qspinlock_paravirt.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_QSPINLOCK_PARAVIRT_H
+#define __ASM_QSPINLOCK_PARAVIRT_H
+
+PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock);
+
+#endif
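PV_CALLEE_SAVE_REGS_THUNK() emits the __raw_callee_save___pv_queued_spin_unlock wrapper declared in qspinlock.h: an asm stub that preserves the caller-saved registers around the C unlock function, so PVOP_VCALLEE1() call sites clobber (nearly) nothing and the native "movb $0, (%rdi)" patch is a drop-in replacement. A simplified sketch of the 64-bit expansion (the real macro lives in asm/paravirt.h):

	asm(".pushsection .text;"
	    ".globl __raw_callee_save___pv_queued_spin_unlock;"
	    "__raw_callee_save___pv_queued_spin_unlock:"
	    /* save caller-saved regs, call the real function, restore */
	    "push %rcx; push %rdx; push %rsi; push %rdi;"
	    "push %r8; push %r9; push %r10; push %r11;"
	    "call __pv_queued_spin_unlock;"
	    "pop %r11; pop %r10; pop %r9; pop %r8;"
	    "pop %rdi; pop %rsi; pop %rdx; pop %rcx;"
	    "ret;"
	    ".popsection");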
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index bbb6c7316341..a33f1eb15003 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -8,11 +8,33 @@
 
 #include <asm/paravirt.h>
 
+#ifdef CONFIG_QUEUED_SPINLOCK
+__visible void __native_queued_spin_unlock(struct qspinlock *lock)
+{
+	native_queued_spin_unlock(lock);
+}
+
+PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
+
+bool pv_is_native_spin_unlock(void)
+{
+	return pv_lock_ops.queued_spin_unlock.func ==
+		__raw_callee_save___native_queued_spin_unlock;
+}
+#endif
+
 struct pv_lock_ops pv_lock_ops = {
 #ifdef CONFIG_SMP
+#ifdef CONFIG_QUEUED_SPINLOCK
+	.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
+	.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
+	.wait = paravirt_nop,
+	.kick = paravirt_nop,
+#else /* !CONFIG_QUEUED_SPINLOCK */
 	.lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
 	.unlock_kick = paravirt_nop,
-#endif
+#endif /* !CONFIG_QUEUED_SPINLOCK */
+#endif /* SMP */
 };
 EXPORT_SYMBOL(pv_lock_ops);
 
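pv_is_native_spin_unlock() lets the patching code below check that the unlock op still points at the native thunk before inlining the store. For reference, the callee-save plumbing it compares against is just a recorded function pointer (from asm/paravirt_types.h):

	struct paravirt_callee_save {
		void *func;
	};

	/* Reference a function that has a __raw_callee_save_ thunk. */
	#define PV_CALLEE_SAVE(func)						\
		((struct paravirt_callee_save) { __raw_callee_save_##func })

	/* Promise that func already follows the callee-save convention. */
	#define __PV_IS_CALLEE_SAVE(func)					\
		((struct paravirt_callee_save) { func })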
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index d9f32e6d6ab6..e1b013696dde 100644
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -12,6 +12,10 @@ DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
 DEF_NATIVE(pv_cpu_ops, clts, "clts");
 DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc");
 
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCK)
+DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)");
+#endif
+
 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
 {
 	/* arg in %eax, return in %eax */
@@ -24,6 +28,8 @@ unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
 	return 0;
 }
 
+extern bool pv_is_native_spin_unlock(void);
+
 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		      unsigned long addr, unsigned len)
 {
@@ -47,14 +53,22 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		PATCH_SITE(pv_mmu_ops, write_cr3);
 		PATCH_SITE(pv_cpu_ops, clts);
 		PATCH_SITE(pv_cpu_ops, read_tsc);
-
-patch_site:
-		ret = paravirt_patch_insns(ibuf, len, start, end);
-		break;
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCK)
+		case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
+			if (pv_is_native_spin_unlock()) {
+				start = start_pv_lock_ops_queued_spin_unlock;
+				end   = end_pv_lock_ops_queued_spin_unlock;
+				goto patch_site;
+			}
+#endif
 
 	default:
 		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
 		break;
+
+patch_site:
+		ret = paravirt_patch_insns(ibuf, len, start, end);
+		break;
 	}
 #undef PATCH_SITE
 	return ret;
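DEF_NATIVE() brackets the literal instruction bytes with start_/end_ symbols that native_patch() copies from, so when the native unlock is in use, the indirect pvops call at each spin_unlock() site is overwritten with the plain byte store. Roughly, the DEF_NATIVE() line above expands to (a sketch of the macro at the top of this file):

	extern const char start_pv_lock_ops_queued_spin_unlock[],
			  end_pv_lock_ops_queued_spin_unlock[];
	asm("start_pv_lock_ops_queued_spin_unlock: "
	    "movb $0, (%eax); "			/* the native unlock */
	    "end_pv_lock_ops_queued_spin_unlock:");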
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index a1da6737ba5b..e0fb41c8255b 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -21,6 +21,10 @@ DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
 DEF_NATIVE(, mov32, "mov %edi, %eax");
 DEF_NATIVE(, mov64, "mov %rdi, %rax");
 
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCK)
+DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
+#endif
+
 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
 {
 	return paravirt_patch_insns(insnbuf, len,
@@ -33,6 +37,8 @@ unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
 				    start__mov64, end__mov64);
 }
 
+extern bool pv_is_native_spin_unlock(void);
+
 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		      unsigned long addr, unsigned len)
 {
@@ -59,14 +65,22 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		PATCH_SITE(pv_cpu_ops, clts);
 		PATCH_SITE(pv_mmu_ops, flush_tlb_single);
 		PATCH_SITE(pv_cpu_ops, wbinvd);
-
-patch_site:
-		ret = paravirt_patch_insns(ibuf, len, start, end);
-		break;
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCK)
+		case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
+			if (pv_is_native_spin_unlock()) {
+				start = start_pv_lock_ops_queued_spin_unlock;
+				end   = end_pv_lock_ops_queued_spin_unlock;
+				goto patch_site;
+			}
+#endif
 
 	default:
 		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
 		break;
+
+patch_site:
+		ret = paravirt_patch_insns(ibuf, len, start, end);
+		break;
 	}
 #undef PATCH_SITE
 	return ret;
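The 64-bit variant patches the same way, with the lock pointer in %rdi per the SysV ABI. In both files the patch_site label funnels into paravirt_patch_insns(), which only commits the native bytes when they fit the call site (from arch/x86/kernel/paravirt.c, for reference):

	unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
				      const char *start, const char *end)
	{
		unsigned insn_len = end - start;

		/* if the replacement does not fit, copy nothing and
		 * leave the call site as-is */
		if (insn_len > len || start == NULL)
			insn_len = len;
		else
			memcpy(insnbuf, start, insn_len);

		return insn_len;
	}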