author     Ingo Molnar <mingo@kernel.org>    2015-05-11 03:47:23 -0400
committer  Ingo Molnar <mingo@kernel.org>    2015-05-11 03:52:09 -0400
commit     62c7a1e9ae54ef66658df9614bdbc09cbbdaa6f0
tree       bdbb852b6302f589e357f9c86966fc0865b8ab49
parent     52c9d2badd1ae4d11c29de57d4e964e48afd3cb4
locking/pvqspinlock: Rename QUEUED_SPINLOCK to QUEUED_SPINLOCKS
Valentin Rothberg reported that we use CONFIG_QUEUED_SPINLOCKS
in arch/x86/kernel/paravirt_patch_32.c, while the symbol is
called CONFIG_QUEUED_SPINLOCK. (Note the extra 'S')

But the typo was natural: the proper English term for such
a generic object would be 'queued spinlocks' - so rename
this and related symbols accordingly to the plural form.

Reported-by: Valentin Rothberg <valentinrothberg@gmail.com>
Cc: Douglas Hatch <doug.hatch@hp.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <Waiman.Long@hp.com>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
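[Editor's note] The failure mode behind this rename is worth spelling out: a
Kconfig symbol FOO surfaces in C as the macro CONFIG_FOO, and an #ifdef on a
misspelled macro is not a build error -- the preprocessor treats the unknown
name as undefined and silently drops the guarded code. A minimal standalone
sketch (hypothetical example, not kernel code):

    #include <stdio.h>

    #define CONFIG_QUEUED_SPINLOCKS 1   /* what Kconfig actually emits */

    int main(void)
    {
    #ifdef CONFIG_QUEUED_SPINLOCK       /* typo: missing 'S', never defined */
        puts("queued spinlock path");   /* silently compiled out */
    #else
        puts("fallback path");          /* what the build actually gets */
    #endif
        return 0;
    }

The compiler never flags this; it takes tooling that cross-references
CONFIG_* uses against defined Kconfig symbols (the kernel tree carries such a
checker in scripts/checkkconfigsymbols.py) to surface these dead blocks.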
-rw-r--r--  arch/x86/Kconfig                      |  4 ++--
-rw-r--r--  arch/x86/include/asm/paravirt.h       |  6 +++---
-rw-r--r--  arch/x86/include/asm/paravirt_types.h |  6 +++---
-rw-r--r--  arch/x86/include/asm/spinlock.h       |  4 ++--
-rw-r--r--  arch/x86/include/asm/spinlock_types.h |  4 ++--
-rw-r--r--  arch/x86/kernel/kvm.c                 | 10 +++++-----
-rw-r--r--  arch/x86/kernel/paravirt-spinlocks.c  |  8 ++++----
-rw-r--r--  arch/x86/kernel/paravirt_patch_64.c   |  4 ++--
-rw-r--r--  arch/x86/xen/spinlock.c               | 10 +++++-----
-rw-r--r--  kernel/Kconfig.locks                  |  6 +++---
-rw-r--r--  kernel/locking/Makefile               |  2 +-
11 files changed, 32 insertions(+), 32 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 50ec043a920d..f8dc6abbe6ae 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -127,7 +127,7 @@ config X86
 	select MODULES_USE_ELF_RELA if X86_64
 	select CLONE_BACKWARDS if X86_32
 	select ARCH_USE_BUILTIN_BSWAP
-	select ARCH_USE_QUEUED_SPINLOCK
+	select ARCH_USE_QUEUED_SPINLOCKS
 	select ARCH_USE_QUEUE_RWLOCK
 	select OLD_SIGSUSPEND3 if X86_32 || IA32_EMULATION
 	select OLD_SIGACTION if X86_32
@@ -667,7 +667,7 @@ config PARAVIRT_DEBUG
 config PARAVIRT_SPINLOCKS
 	bool "Paravirtualization layer for spinlocks"
 	depends on PARAVIRT && SMP
-	select UNINLINE_SPIN_UNLOCK if !QUEUED_SPINLOCK
+	select UNINLINE_SPIN_UNLOCK if !QUEUED_SPINLOCKS
 	---help---
 	  Paravirtualized spinlocks allow a pvops backend to replace the
 	  spinlock implementation with something virtualization-friendly
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 266c35381b62..d143bfad45d7 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -712,7 +712,7 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
 
 static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
 							u32 val)
@@ -735,7 +735,7 @@ static __always_inline void pv_kick(int cpu)
 	PVOP_VCALL1(pv_lock_ops.kick, cpu);
 }
 
-#else /* !CONFIG_QUEUED_SPINLOCK */
+#else /* !CONFIG_QUEUED_SPINLOCKS */
 
 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
 							__ticket_t ticket)
@@ -749,7 +749,7 @@ static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
 	PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
 }
 
-#endif /* CONFIG_QUEUED_SPINLOCK */
+#endif /* CONFIG_QUEUED_SPINLOCKS */
 
 #endif /* SMP && PARAVIRT_SPINLOCKS */
 
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 76cd68426af8..8766c7c395c2 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -336,16 +336,16 @@ typedef u16 __ticket_t;
 struct qspinlock;
 
 struct pv_lock_ops {
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
 	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
 	struct paravirt_callee_save queued_spin_unlock;
 
 	void (*wait)(u8 *ptr, u8 val);
 	void (*kick)(int cpu);
-#else /* !CONFIG_QUEUED_SPINLOCK */
+#else /* !CONFIG_QUEUED_SPINLOCKS */
 	struct paravirt_callee_save lock_spinning;
 	void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
-#endif /* !CONFIG_QUEUED_SPINLOCK */
+#endif /* !CONFIG_QUEUED_SPINLOCKS */
 };
 
 /* This contains all the paravirt structures: we get a convenient
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 4ec5413156ca..be0a05913b91 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -42,7 +42,7 @@
 extern struct static_key paravirt_ticketlocks_enabled;
 static __always_inline bool static_key_false(struct static_key *key);
 
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
 #include <asm/qspinlock.h>
 #else
 
@@ -200,7 +200,7 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 		cpu_relax();
 	}
 }
-#endif /* CONFIG_QUEUED_SPINLOCK */
+#endif /* CONFIG_QUEUED_SPINLOCKS */
 
 /*
  * Read-write spinlocks, allowing multiple readers
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index 5df1f1b9a4b0..65c3e37f879a 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -23,7 +23,7 @@ typedef u32 __ticketpair_t;
 
 #define TICKET_SHIFT	(sizeof(__ticket_t) * 8)
 
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
 #include <asm-generic/qspinlock_types.h>
 #else
 typedef struct arch_spinlock {
@@ -36,7 +36,7 @@ typedef struct arch_spinlock {
 } arch_spinlock_t;
 
 #define __ARCH_SPIN_LOCK_UNLOCKED	{ { 0 } }
-#endif /* CONFIG_QUEUED_SPINLOCK */
+#endif /* CONFIG_QUEUED_SPINLOCKS */
 
 #include <asm-generic/qrwlock_types.h>
 
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 6c21d931bd24..1681504e44a4 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -585,7 +585,7 @@ static void kvm_kick_cpu(int cpu)
 }
 
 
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
 
 #include <asm/qspinlock.h>
 
@@ -615,7 +615,7 @@ out:
 	local_irq_restore(flags);
 }
 
-#else /* !CONFIG_QUEUED_SPINLOCK */
+#else /* !CONFIG_QUEUED_SPINLOCKS */
 
 enum kvm_contention_stat {
 	TAKEN_SLOW,
@@ -850,7 +850,7 @@ static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
 	}
 }
 
-#endif /* !CONFIG_QUEUED_SPINLOCK */
+#endif /* !CONFIG_QUEUED_SPINLOCKS */
 
 /*
  * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
@@ -863,13 +863,13 @@ void __init kvm_spinlock_init(void)
 	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
 		return;
 
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
 	__pv_init_lock_hash();
 	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
 	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
 	pv_lock_ops.wait = kvm_wait;
 	pv_lock_ops.kick = kvm_kick_cpu;
-#else /* !CONFIG_QUEUED_SPINLOCK */
+#else /* !CONFIG_QUEUED_SPINLOCKS */
 	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
 	pv_lock_ops.unlock_kick = kvm_unlock_kick;
 #endif
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index a33f1eb15003..33ee3e0efd65 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -8,7 +8,7 @@
 
 #include <asm/paravirt.h>
 
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
 __visible void __native_queued_spin_unlock(struct qspinlock *lock)
 {
 	native_queued_spin_unlock(lock);
@@ -25,15 +25,15 @@ bool pv_is_native_spin_unlock(void)
 
 struct pv_lock_ops pv_lock_ops = {
 #ifdef CONFIG_SMP
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
 	.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
 	.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
 	.wait = paravirt_nop,
 	.kick = paravirt_nop,
-#else /* !CONFIG_QUEUED_SPINLOCK */
+#else /* !CONFIG_QUEUED_SPINLOCKS */
 	.lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
 	.unlock_kick = paravirt_nop,
-#endif /* !CONFIG_QUEUED_SPINLOCK */
+#endif /* !CONFIG_QUEUED_SPINLOCKS */
 #endif /* SMP */
 };
 EXPORT_SYMBOL(pv_lock_ops);
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index e0fb41c8255b..a1fa86782186 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -21,7 +21,7 @@ DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
 DEF_NATIVE(, mov32, "mov %edi, %eax");
 DEF_NATIVE(, mov64, "mov %rdi, %rax");
 
-#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCK)
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
 DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
 #endif
 
@@ -65,7 +65,7 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		PATCH_SITE(pv_cpu_ops, clts);
 		PATCH_SITE(pv_mmu_ops, flush_tlb_single);
 		PATCH_SITE(pv_cpu_ops, wbinvd);
-#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCK)
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
 		case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
 			if (pv_is_native_spin_unlock()) {
 				start = start_pv_lock_ops_queued_spin_unlock;
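[Editor's note] For context on the patch site above: the reason a bare
"movb $0, (%rdi)" can replace the paravirt unlock call is that the native
queued-spinlock unlock is nothing more than a release store of zero to the
lock's locked byte, and x86 stores already have release semantics. A
standalone sketch using C11 atomics (simplified types, not the kernel's own
primitives):

    #include <stdatomic.h>
    #include <stdio.h>

    struct qspinlock {
        _Atomic unsigned char locked;   /* low byte of the real lock word */
    };

    /* On x86 a release store needs no fence, so this compiles down to
     * the same single byte store that DEF_NATIVE() hardcodes above. */
    static void my_queued_spin_unlock(struct qspinlock *lock)
    {
        atomic_store_explicit(&lock->locked, 0, memory_order_release);
    }

    int main(void)
    {
        struct qspinlock lock = { .locked = 1 };    /* pretend it is held */

        my_queued_spin_unlock(&lock);
        printf("locked = %d\n", lock.locked);       /* prints 0 */
        return 0;
    }

When pv_is_native_spin_unlock() reports that the unlock was never overridden
by KVM or Xen, native_patch() inlines that one instruction at each call site
instead of paying for an indirect call.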
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index af907a90fb19..9e2ba5c6e1dd 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -21,7 +21,7 @@ static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
 static DEFINE_PER_CPU(char *, irq_name);
 static bool xen_pvspin = true;
 
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
 
 #include <asm/qspinlock.h>
 
@@ -65,7 +65,7 @@ static void xen_qlock_wait(u8 *byte, u8 val)
 	xen_poll_irq(irq);
 }
 
-#else /* CONFIG_QUEUED_SPINLOCK */
+#else /* CONFIG_QUEUED_SPINLOCKS */
 
 enum xen_contention_stat {
 	TAKEN_SLOW,
@@ -264,7 +264,7 @@ static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
 		}
 	}
 }
-#endif /* CONFIG_QUEUED_SPINLOCK */
+#endif /* CONFIG_QUEUED_SPINLOCKS */
 
 static irqreturn_t dummy_handler(int irq, void *dev_id)
 {
@@ -328,7 +328,7 @@ void __init xen_init_spinlocks(void)
 		return;
 	}
 	printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
 	__pv_init_lock_hash();
 	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
 	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
@@ -366,7 +366,7 @@ static __init int xen_parse_nopvspin(char *arg)
 }
 early_param("xen_nopvspin", xen_parse_nopvspin);
 
-#if defined(CONFIG_XEN_DEBUG_FS) && !defined(CONFIG_QUEUED_SPINLOCK)
+#if defined(CONFIG_XEN_DEBUG_FS) && !defined(CONFIG_QUEUED_SPINLOCKS)
 
 static struct dentry *d_spin_debug;
 
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 95dd7587ec34..65d755b6a663 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -235,11 +235,11 @@ config LOCK_SPIN_ON_OWNER
 	def_bool y
 	depends on MUTEX_SPIN_ON_OWNER || RWSEM_SPIN_ON_OWNER
 
-config ARCH_USE_QUEUED_SPINLOCK
+config ARCH_USE_QUEUED_SPINLOCKS
 	bool
 
-config QUEUED_SPINLOCK
-	def_bool y if ARCH_USE_QUEUED_SPINLOCK
+config QUEUED_SPINLOCKS
+	def_bool y if ARCH_USE_QUEUED_SPINLOCKS
 	depends on SMP
 
 config ARCH_USE_QUEUE_RWLOCK
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index abfcef3c1ef9..132aff9d3fbe 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -17,7 +17,7 @@ obj-$(CONFIG_SMP) += spinlock.o
 obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
 obj-$(CONFIG_SMP) += lglock.o
 obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
-obj-$(CONFIG_QUEUED_SPINLOCK) += qspinlock.o
+obj-$(CONFIG_QUEUED_SPINLOCKS) += qspinlock.o
 obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o