aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/include/asm/rwlock.h
diff options
context:
space:
mode:
authorJan Beulich <JBeulich@novell.com>2011-07-19 08:00:45 -0400
committerIngo Molnar <mingo@elte.hu>2011-07-21 03:03:36 -0400
commita750036f35cda160ef77408ec92c3dc41f8feebb (patch)
tree1198013e1289dfb9b5a299388ee09515642c4030 /arch/x86/include/asm/rwlock.h
parenta738669464a1e0d8e7b20f631120192f9cf7cfbd (diff)
x86: Fix write lock scalability 64-bit issue
With the write lock path simply subtracting RW_LOCK_BIAS there is, on large systems, the theoretical possibility of overflowing the 32-bit value that was used so far (namely if 128 or more CPUs manage to do the subtraction, but don't get to do the inverse addition in the failure path quickly enough).

A first measure is to modify RW_LOCK_BIAS itself - with the new value chosen, it is good for up to 2048 CPUs each allowed to nest over 2048 times on the read path without causing an issue. Quite possibly it would even be sufficient to adjust the bias a little further, assuming that allowing for significantly less nesting would suffice.

However, as the original value chosen allowed for even more nesting levels, to support more than 2048 CPUs (possible currently only for 64-bit kernels) the lock itself gets widened to 64 bits.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/4E258E0D020000780004E3F0@nat28.tlf.novell.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/include/asm/rwlock.h')
-rw-r--r--arch/x86/include/asm/rwlock.h43
1 files changed, 42 insertions, 1 deletions
diff --git a/arch/x86/include/asm/rwlock.h b/arch/x86/include/asm/rwlock.h
index 6a8c0d645108..a5370a03d90c 100644
--- a/arch/x86/include/asm/rwlock.h
+++ b/arch/x86/include/asm/rwlock.h
@@ -1,7 +1,48 @@
 #ifndef _ASM_X86_RWLOCK_H
 #define _ASM_X86_RWLOCK_H
 
-#define RW_LOCK_BIAS		 0x01000000
+#include <asm/asm.h>
+
+#if CONFIG_NR_CPUS <= 2048
+
+#ifndef __ASSEMBLY__
+typedef union {
+	s32 lock;
+	s32 write;
+} arch_rwlock_t;
+#endif
+
+#define RW_LOCK_BIAS		0x00100000
+#define READ_LOCK_SIZE(insn)	__ASM_FORM(insn##l)
+#define READ_LOCK_ATOMIC(n)	atomic_##n
+#define WRITE_LOCK_ADD(n)	__ASM_FORM_COMMA(addl n)
+#define WRITE_LOCK_SUB(n)	__ASM_FORM_COMMA(subl n)
+#define WRITE_LOCK_CMP		RW_LOCK_BIAS
+
+#else /* CONFIG_NR_CPUS > 2048 */
+
+#include <linux/const.h>
+
+#ifndef __ASSEMBLY__
+typedef union {
+	s64 lock;
+	struct {
+		u32 read;
+		s32 write;
+	};
+} arch_rwlock_t;
+#endif
+
+#define RW_LOCK_BIAS		(_AC(1,L) << 32)
+#define READ_LOCK_SIZE(insn)	__ASM_FORM(insn##q)
+#define READ_LOCK_ATOMIC(n)	atomic64_##n
+#define WRITE_LOCK_ADD(n)	__ASM_FORM(incl)
+#define WRITE_LOCK_SUB(n)	__ASM_FORM(decl)
+#define WRITE_LOCK_CMP		1
+
+#endif /* CONFIG_NR_CPUS */
+
+#define __ARCH_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
 
 /* Actual code is in asm/spinlock.h or in arch/x86/lib/rwlock.S */
 