author		Tony Luck <tony.luck@intel.com>	2009-10-07 13:54:19 -0400
committer	Tony Luck <tony.luck@intel.com>	2009-10-07 13:54:19 -0400
commit		9d40ee200a527ce08ab8c793ba8ae3e242edbb0e
tree		189b7dc9cde0ceb791bb3bdd1a1d9a22aee00336 /arch/ia64
parent		0eca52a92735f43462165efe00a7e394345fb38e
[IA64] Squeeze ticket locks back into 4 bytes.
Linus pointed out that other people have spent large amounts of time
and effort to optimize the layout of frequently used structures. Often
these have embedded locks, and the assumption is that a lock takes
4 bytes. Linus also pointed out how to work with the limited options
for atomic instructions on Itanium.
Signed-off-by: Tony Luck <tony.luck@intel.com>
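
[Editor's illustration: the packing this patch introduces can be sketched outside the kernel. Below is a minimal, hypothetical rendering in portable C11 atomics rather than the ia64 intrinsics the patch uses (ia64_fetchadd, ld4.c.nc). The names ticket_lock_t, ticket_lock and ticket_unlock are invented here, and the unlock path uses a plain fetch-add where the kernel uses a 2-byte store; see the note after the diff.]

#include <stdatomic.h>
#include <stdio.h>

/*
 * Same 4-byte layout as the patch:
 *   31             17  16    15  14                    0
 *  +----------------------------------------------------+
 *  |  now_serving     | padding |   next_ticket         |
 *  +----------------------------------------------------+
 */
#define TICKET_SHIFT	17
#define TICKET_BITS	15
#define TICKET_MASK	((1 << TICKET_BITS) - 1)

typedef struct {
	atomic_uint lock;	/* the whole lock fits in 4 bytes */
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *l)
{
	/* Take a ticket: atomically bump next_ticket (bits 0..14). */
	unsigned ticket = atomic_fetch_add_explicit(&l->lock, 1,
						    memory_order_acquire);

	/* Old word already had now_serving == our ticket: we own it. */
	if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
		return;

	/* Spin until now_serving (bits 17..31) reaches our ticket. */
	while (((atomic_load_explicit(&l->lock, memory_order_acquire)
		 >> TICKET_SHIFT) ^ ticket) & TICKET_MASK)
		;
}

static void ticket_unlock(ticket_lock_t *l)
{
	/*
	 * Advance now_serving by one.  The patch instead stores
	 * (tmp + 2) & ~1 into the upper halfword, which also clears
	 * the pad bit that absorbs next_ticket overflow.
	 */
	atomic_fetch_add_explicit(&l->lock, 1u << TICKET_SHIFT,
				  memory_order_release);
}

int main(void)
{
	ticket_lock_t l = { 0 };

	ticket_lock(&l);
	printf("locked:   %#010x\n", atomic_load(&l.lock));	/* 0x00000001 */
	ticket_unlock(&l);
	printf("unlocked: %#010x\n", atomic_load(&l.lock));	/* 0x00020001 */
	return 0;
}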
Diffstat (limited to 'arch/ia64')
-rw-r--r--	arch/ia64/include/asm/spinlock.h	45
-rw-r--r--	arch/ia64/include/asm/spinlock_types.h	2
2 files changed, 27 insertions, 20 deletions
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 30bb930e1111..4fa502739d64 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -25,61 +25,68 @@
  * by atomically noting the tail and incrementing it by one (thus adding
  * ourself to the queue and noting our position), then waiting until the head
  * becomes equal to the the initial value of the tail.
+ * The pad bits in the middle are used to prevent the next_ticket number
+ * overflowing into the now_serving number.
  *
- *   63                     32  31                      0
+ *   31             17  16    15  14                    0
  *  +----------------------------------------------------+
- *  |  next_ticket_number      |     now_serving         |
+ *  |  now_serving     | padding |   next_ticket         |
  *  +----------------------------------------------------+
  */
 
-#define TICKET_SHIFT	32
+#define TICKET_SHIFT	17
+#define TICKET_BITS	15
+#define TICKET_MASK	((1 << TICKET_BITS) - 1)
 
 static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 {
-	int	*p = (int *)&lock->lock, turn, now_serving;
+	int	*p = (int *)&lock->lock, ticket, serve;
 
-	now_serving = *p;
-	turn = ia64_fetchadd(1, p+1, acq);
+	ticket = ia64_fetchadd(1, p, acq);
 
-	if (turn == now_serving)
+	if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
 		return;
 
-	do {
+	ia64_invala();
+
+	for (;;) {
+		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(p) : "memory");
+
+		if (!(((serve >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
+			return;
 		cpu_relax();
-	} while (ACCESS_ONCE(*p) != turn);
+	}
 }
 
 static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 {
-	long tmp = ACCESS_ONCE(lock->lock), try;
+	int tmp = ACCESS_ONCE(lock->lock);
 
-	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1))) {
-		try = tmp + (1L << TICKET_SHIFT);
-
-		return ia64_cmpxchg(acq, &lock->lock, tmp, try, sizeof (tmp)) == tmp;
-	}
+	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
+		return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
 	return 0;
 }
 
 static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 {
-	int	*p = (int *)&lock->lock;
+	unsigned short	*p = (unsigned short *)&lock->lock + 1, tmp;
 
-	(void)ia64_fetchadd(1, p, rel);
+	asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
+	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
 }
 
 static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
 
-	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1));
+	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
 }
 
 static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
 
-	return (((tmp >> TICKET_SHIFT) - tmp) & ((1L << TICKET_SHIFT) - 1)) > 1;
+	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
 }
 
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
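
[Editor's note on the unlock path above, before the spinlock_types.h hunk: on little-endian ia64 the 2-byte store to ((unsigned short *)&lock->lock + 1) covers bits 16..31 of the lock word, so bit 0 of that halfword is pad bit 16 and bits 1..15 are now_serving. A small standalone simulation, with invented values, of what (tmp + 2) & ~1 accomplishes:]

#include <stdio.h>

int main(void)
{
	/* serving = 5, pad bit 16 set (absorbed overflow), ticket = 6 */
	unsigned lockword = (5u << 17) | (1u << 16) | 6u;

	/* The halfword the unlock touches: bits 16..31 of the word. */
	unsigned short upper = lockword >> 16;

	upper = (upper + 2) & ~1;	/* serving++, clear pad bit */
	lockword = (lockword & 0xffffu) | ((unsigned)upper << 16);

	printf("serving=%u ticket=%u pad=%u\n",
	       lockword >> 17, lockword & 0x7fffu,
	       (lockword >> 16) & 1);	/* serving=6 ticket=6 pad=0 */
	return 0;
}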
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
index b61d136d9bc2..474e46f1ab4a 100644
--- a/arch/ia64/include/asm/spinlock_types.h
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -6,7 +6,7 @@
 #endif
 
 typedef struct {
-	volatile unsigned long lock;
+	volatile unsigned int lock;
 } raw_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
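
[Editor's sanity check on the XOR/mask tests in the new code, with invented values: the lock reads as free exactly when now_serving == next_ticket, and as contended once more than one ticket is outstanding.]

#include <stdio.h>

#define TICKET_SHIFT	17
#define TICKET_BITS	15
#define TICKET_MASK	((1 << TICKET_BITS) - 1)

static int is_locked(unsigned tmp)
{
	/* Held while now_serving != next_ticket. */
	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}

static int is_contended(unsigned tmp)
{
	/* More than one ticket outstanding means someone is waiting. */
	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}

static unsigned word(unsigned serving, unsigned ticket)
{
	return (serving << TICKET_SHIFT) | ticket;
}

int main(void)
{
	/* serving == ticket: free.  Ticket one ahead: held, uncontended.
	 * Ticket two ahead: held and contended. */
	printf("%d %d\n", is_locked(word(5, 5)), is_contended(word(5, 5)));	/* 0 0 */
	printf("%d %d\n", is_locked(word(5, 6)), is_contended(word(5, 6)));	/* 1 0 */
	printf("%d %d\n", is_locked(word(5, 7)), is_contended(word(5, 7)));	/* 1 1 */
	return 0;
}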