| author | Tony Luck <tony.luck@intel.com> | 2009-10-23 20:58:54 -0400 |
|---|---|---|
| committer | Tony Luck <tony.luck@intel.com> | 2009-10-23 20:58:54 -0400 |
| commit | 48fade6c5a96d29cd3d0e016557a3fd835fd4049 | |
| tree | 671554b43c2deed301dfe1a93acaab4766cf6fef /arch/ia64 | |
| parent | b94b08081fcecf83fa690d6c5664f6316fe72208 | |
| parent | 1502f08edc040b6ba4b986454416564088995e79 | |
Pull ticket4byte into release branch
Diffstat (limited to 'arch/ia64')
| -rw-r--r-- | arch/ia64/include/asm/spinlock.h | 62 |
| -rw-r--r-- | arch/ia64/include/asm/spinlock_types.h | 2 |

2 files changed, 42 insertions, 22 deletions
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 30bb930e111..239ecdc9516 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -25,61 +25,82 @@
  * by atomically noting the tail and incrementing it by one (thus adding
  * ourself to the queue and noting our position), then waiting until the head
  * becomes equal to the the initial value of the tail.
+ * The pad bits in the middle are used to prevent the next_ticket number
+ * overflowing into the now_serving number.
  *
- *  63                     32  31                      0
+ *  31             17  16    15  14                    0
  *  +----------------------------------------------------+
- *  |  next_ticket_number      |     now_serving         |
+ *  |  now_serving     | padding |   next_ticket         |
  *  +----------------------------------------------------+
  */
 
-#define TICKET_SHIFT 32
+#define TICKET_SHIFT 17
+#define TICKET_BITS  15
+#define TICKET_MASK  ((1 << TICKET_BITS) - 1)
 
 static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 {
-	int *p = (int *)&lock->lock, turn, now_serving;
+	int *p = (int *)&lock->lock, ticket, serve;
 
-	now_serving = *p;
-	turn = ia64_fetchadd(1, p+1, acq);
+	ticket = ia64_fetchadd(1, p, acq);
 
-	if (turn == now_serving)
+	if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
 		return;
 
-	do {
+	ia64_invala();
+
+	for (;;) {
+		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(p) : "memory");
+
+		if (!(((serve >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
+			return;
 		cpu_relax();
-	} while (ACCESS_ONCE(*p) != turn);
+	}
 }
 
 static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 {
-	long tmp = ACCESS_ONCE(lock->lock), try;
-
-	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1))) {
-		try = tmp + (1L << TICKET_SHIFT);
+	int tmp = ACCESS_ONCE(lock->lock);
 
-		return ia64_cmpxchg(acq, &lock->lock, tmp, try, sizeof (tmp)) == tmp;
-	}
+	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
+		return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
 	return 0;
 }
 
 static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 {
-	int *p = (int *)&lock->lock;
+	unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
 
-	(void)ia64_fetchadd(1, p, rel);
+	asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
+	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
+}
+
+static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
+{
+	int *p = (int *)&lock->lock, ticket;
+
+	ia64_invala();
+
+	for (;;) {
+		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(ticket) : "r"(p) : "memory");
+		if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
+			return;
+		cpu_relax();
+	}
 }
 
 static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
 
-	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1));
+	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
 }
 
 static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
 
-	return (((tmp >> TICKET_SHIFT) - tmp) & ((1L << TICKET_SHIFT) - 1)) > 1;
+	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
 }
 
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
@@ -116,8 +137,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
 
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
-	while (__raw_spin_is_locked(lock))
-		cpu_relax();
+	__ticket_spin_unlock_wait(lock);
 }
 
 #define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
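
The comment block in the hunk above documents the new 4-byte layout: next_ticket occupies bits 0-14, bits 15-16 are padding, and now_serving occupies bits 17-31, with TICKET_SHIFT and TICKET_MASK used to compare the two fields. As a rough, portable illustration of that arithmetic (not the ia64 implementation itself, which relies on fetchadd, ld4.c.nc and a 2-byte store in unlock), the sketch below models the same word layout with C11 <stdatomic.h>. The names ticket_lock_t, ticket_lock, ticket_trylock and ticket_unlock are invented for this example.

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define TICKET_SHIFT 17                        /* now_serving: bits 31..17 */
#define TICKET_BITS  15                        /* next_ticket: bits 14..0  */
#define TICKET_MASK  ((1u << TICKET_BITS) - 1)

typedef struct {
	_Atomic uint32_t lock;                 /* zero-initialised: both fields equal */
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *l)
{
	/* Draw a ticket: the fetch-and-add bumps next_ticket in the low bits. */
	uint32_t ticket = atomic_fetch_add_explicit(&l->lock, 1,
						    memory_order_acquire);

	/* Spin until now_serving (bits 31..17) equals the ticket we drew. */
	while ((((atomic_load_explicit(&l->lock, memory_order_acquire)
		  >> TICKET_SHIFT) ^ ticket) & TICKET_MASK) != 0)
		;	/* the kernel version uses cpu_relax() and ld4.c.nc here */
}

static bool ticket_trylock(ticket_lock_t *l)
{
	uint32_t tmp = atomic_load_explicit(&l->lock, memory_order_relaxed);

	/* Only attempt the compare-and-swap when the lock already looks free. */
	if ((((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK) != 0)
		return false;
	return atomic_compare_exchange_strong_explicit(&l->lock, &tmp, tmp + 1,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void ticket_unlock(ticket_lock_t *l)
{
	/* Advance now_serving by one. The patch does this with a 2-byte
	 * load/store of the upper halfword ((tmp + 2) & ~1); this sketch
	 * uses a plain 32-bit add and skips the padding-bit handling. */
	atomic_fetch_add_explicit(&l->lock, 1u << TICKET_SHIFT,
				  memory_order_release);
}
```

A zero-initialised word (now_serving == next_ticket == 0) is the unlocked state, which is what __RAW_SPIN_LOCK_UNLOCKED { 0 } below still provides.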
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
index b61d136d9bc..474e46f1ab4 100644
--- a/arch/ia64/include/asm/spinlock_types.h
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -6,7 +6,7 @@
 #endif
 
 typedef struct {
-	volatile unsigned long lock;
+	volatile unsigned int lock;
 } raw_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED { 0 }
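
The spinlock_types.h hunk delivers the "4 byte" in the branch name ticket4byte: the lock word shrinks from volatile unsigned long (8 bytes on ia64) back to volatile unsigned int, which the 17/15-bit field layout above makes possible. Purely as an illustration (this check is not part of the patch), a C11 static assertion would capture that invariant:

```c
/* Illustration only -- not in the patch. With the lock word narrowed to
 * unsigned int, the whole spinlock occupies 4 bytes again. */
typedef struct {
	volatile unsigned int lock;
} raw_spinlock_t;

_Static_assert(sizeof(raw_spinlock_t) == 4,
	       "ia64 ticket spinlock should fit in 4 bytes");
```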