author    Matthew Wilcox <willy@parisc-linux.org>    2005-10-21 22:41:25 -0400
committer Kyle McMartin <kyle@parisc-linux.org>      2005-10-21 22:41:25 -0400
commit    14e256c107304367eff401d20f2ab9fa72e33136
tree      beff45e23977abbb89aa3cc4851ad4f98b283bff /include/asm-parisc
parent    04d472dc83388c59deb6241e9aed841926aa1c8c
[PARISC] Update spinlocks from parisc tree

Neaten up the CONFIG_PA20 ifdefs.

More merge fixes, this time for SMP.

Signed-off-by: Matthew Wilcox <willy@parisc-linux.org>

Prettify the CONFIG_DEBUG_SPINLOCK __SPIN_LOCK_UNLOCKED initializers.

Clean up some warnings with CONFIG_DEBUG_SPINLOCK enabled.

Fix build with spinlock debugging turned on. The patch is cleaner like
this, too.

Remove the mandatory 16-byte alignment requirement on PA2.0 processors by
using the ldcw,CO completer. This provides a nice instruction savings.

Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
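
For context on the "1 means unlocked" convention this patch touches: ldcw (load and clear word) is the only atomic read-modify-write operation PA-RISC has, so a lock is taken by atomically reading the lock word and zeroing it. A minimal acquire loop built on the __ldcw()/__ldcw_align() macros from this patch could look like the following sketch (illustrative only; the name example_spin_lock and the use of cpu_relax() are not part of this commit):

```c
/* Sketch: taking a lock with load-and-clear semantics.
 * __ldcw() atomically loads the lock word and clears it to 0;
 * a return value of 0 means another CPU already held the lock. */
static inline void example_spin_lock(raw_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);	/* pick the usable word */

	while (__ldcw(a) == 0)		/* 0: lock was already held */
		while (*a == 0)		/* spin read-only until it looks free */
			cpu_relax();
}
```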
Diffstat (limited to 'include/asm-parisc')

 include/asm-parisc/spinlock.h       |  5 -----
 include/asm-parisc/spinlock_types.h |  8 ++++++--
 include/asm-parisc/system.h         | 31 ++++++++++++++++++++-------
 3 files changed, 30 insertions(+), 14 deletions(-)
diff --git a/include/asm-parisc/spinlock.h b/include/asm-parisc/spinlock.h
index 43eaa6e742e0..7c3f406a746a 100644
--- a/include/asm-parisc/spinlock.h
+++ b/include/asm-parisc/spinlock.h
@@ -5,11 +5,6 @@
 #include <asm/processor.h>
 #include <asm/spinlock_types.h>
 
-/* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked
- * since it only has load-and-zero. Moreover, at least on some PA processors,
- * the semaphore address has to be 16-byte aligned.
- */
-
 static inline int __raw_spin_is_locked(raw_spinlock_t *x)
 {
 	volatile unsigned int *a = __ldcw_align(x);
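
The flip side of load-and-clear is that release needs no atomic operation at all: a successful acquire leaves 0 in the lock word, so unlocking is just a plain store of 1 after a barrier. A sketch (hypothetical helper name; mirrors the shape of the unlock path in this file):

```c
/* Sketch: releasing a load-and-clear lock. */
static inline void example_spin_unlock(raw_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);

	mb();		/* keep critical-section accesses before the release */
	*a = 1;		/* nonzero == unlocked on PA-RISC */
}
```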
diff --git a/include/asm-parisc/spinlock_types.h b/include/asm-parisc/spinlock_types.h
index 785bba822fbf..d6b479bdb886 100644
--- a/include/asm-parisc/spinlock_types.h
+++ b/include/asm-parisc/spinlock_types.h
@@ -6,11 +6,15 @@
 #endif
 
 typedef struct {
+#ifdef CONFIG_PA20
+	volatile unsigned int slock;
+# define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#else
 	volatile unsigned int lock[4];
+# define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
+#endif
 } raw_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
-
 typedef struct {
 	raw_spinlock_t lock;
 	volatile int counter;
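
Moving __RAW_SPIN_LOCK_UNLOCKED inside the ifdef keeps the initializer in sync with the struct layout it initializes: a single word on PA2.0, a four-word array otherwise. Callers are unaffected either way (hypothetical usage, not from the patch):

```c
/* Hypothetical usage: the same declaration works under both configs. */
static raw_spinlock_t my_lock = __RAW_SPIN_LOCK_UNLOCKED;
/* CONFIG_PA20: expands to { 1 }              -- one naturally aligned word
 * PA1.x:       expands to { { 1, 1, 1, 1 } } -- four words, of which the
 *              16-byte-aligned one is selected at runtime by __ldcw_align() */
```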
diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h
index a25e9dcd2e79..f3928d3a80cb 100644
--- a/include/asm-parisc/system.h
+++ b/include/asm-parisc/system.h
@@ -138,13 +138,7 @@ static inline void set_eiem(unsigned long val)
 #define set_wmb(var, value)	do { var = value; wmb(); } while (0)
 
 
-/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
-#define __ldcw(a) ({ \
-	unsigned __ret; \
-	__asm__ __volatile__("ldcw 0(%1),%0" : "=r" (__ret) : "r" (a)); \
-	__ret; \
-})
-
+#ifndef CONFIG_PA20
 /* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
    and GCC only guarantees 8-byte alignment for stack locals, we can't
    be assured of 16-byte alignment for atomic lock data even if we
@@ -152,12 +146,35 @@ static inline void set_eiem(unsigned long val)
    we use a struct containing an array of four ints for the atomic lock
    type and dynamically select the 16-byte aligned int from the array
    for the semaphore. */
+
 #define __PA_LDCW_ALIGNMENT 16
 #define __ldcw_align(a) ({ \
 	unsigned long __ret = (unsigned long) &(a)->lock[0]; \
 	__ret = (__ret + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1); \
 	(volatile unsigned int *) __ret; \
 })
+#define LDCW	"ldcw"
+
+#else /*CONFIG_PA20*/
+/* From: "Jim Hull" <jim.hull of hp.com>
+   I've attached a summary of the change, but basically, for PA 2.0, as
+   long as the ",CO" (coherent operation) completer is specified, then the
+   16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
+   they only require "natural" alignment (4-byte for ldcw, 8-byte for
+   ldcd). */
+
+#define __PA_LDCW_ALIGNMENT 4
+#define __ldcw_align(a) ((volatile unsigned int *)a)
+#define LDCW	"ldcw,co"
+
+#endif /*!CONFIG_PA20*/
+
+/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
+#define __ldcw(a) ({ \
+	unsigned __ret; \
+	__asm__ __volatile__(LDCW " 0(%1),%0" : "=r" (__ret) : "r" (a)); \
+	__ret; \
+})
 
 #ifdef CONFIG_SMP
 # define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
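
The round-up in the PA1.x __ldcw_align() is the usual power-of-two alignment idiom: add (alignment - 1), then mask off the low bits. Because lock[4] spans 16 bytes, one of its four words always lands on a 16-byte boundary. A standalone illustration with a hypothetical address (not from the patch):

```c
#include <stdio.h>

/* Round x up to the next multiple of a (a must be a power of two). */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long addr = 0x1008;	/* hypothetical 8-byte-aligned lock[0] */

	/* 0x1008 + 15 = 0x1017; masking the low 4 bits yields 0x1010,
	 * which still falls inside the 16-byte lock[4] array. */
	printf("aligned: %#lx\n", ALIGN_UP(addr, 16UL));
	return 0;
}
```

On PA2.0 the ,co completer makes all of this unnecessary: __ldcw_align() degenerates to a cast, and the lock shrinks to a single naturally aligned word.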
