author		Gerd Hoffmann <kraxel@suse.de>	2006-03-23 05:59:32 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-23 10:38:04 -0500
commit		9a0b5817ad97bb718ab85322759d19a238712b47
tree		39bd21eb69c4001b99096d96a76a2e5d37904108 /include/asm-i386/spinlock.h
parent		4d7d8c82c181711d28c8336108330a9121f5ef07
[PATCH] x86: SMP alternatives
Implement SMP alternatives, i.e. switching at runtime between different
code versions for UP and SMP. The code can patch both SMP->UP and UP->SMP.
The UP->SMP case is useful for CPU hotplug.
With CONFIG_HOTPLUG_CPU enabled the code switches to UP at boot time and
when the number of CPUs goes down to 1, and switches to SMP when the number
of CPUs goes up to 2.
Without CONFIG_HOTPLUG_CPU, or on non-SMP-capable systems, the code is
patched once at boot time (if needed) and the tables are released
afterwards.
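As a concrete illustration (a minimal sketch, not the patch's code; the
table layout and function name here are assumptions), the UP patching for
plain lock prefixes boils down to walking a table of pointers into the
kernel text and rewriting one byte at each:

	/* Hypothetical sketch: "start" and "end" delimit a .smp_locks-style
	 * table of pointers, each addressing a one-byte 0xf0 lock prefix. */
	static void patch_lock_prefixes(unsigned char **start,
					unsigned char **end, int smp)
	{
		unsigned char **ptr;

		for (ptr = start; ptr < end; ptr++)
			**ptr = smp ? 0xf0 : 0x90;  /* lock prefix vs. one-byte nop */
	}

Going back to SMP just writes the 0xf0 byte again, which is why the
pointer tables must be kept around as long as CPU hotplug can still raise
the CPU count.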
The changes in detail:
* The current alternatives bits are moved to a separate file,
the SMP alternatives code is added there.
* The patch adds some new ELF sections to the kernel (sketched after this list):
.smp_altinstructions
like .altinstructions, also contains a list
of alt_instr structs.
.smp_altinstr_replacement
like .altinstr_replacement, but also has some space to
save the original instruction before replacing it.
.smp_locks
list of pointers to lock prefixes which can be nop'ed
out on UP.
The first two are used to replace more complex instruction
sequences such as spinlocks and semaphores. It would be possible
to deal with the lock prefixes that way as well, but handling them
as a special case keeps the table sizes much smaller.
* The sections are page-aligned and padded up to page size, so they
can be freed if they are not needed.
* Split the code that releases init pages out into a separate function
and use it to release the ELF sections if they are unused.
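The entries in the two alternatives sections are alt_instr records. As a
sketch of the format (a plausible reconstruction, not quoted verbatim;
the authoritative definition is in include/asm-i386/alternative.h in this
patch):

	struct alt_instr {
		u8 *instr;		/* original instruction */
		u8 *replacement;	/* replacement instruction(s) */
		u8  cpuid;		/* cpuid bit that enables the replacement */
		u8  instrlen;		/* length of original instruction */
		u8  replacementlen;	/* replacement length, <= instrlen */
		u8  pad;
	};

The extra space in .smp_altinstr_replacement is what makes SMP->UP->SMP
round trips possible: before the UP variant is patched in, the original
SMP instructions are copied there so they can be restored later.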
Signed-off-by: Gerd Hoffmann <kraxel@suse.de>
Signed-off-by: Chuck Ebbert <76306.1226@compuserve.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-i386/spinlock.h')
-rw-r--r--	include/asm-i386/spinlock.h | 21
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index 23604350cdf4..a1b8a8a30e21 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -48,18 +48,23 @@
 	"jmp 1b\n" \
 	"4:\n\t"
 
+#define __raw_spin_lock_string_up \
+	"\n\tdecb %0"
+
 static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
-	__asm__ __volatile__(
-		__raw_spin_lock_string
-		:"=m" (lock->slock) : : "memory");
+	alternative_smp(
+		__raw_spin_lock_string,
+		__raw_spin_lock_string_up,
+		"=m" (lock->slock) : : "memory");
 }
 
 static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
-	__asm__ __volatile__(
-		__raw_spin_lock_string_flags
-		:"=m" (lock->slock) : "r" (flags) : "memory");
+	alternative_smp(
+		__raw_spin_lock_string_flags,
+		__raw_spin_lock_string_up,
+		"=m" (lock->slock) : "r" (flags) : "memory");
 }
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
@@ -178,12 +183,12 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
 
 static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
-	asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
+	asm volatile(LOCK_PREFIX "incl %0" :"=m" (rw->lock) : : "memory");
 }
 
 static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
-	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ", %0"
+	asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0"
 		: "=m" (rw->lock) : : "memory");
 }
 
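For reference, the alternative_smp() macro used in the first hunk takes
the SMP asm string, the UP asm string, and the usual constraint list. A
simplified sketch of how such a macro can emit the SMP code inline plus a
matching .smp_altinstructions entry (hedged: this shows the general
shape, it is not the patch's macro quoted verbatim):

	/* Simplified sketch of an alternative_smp()-style macro; the real
	 * macro in this patch also reserves space in .smp_altinstr_replacement
	 * to save the original instructions before patching.  Local labels,
	 * section names and the X86_FEATURE_UP marker follow the patch's
	 * scheme. */
	#define alternative_smp(smpinstr, upinstr, args...)		\
		asm volatile("661:\n\t" smpinstr "\n662:\n"		\
			     ".section .smp_altinstructions,\"a\"\n"	\
			     "  .align 4\n"				\
			     "  .long 661b\n"	/* original */		\
			     "  .long 663f\n"	/* replacement */	\
			     "  .byte 0x66\n"	/* X86_FEATURE_UP */	\
			     "  .byte 662b-661b\n" /* source len */	\
			     "  .byte 664f-663f\n" /* replacement len */ \
			     ".previous\n"				\
			     ".section .smp_altinstr_replacement,\"awx\"\n" \
			     "663:\n\t" upinstr "\n"			\
			     "664:\n\t.fill 662b-661b,1,0x90\n"		\
			     ".previous" : args)

LOCK_PREFIX in the second hunk works the same way on a smaller scale:
instead of recording a full alternative, it only records the address of
the emitted lock prefix in .smp_locks, so the patching code can later
overwrite that single byte.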