author     Stephen Boyd <sboyd@codeaurora.org>             2012-12-20 02:39:48 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-12-20 16:50:16 -0500
commit     fcc16882ac4532aaa644bff444f0c5d6228ba71e (patch)
tree       7104729ed7fd136a26ea47462e716410666aa1f6 /lib/atomic64.c
parent     787314c35fbb97e02823a1b8eb8cfa58f366cd49 (diff)
lib: atomic64: Initialize locks statically to fix early users
The atomic64 library uses a handful of static spin locks to implement
atomic 64-bit operations on architectures without support for atomic
64-bit instructions.
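For reference, here is a condensed sketch of that scheme, adapted from lib/atomic64.c of this era (the NR_LOCKS value, the address hash, and the padding details are taken from that file, but treat them as illustrative rather than authoritative):

```c
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

/* A small, cacheline-padded array of raw spinlocks, selected by hashing the
 * atomic variable's address, guards every generic 64-bit operation. */
#define NR_LOCKS	16

static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];	/* keep each lock on its own cache line */
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;

static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

long long atomic64_add_return(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);	/* trips the BUG shown below if the lock was never initialized */
	val = (v->counter += a);
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
```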
Unfortunately, the spinlocks are initialized in a pure initcall, which is too late for the vfs namespace code that wants to use atomic64 operations before the initcall has run.
This became a problem as of commit 8823c079ba71: "vfs: Add setns support
for the mount namespace".
This leads to BUG messages such as:
BUG: spinlock bad magic on CPU#0, swapper/0/0
lock: atomic64_lock+0x240/0x400, .magic: 00000000, .owner: <none>/-1, .owner_cpu: 0
do_raw_spin_lock+0x158/0x198
_raw_spin_lock_irqsave+0x4c/0x58
atomic64_add_return+0x30/0x5c
alloc_mnt_ns.clone.14+0x44/0xac
create_mnt_ns+0xc/0x54
mnt_init+0x120/0x1d4
vfs_caches_init+0xe0/0x10c
start_kernel+0x29c/0x300
emitted early during boot when spinlock debugging is enabled.
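The "bad magic" report comes from the spinlock debugging code: with CONFIG_DEBUG_SPINLOCK, every raw_spinlock_t carries a magic field that its initializer sets, and a zero-filled lock that was never initialized fails the sanity check on first use. A paraphrased sketch of that check (modelled on lib/spinlock_debug.c of this era, other checks omitted), not a verbatim quote:

```c
/* A never-initialized, zero-filled lock still has .magic == 0 instead of
 * SPINLOCK_MAGIC, so the very first lock attempt reports
 * "BUG: spinlock bad magic" -- the ".magic: 00000000" in the report above. */
static void debug_spin_lock_before(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
}
```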
Fix this by initializing the spinlocks statically at compile time.
Reported-and-tested-by: Vaibhav Bedia <vaibhav.bedia@ti.com>
Tested-by: Tony Lindgren <tony@atomide.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'lib/atomic64.c')
-rw-r--r--  lib/atomic64.c | 17 +++++------------
1 file changed, 5 insertions(+), 12 deletions(-)
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 978537809d84..08a4f068e61e 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -31,7 +31,11 @@
 static union {
 	raw_spinlock_t lock;
 	char pad[L1_CACHE_BYTES];
-} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
+} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
+	[0 ... (NR_LOCKS - 1)] = {
+		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
+	},
+};
 
 static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
 {
@@ -173,14 +177,3 @@ int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 	return ret;
 }
 EXPORT_SYMBOL(atomic64_add_unless);
-
-static int init_atomic64_lock(void)
-{
-	int i;
-
-	for (i = 0; i < NR_LOCKS; ++i)
-		raw_spin_lock_init(&atomic64_lock[i].lock);
-	return 0;
-}
-
-pure_initcall(init_atomic64_lock);
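The replacement hunk uses GCC's designated range initializer extension, `[first ... last] =`, to give every array element the same compile-time initializer, so no runtime initialization pass (and therefore no initcall, with its ordering constraints) is needed. A minimal, self-contained userspace illustration of the idiom, using a made-up `fake_lock` type rather than real kernel code:

```c
#include <stdio.h>

#define NR_LOCKS 16

/* Stand-in for raw_spinlock_t: with lock debugging enabled the real type
 * carries a magic value that an initializer must set. */
struct fake_lock { unsigned int magic; };
#define FAKE_LOCK_UNLOCKED { .magic = 0xdead4eadu }

/* Every element is initialized at compile time, mirroring the fix above;
 * [0 ... (NR_LOCKS - 1)] is a GNU C extension (supported by gcc and clang). */
static struct fake_lock locks[NR_LOCKS] = {
	[0 ... (NR_LOCKS - 1)] = FAKE_LOCK_UNLOCKED,
};

int main(void)
{
	/* Valid before any initialization code has had a chance to run. */
	printf("locks[7].magic = %#x\n", locks[7].magic);
	return 0;
}
```

Because the initializer is emitted into the object file at build time, the locks are usable from the very first instruction, which is exactly what the early vfs namespace user in the report above needs.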