diff options
author | Mike Frysinger <vapier@gentoo.org> | 2011-05-29 12:17:22 -0400 |
---|---|---|
committer | Mike Frysinger <vapier@gentoo.org> | 2011-07-23 01:10:43 -0400 |
commit | 50f92aa3348a1c810fa809c60227ec80f7a5fd41 (patch) | |
tree | 7744d5e7aeaff92b3bf99431178c7c2183e5fee5 | |
parent | fb1d9be5967fff0a3c93b06304fd992e3c438b7f (diff) |
Blackfin: convert to asm-generic/mutex-dec.h for all systems
The Blackfin mutex.h is merely a copy of an older asm-generic/mutex-dec.h,
so punt it and just use the common one directly.
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
-rw-r--r-- | arch/blackfin/include/asm/mutex.h | 77 |
1 files changed, 1 insertions, 76 deletions
diff --git a/arch/blackfin/include/asm/mutex.h b/arch/blackfin/include/asm/mutex.h
index f726e3a80ad0..ff6101aa2c71 100644
--- a/arch/blackfin/include/asm/mutex.h
+++ b/arch/blackfin/include/asm/mutex.h
@@ -1,76 +1 @@ | |||
1 | /* | #include <asm-generic/mutex-dec.h> | |
2 | * Pull in the generic implementation for the mutex fastpath. | ||
3 | * | ||
4 | * TODO: implement optimized primitives instead, or leave the generic | ||
5 | * implementation in place, or pick the atomic_xchg() based generic | ||
6 | * implementation. (see asm-generic/mutex-xchg.h for details) | ||
7 | * | ||
8 | * Copyright 2006-2009 Analog Devices Inc. | ||
9 | * | ||
10 | * Licensed under the GPL-2 or later. | ||
11 | */ | ||
12 | |||
13 | #ifndef _ASM_MUTEX_H | ||
14 | #define _ASM_MUTEX_H | ||
15 | |||
16 | #ifndef CONFIG_SMP | ||
17 | #include <asm-generic/mutex.h> | ||
18 | #else | ||
19 | |||
/*
 * __mutex_fastpath_lock - decrement-based mutex acquire fastpath.
 * @count:   mutex counter (1 = unlocked, 0 = locked, <0 = contended;
 *           see the cmpxchg(count, 1, 0) in __mutex_fastpath_trylock)
 * @fail_fn: slowpath to enter when the mutex is already held
 *
 * Atomically decrements @count; a negative result means another owner
 * holds the mutex, so control drops into the slowpath.  On the
 * uncontended path the full barrier keeps the critical section from
 * being reordered above the acquire.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		fail_fn(count);
	else
		smp_mb();	/* acquire barrier on the uncontended path */
}
28 | |||
/*
 * __mutex_fastpath_lock_retval - acquire fastpath that propagates the
 * slowpath's return value.
 * @count:   mutex counter (1 = unlocked, 0 = locked, <0 = contended)
 * @fail_fn: slowpath; its return value is passed through on contention
 *
 * Returns 0 when the lock is taken on the fastpath, otherwise whatever
 * @fail_fn returns (presumably an error code for interruptible variants
 * -- confirm against the generic mutex callers).
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		return fail_fn(count);
	else {
		smp_mb();	/* acquire barrier on the uncontended path */
		return 0;
	}
}
39 | |||
/*
 * __mutex_fastpath_unlock - decrement-based mutex release fastpath.
 * @count:   mutex counter (1 = unlocked, 0 = locked, <0 = contended)
 * @fail_fn: slowpath to enter when there may be waiters to wake
 *
 * The barrier comes BEFORE the increment: the critical section must be
 * globally visible before the mutex is seen as released.  A post-
 * increment value <= 0 means @count had gone negative (contended
 * acquire), so the slowpath runs -- presumably to wake a waiter.
 */
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	smp_mb();	/* release barrier: order critical section before unlock */
	if (unlikely(atomic_inc_return(count) <= 0))
		fail_fn(count);
}
47 | |||
/*
 * Non-zero: the slowpath must perform the unlock itself -- the
 * decrement fastpath above does not release the lock on the contended
 * path (matches the asm-generic/mutex-dec.h contract).
 */
#define __mutex_slowpath_needs_to_unlock()		1
49 | |||
/*
 * __mutex_fastpath_trylock - attempt to take the mutex without blocking.
 * @count:   mutex counter (1 = unlocked)
 * @fail_fn: spinlock-based fallback used when the architecture lacks a
 *           native cmpxchg
 *
 * Returns 1 if the mutex was acquired, 0 if it was already held.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	/*
	 * We have two variants here. The cmpxchg based one is the best one
	 * because it never induces a false contention state. It is included
	 * here because architectures using the inc/dec algorithms over the
	 * xchg ones are much more likely to support cmpxchg natively.
	 *
	 * If not we fall back to the spinlock based variant - that is
	 * just as efficient (and simpler) as a 'destructive' probing of
	 * the mutex state would be.
	 */
#ifdef __HAVE_ARCH_CMPXCHG
	/* 1 -> 0 transition succeeds only when the mutex was free. */
	if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
		smp_mb();	/* acquire barrier after a successful trylock */
		return 1;
	}
	return 0;
#else
	return fail_fn(count);
#endif
}
73 | |||
74 | #endif | ||
75 | |||
76 | #endif | ||