aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arc/include/asm
diff options
context:
space:
mode:
authorVineet Gupta <vgupta@synopsys.com>2013-01-18 04:42:18 -0500
committerVineet Gupta <vgupta@synopsys.com>2013-02-11 09:30:35 -0500
commit6e35fa2d430538cd0609e499c6f789beea9e9798 (patch)
tree3ec427c979b67d903f5cebf6cf9b0e5f61b97805 /arch/arc/include/asm
parent3be80aaef861a60b85a9323462ebb5f623774f7a (diff)
ARC: Spinlock/rwlock/mutex primitives
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/arc/include/asm')
-rw-r--r--arch/arc/include/asm/mutex.h9
-rw-r--r--arch/arc/include/asm/spinlock.h144
-rw-r--r--arch/arc/include/asm/spinlock_types.h35
3 files changed, 188 insertions, 0 deletions
diff --git a/arch/arc/include/asm/mutex.h b/arch/arc/include/asm/mutex.h
new file mode 100644
index 000000000000..3be5e64da139
--- /dev/null
+++ b/arch/arc/include/asm/mutex.h
@@ -0,0 +1,9 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <asm-generic/mutex-xchg.h>
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
new file mode 100644
index 000000000000..f158197ac5b0
--- /dev/null
+++ b/arch/arc/include/asm/spinlock.h
@@ -0,0 +1,144 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __ASM_SPINLOCK_H
10#define __ASM_SPINLOCK_H
11
12#include <asm/spinlock_types.h>
13#include <asm/processor.h>
14#include <asm/barrier.h>
15
/* Lock is considered held whenever slock holds anything but UNLOCKED (0) */
#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
/* IRQ flags are unused by this implementation; defer to the plain lock */
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
/* Busy-wait (without ever acquiring) until the lock is observed free */
#define arch_spin_unlock_wait(x) \
	do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
20
21static inline void arch_spin_lock(arch_spinlock_t *lock)
22{
23 unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
24
25 __asm__ __volatile__(
26 "1: ex %0, [%1] \n"
27 " breq %0, %2, 1b \n"
28 : "+&r" (tmp)
29 : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
30 : "memory");
31}
32
33static inline int arch_spin_trylock(arch_spinlock_t *lock)
34{
35 unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
36
37 __asm__ __volatile__(
38 "1: ex %0, [%1] \n"
39 : "+r" (tmp)
40 : "r"(&(lock->slock))
41 : "memory");
42
43 return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__);
44}
45
46static inline void arch_spin_unlock(arch_spinlock_t *lock)
47{
48 lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
49 smp_mb();
50}
51
52/*
53 * Read-write spinlocks, allowing multiple readers but only one writer.
54 *
55 * The spinlock itself is contained in @counter and access to it is
56 * serialized with @lock_mutex.
57 *
58 * Unfair locking as Writers could be starved indefinitely by Reader(s)
59 */
60
61/* Would read_trylock() succeed? */
62#define arch_read_can_lock(x) ((x)->counter > 0)
63
64/* Would write_trylock() succeed? */
65#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
66
67/* 1 - lock taken successfully */
68static inline int arch_read_trylock(arch_rwlock_t *rw)
69{
70 int ret = 0;
71
72 arch_spin_lock(&(rw->lock_mutex));
73
74 /*
75 * zero means writer holds the lock exclusively, deny Reader.
76 * Otherwise grant lock to first/subseq reader
77 */
78 if (rw->counter > 0) {
79 rw->counter--;
80 ret = 1;
81 }
82
83 arch_spin_unlock(&(rw->lock_mutex));
84
85 smp_mb();
86 return ret;
87}
88
89/* 1 - lock taken successfully */
90static inline int arch_write_trylock(arch_rwlock_t *rw)
91{
92 int ret = 0;
93
94 arch_spin_lock(&(rw->lock_mutex));
95
96 /*
97 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
98 * deny writer. Otherwise if unlocked grant to writer
99 * Hence the claim that Linux rwlocks are unfair to writers.
100 * (can be starved for an indefinite time by readers).
101 */
102 if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
103 rw->counter = 0;
104 ret = 1;
105 }
106 arch_spin_unlock(&(rw->lock_mutex));
107
108 return ret;
109}
110
111static inline void arch_read_lock(arch_rwlock_t *rw)
112{
113 while (!arch_read_trylock(rw))
114 cpu_relax();
115}
116
117static inline void arch_write_lock(arch_rwlock_t *rw)
118{
119 while (!arch_write_trylock(rw))
120 cpu_relax();
121}
122
123static inline void arch_read_unlock(arch_rwlock_t *rw)
124{
125 arch_spin_lock(&(rw->lock_mutex));
126 rw->counter++;
127 arch_spin_unlock(&(rw->lock_mutex));
128}
129
130static inline void arch_write_unlock(arch_rwlock_t *rw)
131{
132 arch_spin_lock(&(rw->lock_mutex));
133 rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
134 arch_spin_unlock(&(rw->lock_mutex));
135}
136
/* IRQ-flags variants: flags are ignored, defer to the plain lock ops */
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

/* What to do while spinning: just yield the pipeline */
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
143
144#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/arc/include/asm/spinlock_types.h b/arch/arc/include/asm/spinlock_types.h
new file mode 100644
index 000000000000..8276bfd61704
--- /dev/null
+++ b/arch/arc/include/asm/spinlock_types.h
@@ -0,0 +1,35 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __ASM_SPINLOCK_TYPES_H
10#define __ASM_SPINLOCK_TYPES_H
11
/*
 * Core spinlock: a single word toggled between UNLOCKED (0) and
 * LOCKED (1) via the ARC atomic EX (exchange) instruction.
 */
typedef struct {
	volatile unsigned int slock;	/* 0 = free, 1 = held */
} arch_spinlock_t;

#define __ARCH_SPIN_LOCK_UNLOCKED__ 0
#define __ARCH_SPIN_LOCK_LOCKED__ 1

/* Static initializers for arch_spinlock_t */
#define __ARCH_SPIN_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED__ }
#define __ARCH_SPIN_LOCK_LOCKED { __ARCH_SPIN_LOCK_LOCKED__ }

/*
 * rwlock state lives in @counter, serialized by @lock_mutex:
 *   Unlocked     : 0x01_00_00_00 (full value)
 *   Read lock(s) : 0x00_FF_00_00 down to 0x01 — each reader decrements,
 *                  so readers-held = 0x01000000 - counter
 *   Write lock   : 0x0, but only possible if prior value was the
 *                  "unlocked" 0x0100_0000
 */
typedef struct {
	volatile unsigned int counter;	/* reader slots remaining, see above */
	arch_spinlock_t lock_mutex;	/* guards updates to counter */
} arch_rwlock_t;

#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000
#define __ARCH_RW_LOCK_UNLOCKED { .counter = __ARCH_RW_LOCK_UNLOCKED__ }
34
35#endif