author    Bjoern B. Brandenburg <bbb@cs.unc.edu>    2010-11-06 19:26:02 -0400
committer Bjoern B. Brandenburg <bbb@cs.unc.edu>    2010-11-09 16:35:33 -0500
commit    5e40fcc568a204ce8b96f734a71402d0ca52e076 (patch)
tree      cc9a78c73e1397395fc90ce3f31b7b0841c6953d
parent    39db6fe3e50ac2dc9a3699cdc6d984e825852469 (diff)
remove asm/atomic.h
For historic reasons, we carry old atomic operations support in
liblitmus. This is no longer useful:

1) There is actually no client for these calls in liblitmus.

2) There is now a standard gcc API for this purpose:
   http://gcc.gnu.org/onlinedocs/gcc/Atomic-Builtins.html

Thus, we can reduce our maintenance burden.
-rw-r--r--  arch/arm/include/asm/atomic.h     |   9 -
-rw-r--r--  arch/sparc64/include/asm/atomic.h |  95 -
-rw-r--r--  arch/x86/include/asm/atomic.h     | 136 -
-rw-r--r--  src/kernel_iface.c                |   3 +-
4 files changed, 1 insertion(+), 242 deletions(-)
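
The gcc builtins referenced in the commit message cover everything the deleted headers provided. A minimal sketch of the replacements, assuming gcc >= 4.1; the wrapper names here are illustrative, not part of the patch:

	/* Full compiler + hardware memory fence; stands in for the per-arch
	 * barrier() from the deleted headers (mfence on x86, membar on
	 * sparc64). This is the substitution the patch makes in
	 * src/kernel_iface.c below. */
	static inline void fence(void)
	{
		__sync_synchronize();
	}

	/* Atomic add-and-return; stands in for atomic_add_return(). The
	 * builtin returns the updated value, i.e., old + i. */
	static inline int add_return(int *counter, int i)
	{
		return __sync_add_and_fetch(counter, i);
	}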
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
deleted file mode 100644
index 70dbf80..0000000
--- a/arch/arm/include/asm/atomic.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef ASM_ATOMIC_H
-#define ASM_ATOMIC_H
-
-static inline void barrier(void)
-{
-	/* not yet implemented */
-}
-
-#endif
diff --git a/arch/sparc64/include/asm/atomic.h b/arch/sparc64/include/asm/atomic.h
deleted file mode 100644
index cabaa5a..0000000
--- a/arch/sparc64/include/asm/atomic.h
+++ /dev/null
@@ -1,95 +0,0 @@
-#ifndef ASM_ATOMIC_H
-#define ASM_ATOMIC_H
-
-/* sparc64 assembly.
- *
- * Most of this code comes straight out of the Linux kernel.
- *
- * The terms of the GPL v2 apply.
- *
- */
-
-#define membar_safe(type) \
-do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
-	" membar " type "\n" \
-	"1:\n" \
-	: : : "memory"); \
-} while (0)
-
-#define mb() \
-	membar_safe("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
-
-static inline void barrier(void)
-{
-	mb();
-}
-
-
-#define cpu_relax() barrier()
-
-static inline int
-cmpxchg(volatile int *m, int old, int new)
-{
-	__asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
-			     "cas [%2], %3, %0\n\t"
-			     "membar #StoreLoad | #StoreStore"
-			     : "=&r" (new)
-			     : "0" (new), "r" (m), "r" (old)
-			     : "memory");
-
-	return new;
-}
-
-
-typedef struct { int counter; } atomic_t;
-
-#define ATOMIC_INIT(i) { (i) }
-
-/**
- * atomic_read - read atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically reads the value of @v.
- */
-#define atomic_read(v) ((v)->counter)
-
-/**
- * atomic_set - set atomic variable
- * @v: pointer of type atomic_t
- * @i: required value
- *
- * Atomically sets the value of @v to @i.
- */
-#define atomic_set(v,i) (((v)->counter) = (i))
-
-
-/**
- * atomic_add_return - add and return
- * @v: pointer of type atomic_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns @i + @v
- */
-static __inline__ int atomic_add_return(int i, atomic_t *v)
-{
-	int old;
-	int ret;
-	goto first;
-	do {
-		cpu_relax();
-	first:
-		old = atomic_read(v);
-		ret = cmpxchg(&v->counter, old, old + i);
-	} while (ret != old);
-	return old + i;
-}
-
-static __inline__ void atomic_add(int i, atomic_t *v)
-{
-	atomic_add_return(i, v);
-}
-
-#define atomic_inc_return(v) (atomic_add_return(1,v))
-
-
-#endif
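
The CAS retry loop in the deleted sparc64 atomic_add_return() maps directly onto the builtins: __sync_val_compare_and_swap() subsumes the hand-coded cas/membar sequence, and __sync_add_and_fetch() makes the loop unnecessary altogether. A sketch of the same loop over the builtin, for illustration only (the function name is hypothetical):

	/* Same retry structure as the deleted atomic_add_return(), with the
	 * raw cas instruction and membars delegated to the builtin; returns
	 * old + i, like the original. */
	static inline int add_return_cas(int *counter, int i)
	{
		int old, seen;
		for (;;) {
			old = *counter;	/* unsynchronized snapshot, as before */
			/* returns the value found in *counter; the swap
			 * succeeded iff that value equals old */
			seen = __sync_val_compare_and_swap(counter, old, old + i);
			if (seen == old)
				return old + i;
		}
	}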
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
deleted file mode 100644
index 4bd1fe2..0000000
--- a/arch/x86/include/asm/atomic.h
+++ /dev/null
@@ -1,136 +0,0 @@
-#ifndef ASM_ATOMIC_H
-#define ASM_ATOMIC_H
-
-/*
- * Most of this code comes straight out of the Linux kernel.
- *
- * The terms of the GPL v2 apply.
- */
-
-static inline void barrier(void)
-{
-	__asm__ __volatile__("mfence": : :"memory");
-}
-
-static __inline__ void cpu_relax(void)
-{
-	__asm__ __volatile("pause");
-}
-
-typedef struct { int counter; } atomic_t;
-
-#ifdef __i386__
-
-#define ATOMIC_INIT(i) { (i) }
-
-/**
- * atomic_read - read atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically reads the value of @v.
- */
-#define atomic_read(v) ((v)->counter)
-
-/**
- * atomic_set - set atomic variable
- * @v: pointer of type atomic_t
- * @i: required value
- *
- * Atomically sets the value of @v to @i.
- */
-#define atomic_set(v,i) (((v)->counter) = (i))
-
-static __inline__ void atomic_add(int i, atomic_t *v)
-{
-	__asm__ __volatile__(
-		"lock; addl %1,%0"
-		:"+m" (v->counter)
-		:"ir" (i));
-}
-
-/**
- * atomic_add_return - add and return
- * @v: pointer of type atomic_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns @i + @v
- */
-static __inline__ int atomic_add_return(int i, atomic_t *v)
-{
-	int __i;
-	__i = i;
-	__asm__ __volatile__(
-		"lock; xaddl %0, %1"
-		:"+r" (i), "+m" (v->counter)
-		: : "memory");
-	return i + __i;
-}
-
-#define atomic_inc_return(v) (atomic_add_return(1,v))
-
-#elif defined(__x86_64__)
-
-/* almost the same as i386, but extra care must be taken when
- * specifying clobbered registers
- */
-
-#define ATOMIC_INIT(i) { (i) }
-
-/**
- * atomic_read - read atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically reads the value of @v.
- */
-static inline int atomic_read(const atomic_t *v)
-{
-	return v->counter;
-}
-
-/**
- * atomic_set - set atomic variable
- * @v: pointer of type atomic_t
- * @i: required value
- *
- * Atomically sets the value of @v to @i.
- */
-static inline void atomic_set(atomic_t *v, int i)
-{
-	v->counter = i;
-}
-
-/**
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v.
- */
-static inline void atomic_add(int i, atomic_t *v)
-{
-	asm volatile("lock; addl %1,%0"
-		     : "=m" (v->counter)
-		     : "ir" (i), "m" (v->counter));
-}
-
-/**
- * atomic_add_return - add and return
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns @i + @v
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	int __i = i;
-	asm volatile("lock; xaddl %0, %1"
-		     : "+r" (i), "+m" (v->counter)
-		     : : "memory");
-	return i + __i;
-}
-
-#define atomic_inc_return(v) (atomic_add_return(1, v))
-
-#endif
-
-#endif
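
On x86, gcc typically lowers the builtins to the same instructions the deleted inline assembly used: __sync_synchronize() emits an mfence, and __sync_add_and_fetch() on an int a lock xadd. Only cpu_relax() has no builtin counterpart; if a spin-wait hint is still needed, it remains a one-line asm. A sketch (names illustrative):

	/* pause has no __sync equivalent; keep the spin hint as inline asm */
	static inline void cpu_relax(void)
	{
		__asm__ __volatile__("pause");
	}

	/* atomic_inc_return() over the builtin: returns the incremented
	 * value, matching atomic_add_return(1, v) above */
	static inline int inc_return(int *counter)
	{
		return __sync_add_and_fetch(counter, 1);
	}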
diff --git a/src/kernel_iface.c b/src/kernel_iface.c
index 25e0cea..1426795 100644
--- a/src/kernel_iface.c
+++ b/src/kernel_iface.c
@@ -8,7 +8,6 @@
 
 #include "litmus.h"
 #include "internal.h"
-#include "asm/atomic.h"
 
 #define LITMUS_CTRL_DEVICE "/dev/litmus/ctrl"
 #define CTRL_PAGES 1
@@ -66,7 +65,7 @@ void exit_np(void)
 {
 	if (likely(ctrl_page != NULL) && --ctrl_page->np_flag == 0) {
 		/* became preemptive, let's check for delayed preemptions */
-		barrier();
+		__sync_synchronize();
 		if (ctrl_page->delayed_preemption)
 			sched_yield();
 	}
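
The exit_np() change above is the patch's one behavioral touch point: __sync_synchronize() is a full memory fence, matching the mfence-based barrier() it replaces on x86 and the membar-based one on sparc64. On ARM, where the deleted barrier() stub was an empty function, it now emits a real barrier, presumably so that the np_flag update is ordered before the delayed_preemption read.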