diff options
author | Bjoern B. Brandenburg <bbb@cs.unc.edu> | 2010-11-06 16:46:55 -0400 |
---|---|---|
committer | Bjoern B. Brandenburg <bbb@cs.unc.edu> | 2010-11-09 16:35:33 -0500 |
commit | 1e82ca830c3ad2fd057b4c4fc5243b0622526473 (patch) | |
tree | facb2c86e48525b28619f3050a635184b6ecbddf /include | |
parent | f8585fe1fc6f0830f900dad7c8ccc40e17f79644 (diff) |
refactor: remove all architecture-dependent code from include/
Move the architecture-dependent code to the arch/ subtree.
Diffstat (limited to 'include')
-rw-r--r-- | include/asm.h | 15 | ||||
-rw-r--r-- | include/asm_sparc.h | 92 | ||||
-rw-r--r-- | include/asm_x86.h | 144 |
3 files changed, 0 insertions, 251 deletions
diff --git a/include/asm.h b/include/asm.h deleted file mode 100644 index bc15fae..0000000 --- a/include/asm.h +++ /dev/null | |||
@@ -1,15 +0,0 @@ | |||
/* liblitmus platform dependent includes */

#ifndef ASM_H
#define ASM_H

/* x86: 32-bit and 64-bit builds share one header. */
#if defined(__i386__) || defined(__x86_64__)
#include "asm_x86.h"
#endif


#ifdef __sparc__
#include "asm_sparc.h"
#endif

#endif /* ASM_H */
diff --git a/include/asm_sparc.h b/include/asm_sparc.h deleted file mode 100644 index 96c8049..0000000 --- a/include/asm_sparc.h +++ /dev/null | |||
@@ -1,92 +0,0 @@ | |||
1 | /* sparc64 assembly. | ||
2 | * Don't include directly, use asm.h instead. | ||
3 | * | ||
4 | * Most of this code comes straight out of the Linux kernel. | ||
5 | * | ||
6 | * The terms of the GPL v2 apply. | ||
7 | * | ||
8 | */ | ||
9 | |||
/* Emit a SPARC membar of the requested @type.  The branch over the
 * membar sitting in its delay slot ("ba,pt ... 1:") is the kernel's
 * workaround for CPU errata; the "memory" clobber additionally stops
 * the compiler from moving memory accesses across the barrier. */
#define membar_safe(type) \
do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
	" membar " type "\n" \
	"1:\n" \
	: : : "memory"); \
} while (0)

/* Full memory barrier: orders all four load/store combinations. */
#define mb() \
	membar_safe("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")

/* Compiler + hardware barrier (function form of mb()). */
static inline void barrier(void)
{
	mb();
}


/* Spin-wait hint; on sparc this is simply a full barrier. */
#define cpu_relax() barrier()
27 | |||
/* Atomic compare-and-swap: if *@m equals @old, store @new into *@m.
 * Returns the value previously found in *@m (equal to @old iff the
 * swap happened).  The surrounding membars give the operation full
 * barrier semantics, as in the Linux kernel's sparc64 implementation. */
static inline int
cmpxchg(volatile int *m, int old, int new)
{
	__asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
			     "cas [%2], %3, %0\n\t"
			     "membar #StoreLoad | #StoreStore"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}
40 | |||
41 | |||
/* Plain-int atomic counter (same layout as the kernel's atomic_t). */
typedef struct { int counter; } atomic_t;

/* Static initializer: atomic_t a = ATOMIC_INIT(0); */
#define ATOMIC_INIT(i)	{ (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i)		(((v)->counter) = (i))
62 | |||
63 | |||
64 | /** | ||
65 | * atomic_add_return - add and return | ||
66 | * @v: pointer of type atomic_t | ||
67 | * @i: integer value to add | ||
68 | * | ||
69 | * Atomically adds @i to @v and returns @i + @v | ||
70 | */ | ||
71 | static __inline__ int atomic_add_return(int i, atomic_t *v) | ||
72 | { | ||
73 | int old; | ||
74 | int ret; | ||
75 | goto first; | ||
76 | do { | ||
77 | cpu_relax(); | ||
78 | first: | ||
79 | old = atomic_read(v); | ||
80 | ret = cmpxchg(&v->counter, old, old + i); | ||
81 | } while (ret != old); | ||
82 | return old + i; | ||
83 | } | ||
84 | |||
85 | static __inline__ void atomic_add(int i, atomic_t *v) | ||
86 | { | ||
87 | atomic_add_return(i, v); | ||
88 | } | ||
89 | |||
90 | #define atomic_inc_return(v) (atomic_add_return(1,v)) | ||
91 | |||
92 | |||
diff --git a/include/asm_x86.h b/include/asm_x86.h deleted file mode 100644 index ccc6cce..0000000 --- a/include/asm_x86.h +++ /dev/null | |||
@@ -1,144 +0,0 @@ | |||
1 | /* Intel ia32 assembly. | ||
2 | * Don't include directly, use asm.h instead. | ||
3 | * | ||
4 | * Most of this code comes straight out of the Linux kernel. | ||
5 | * | ||
6 | * The terms of the GPL v2 apply. | ||
7 | */ | ||
8 | |||
9 | static inline void barrier(void) | ||
10 | { | ||
11 | __asm__ __volatile__("mfence": : :"memory"); | ||
12 | } | ||
13 | |||
/* Spin-wait hint: PAUSE tells the CPU we are busy-waiting (saves power
 * and avoids memory-order mis-speculation on hyper-threads).
 *
 * Fix: added the "memory" clobber so cpu_relax() is also a compiler
 * barrier — without it the compiler may hoist loads out of a spin loop
 * and spin on a stale value.  This matches the sparc cpu_relax() in
 * this library, which is a full barrier.  Also normalized the keyword
 * spelling to __volatile__. */
static __inline__ void cpu_relax(void)
{
	__asm__ __volatile__("pause" : : : "memory");
}
18 | |||
/* please, use these only if you _really_ know what you're doing
 * ... and remember iopl(3) first!! (include sys/io.h)
 */
/* Disable maskable interrupts on the current CPU.  Faults unless the
 * caller has raised its I/O privilege level via iopl(3). */
static inline void cli(void) {
	asm volatile("cli": : :"memory");
}

/* Re-enable maskable interrupts; same iopl(3) caveat as cli(). */
static inline void sti(void) {
	asm volatile("sti": : :"memory");
}
29 | |||
/* Plain-int atomic counter (same layout as the kernel's atomic_t). */
typedef struct { int counter; } atomic_t;
31 | |||
#ifdef __i386__

/* Static initializer: atomic_t a = ATOMIC_INIT(0); */
#define ATOMIC_INIT(i)	{ (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i)		(((v)->counter) = (i))
52 | |||
53 | static __inline__ void atomic_add(int i, atomic_t *v) | ||
54 | { | ||
55 | __asm__ __volatile__( | ||
56 | "lock; addl %1,%0" | ||
57 | :"+m" (v->counter) | ||
58 | :"ir" (i)); | ||
59 | } | ||
60 | |||
61 | /** | ||
62 | * atomic_add_return - add and return | ||
63 | * @v: pointer of type atomic_t | ||
64 | * @i: integer value to add | ||
65 | * | ||
66 | * Atomically adds @i to @v and returns @i + @v | ||
67 | */ | ||
68 | static __inline__ int atomic_add_return(int i, atomic_t *v) | ||
69 | { | ||
70 | int __i; | ||
71 | __i = i; | ||
72 | __asm__ __volatile__( | ||
73 | "lock; xaddl %0, %1" | ||
74 | :"+r" (i), "+m" (v->counter) | ||
75 | : : "memory"); | ||
76 | return i + __i; | ||
77 | } | ||
78 | |||
79 | #define atomic_inc_return(v) (atomic_add_return(1,v)) | ||
80 | |||
#elif defined(__x86_64__)

/* almost the same as i386, but extra care must be taken when
 * specifying clobbered registers
 */

/* Static initializer: atomic_t a = ATOMIC_INIT(0); */
#define ATOMIC_INIT(i)	{ (i) }
/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 *
 * NOTE(review): a plain load — presumably relies on aligned int loads
 * being atomic on x86, as the kernel's version does; confirm alignment
 * of atomic_t at its uses.
 */
static inline int atomic_read(const atomic_t *v)
{
	return v->counter;
}
99 | |||
/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 *
 * NOTE(review): a plain store — presumably relies on aligned int stores
 * being atomic on x86, mirroring atomic_read() above.
 */
static inline void atomic_set(atomic_t *v, int i)
{
	v->counter = i;
}
111 | |||
112 | /** | ||
113 | * atomic_add - add integer to atomic variable | ||
114 | * @i: integer value to add | ||
115 | * @v: pointer of type atomic_t | ||
116 | * | ||
117 | * Atomically adds @i to @v. | ||
118 | */ | ||
119 | static inline void atomic_add(int i, atomic_t *v) | ||
120 | { | ||
121 | asm volatile("lock; addl %1,%0" | ||
122 | : "=m" (v->counter) | ||
123 | : "ir" (i), "m" (v->counter)); | ||
124 | } | ||
125 | |||
126 | /** | ||
127 | * atomic_add_return - add and return | ||
128 | * @i: integer value to add | ||
129 | * @v: pointer of type atomic_t | ||
130 | * | ||
131 | * Atomically adds @i to @v and returns @i + @v | ||
132 | */ | ||
133 | static inline int atomic_add_return(int i, atomic_t *v) | ||
134 | { | ||
135 | int __i = i; | ||
136 | asm volatile("lock; xaddl %0, %1" | ||
137 | : "+r" (i), "+m" (v->counter) | ||
138 | : : "memory"); | ||
139 | return i + __i; | ||
140 | } | ||
141 | |||
142 | #define atomic_inc_return(v) (atomic_add_return(1, v)) | ||
143 | |||
144 | #endif | ||