about summary refs log tree commit diff stats
path: root/include
diff options
context:
space:
mode:
Diffstat (limited to 'include')
-rw-r--r--include/asm.h15
-rw-r--r--include/asm_sparc.h92
-rw-r--r--include/asm_x86.h144
-rw-r--r--include/cycles.h63
-rw-r--r--include/litmus.h24
5 files changed, 13 insertions, 325 deletions
diff --git a/include/asm.h b/include/asm.h
deleted file mode 100644
index bc15fae..0000000
--- a/include/asm.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/* liblitmus platform dependent includes */
2
3#ifndef ASM_H
4#define ASM_H
5
6#if defined(__i386__) || defined(__x86_64__)
7#include "asm_x86.h"
8#endif
9
10
11#ifdef __sparc__
12#include "asm_sparc.h"
13#endif
14
15#endif
diff --git a/include/asm_sparc.h b/include/asm_sparc.h
deleted file mode 100644
index 96c8049..0000000
--- a/include/asm_sparc.h
+++ /dev/null
@@ -1,92 +0,0 @@
1/* sparc64 assembly.
2 * Don't include directly, use asm.h instead.
3 *
4 * Most of this code comes straight out of the Linux kernel.
5 *
6 * The terms of the GPL v2 apply.
7 *
8 */
9
10#define membar_safe(type) \
11do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
12 " membar " type "\n" \
13 "1:\n" \
14 : : : "memory"); \
15} while (0)
16
17#define mb() \
18 membar_safe("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
19
20static inline void barrier(void)
21{
22 mb();
23}
24
25
26#define cpu_relax() barrier()
27
28static inline int
29cmpxchg(volatile int *m, int old, int new)
30{
31 __asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
32 "cas [%2], %3, %0\n\t"
33 "membar #StoreLoad | #StoreStore"
34 : "=&r" (new)
35 : "0" (new), "r" (m), "r" (old)
36 : "memory");
37
38 return new;
39}
40
41
42typedef struct { int counter; } atomic_t;
43
44#define ATOMIC_INIT(i) { (i) }
45
46/**
47 * atomic_read - read atomic variable
48 * @v: pointer of type atomic_t
49 *
50 * Atomically reads the value of @v.
51 */
52#define atomic_read(v) ((v)->counter)
53
54/**
55 * atomic_set - set atomic variable
56 * @v: pointer of type atomic_t
57 * @i: required value
58 *
59 * Atomically sets the value of @v to @i.
60 */
61#define atomic_set(v,i) (((v)->counter) = (i))
62
63
64/**
65 * atomic_add_return - add and return
66 * @v: pointer of type atomic_t
67 * @i: integer value to add
68 *
69 * Atomically adds @i to @v and returns @i + @v
70 */
71static __inline__ int atomic_add_return(int i, atomic_t *v)
72{
73 int old;
74 int ret;
75 goto first;
76 do {
77 cpu_relax();
78 first:
79 old = atomic_read(v);
80 ret = cmpxchg(&v->counter, old, old + i);
81 } while (ret != old);
82 return old + i;
83}
84
85static __inline__ void atomic_add(int i, atomic_t *v)
86{
87 atomic_add_return(i, v);
88}
89
90#define atomic_inc_return(v) (atomic_add_return(1,v))
91
92
diff --git a/include/asm_x86.h b/include/asm_x86.h
deleted file mode 100644
index ccc6cce..0000000
--- a/include/asm_x86.h
+++ /dev/null
@@ -1,144 +0,0 @@
1/* Intel ia32 assembly.
2 * Don't include directly, use asm.h instead.
3 *
4 * Most of this code comes straight out of the Linux kernel.
5 *
6 * The terms of the GPL v2 apply.
7 */
8
9static inline void barrier(void)
10{
11 __asm__ __volatile__("mfence": : :"memory");
12}
13
14static __inline__ void cpu_relax(void)
15{
16 __asm__ __volatile("pause");
17}
18
19/* please, use these only if you _really_ know what you're doing
20 * ... and remember iopl(3) first!! (include sys/io.h)
21 */
22static inline void cli(void) {
23 asm volatile("cli": : :"memory");
24}
25
26static inline void sti(void) {
27 asm volatile("sti": : :"memory");
28}
29
30typedef struct { int counter; } atomic_t;
31
32#ifdef __i386__
33
34#define ATOMIC_INIT(i) { (i) }
35
36/**
37 * atomic_read - read atomic variable
38 * @v: pointer of type atomic_t
39 *
40 * Atomically reads the value of @v.
41 */
42#define atomic_read(v) ((v)->counter)
43
44/**
45 * atomic_set - set atomic variable
46 * @v: pointer of type atomic_t
47 * @i: required value
48 *
49 * Atomically sets the value of @v to @i.
50 */
51#define atomic_set(v,i) (((v)->counter) = (i))
52
53static __inline__ void atomic_add(int i, atomic_t *v)
54{
55 __asm__ __volatile__(
56 "lock; addl %1,%0"
57 :"+m" (v->counter)
58 :"ir" (i));
59}
60
61/**
62 * atomic_add_return - add and return
63 * @v: pointer of type atomic_t
64 * @i: integer value to add
65 *
66 * Atomically adds @i to @v and returns @i + @v
67 */
68static __inline__ int atomic_add_return(int i, atomic_t *v)
69{
70 int __i;
71 __i = i;
72 __asm__ __volatile__(
73 "lock; xaddl %0, %1"
74 :"+r" (i), "+m" (v->counter)
75 : : "memory");
76 return i + __i;
77}
78
79#define atomic_inc_return(v) (atomic_add_return(1,v))
80
81#elif defined(__x86_64__)
82
83/* almost the same as i386, but extra care must be taken when
84 * specifying clobbered registers
85 */
86
87#define ATOMIC_INIT(i) { (i) }
88
89/**
90 * atomic_read - read atomic variable
91 * @v: pointer of type atomic_t
92 *
93 * Atomically reads the value of @v.
94 */
95static inline int atomic_read(const atomic_t *v)
96{
97 return v->counter;
98}
99
100/**
101 * atomic_set - set atomic variable
102 * @v: pointer of type atomic_t
103 * @i: required value
104 *
105 * Atomically sets the value of @v to @i.
106 */
107static inline void atomic_set(atomic_t *v, int i)
108{
109 v->counter = i;
110}
111
112/**
113 * atomic_add - add integer to atomic variable
114 * @i: integer value to add
115 * @v: pointer of type atomic_t
116 *
117 * Atomically adds @i to @v.
118 */
119static inline void atomic_add(int i, atomic_t *v)
120{
121 asm volatile("lock; addl %1,%0"
122 : "=m" (v->counter)
123 : "ir" (i), "m" (v->counter));
124}
125
126/**
127 * atomic_add_return - add and return
128 * @i: integer value to add
129 * @v: pointer of type atomic_t
130 *
131 * Atomically adds @i to @v and returns @i + @v
132 */
133static inline int atomic_add_return(int i, atomic_t *v)
134{
135 int __i = i;
136 asm volatile("lock; xaddl %0, %1"
137 : "+r" (i), "+m" (v->counter)
138 : : "memory");
139 return i + __i;
140}
141
142#define atomic_inc_return(v) (atomic_add_return(1, v))
143
144#endif
diff --git a/include/cycles.h b/include/cycles.h
deleted file mode 100644
index e9b0e11..0000000
--- a/include/cycles.h
+++ /dev/null
@@ -1,63 +0,0 @@
1#ifndef CYCLES_H
2#define CYCLES_H
3
4#ifdef __x86_64__
5
6#define rdtscll(val) do { \
7 unsigned int __a,__d; \
8 __asm__ __volatile__("rdtsc" : "=a" (__a), "=d" (__d)); \
9 (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
10} while(0)
11
12static __inline__ unsigned long long native_read_tsc(void)
13{
14 unsigned long long val;
15
16 __asm__ __volatile__("mfence":::"memory");
17 rdtscll(val);
18 __asm__ __volatile__("mfence":::"memory");
19
20 return val;
21}
22
23
24#define CYCLES_FMT "llu"
25
26typedef unsigned long long cycles_t;
27
28static inline cycles_t get_cycles(void)
29{
30 return native_read_tsc();
31}
32#elif defined __i386__
33static inline unsigned long long native_read_tsc(void) {
34 unsigned long long val;
35 __asm__ __volatile__("rdtsc" : "=A" (val));
36 return val;
37}
38
39typedef unsigned long long cycles_t;
40
41#define CYCLES_FMT "llu"
42
43static inline cycles_t get_cycles(void)
44{
45 return native_read_tsc();
46}
47#elif defined __sparc__
48
49#define NPT_BIT 63
50
51typedef unsigned long cycles_t;
52
53#define CYCLES_FMT "lu"
54
55static inline cycles_t get_cycles(void) {
56 unsigned long cycles = 0;
57 __asm__ __volatile__("rd %%asr24, %0" : "=r" (cycles));
58 return cycles & ~(1UL << NPT_BIT);
59}
60
61#endif
62
63#endif
diff --git a/include/litmus.h b/include/litmus.h
index 632b9e1..2c48766 100644
--- a/include/litmus.h
+++ b/include/litmus.h
@@ -5,15 +5,15 @@
5extern "C" { 5extern "C" {
6#endif 6#endif
7 7
8#include <sys/types.h>
9
8/* Include kernel header. 10/* Include kernel header.
9 * This is required for the rt_param 11 * This is required for the rt_param
10 * and control_page structures. 12 * and control_page structures.
11 */ 13 */
12#include <litmus/rt_param.h> 14#include "litmus/rt_param.h"
13
14#include <sys/types.h>
15 15
16#include "cycles.h" /* for null_call() */ 16#include "asm/cycles.h" /* for null_call() */
17 17
18typedef int pid_t; /* PID of a task */ 18typedef int pid_t; /* PID of a task */
19 19
@@ -61,13 +61,9 @@ static inline int od_open(int fd, obj_type_t type, int obj_id)
61 return od_openx(fd, type, obj_id, 0); 61 return od_openx(fd, type, obj_id, 0);
62} 62}
63 63
64/* FMLP binary semaphore support */ 64/* real-time locking protocol support */
65int fmlp_down(int od); 65int litmus_lock(int od);
66int fmlp_up(int od); 66int litmus_unlock(int od);
67
68/* SRP binary semaphore support */
69int srp_down(int od);
70int srp_up(int od);
71 67
72/* job control*/ 68/* job control*/
73int get_job_no(unsigned int* job_no); 69int get_job_no(unsigned int* job_no);
@@ -113,6 +109,12 @@ static inline lt_t ms2lt(unsigned long milliseconds)
113 return __NS_PER_MS * milliseconds; 109 return __NS_PER_MS * milliseconds;
114} 110}
115 111
112/* CPU time consumed so far in seconds */
113double cputime(void);
114
115/* wall-clock time in seconds */
116double wctime(void);
117
116/* semaphore allocation */ 118/* semaphore allocation */
117 119
118static inline int open_fmlp_sem(int fd, int name) 120static inline int open_fmlp_sem(int fd, int name)