Diffstat (limited to 'include')

-rw-r--r--  include/litmus/fpmath.h   | 145
-rw-r--r--  include/litmus/litmus.h   |   2
-rw-r--r--  include/litmus/rt_param.h |   6

3 files changed, 152 insertions, 1 deletion
diff --git a/include/litmus/fpmath.h b/include/litmus/fpmath.h
new file mode 100644
index 000000000000..04d4bcaeae96
--- /dev/null
+++ b/include/litmus/fpmath.h
@@ -0,0 +1,145 @@
+#ifndef __FP_MATH_H__
+#define __FP_MATH_H__
+
+#ifndef __KERNEL__
+#include <stdint.h>
+#define abs(x) (((x) < 0) ? -(x) : (x))
+#endif
+
+// Use 64-bit because we want to track things at the nanosecond scale.
+// This can lead to very large numbers.
+typedef int64_t fpbuf_t;
+typedef struct
+{
+	fpbuf_t val;
+} fp_t;
+
+#define FP_SHIFT 10
+#define ROUND_BIT (FP_SHIFT - 1)
+
+#define _fp(x) ((fp_t) {x})
+
+#ifdef __KERNEL__
+static const fp_t LITMUS_FP_ZERO = {.val = 0};
+static const fp_t LITMUS_FP_ONE = {.val = (1 << FP_SHIFT)};
+#endif
+
+static inline fp_t FP(fpbuf_t x)
+{
+	return _fp(((fpbuf_t) x) << FP_SHIFT);
+}
+
+/* divide two integers to obtain a fixed point value */
+static inline fp_t _frac(fpbuf_t a, fpbuf_t b)
+{
+	return _fp(FP(a).val / (b));
+}
+
+static inline fpbuf_t _point(fp_t x)
+{
+	return (x.val % (1 << FP_SHIFT));
+
+}
+
+#define fp2str(x) x.val
+/*(x.val >> FP_SHIFT), (x.val % (1 << FP_SHIFT)) */
+#define _FP_ "%ld/1024"
+
+static inline fpbuf_t _floor(fp_t x)
+{
+	return x.val >> FP_SHIFT;
+}
+
+/* FIXME: negative rounding */
+static inline fpbuf_t _round(fp_t x)
+{
+	return _floor(x) + ((x.val >> ROUND_BIT) & 1);
+}
+
+/* multiply two fixed point values */
+static inline fp_t _mul(fp_t a, fp_t b)
+{
+	return _fp((a.val * b.val) >> FP_SHIFT);
+}
+
+static inline fp_t _div(fp_t a, fp_t b)
+{
+#if !defined(__KERNEL__) && !defined(unlikely)
+#define unlikely(x) (x)
+#define DO_UNDEF_UNLIKELY
+#endif
+	/* try not to overflow */
+	if (unlikely( a.val > (2l << ((sizeof(fpbuf_t)*8) - FP_SHIFT)) ))
+		return _fp((a.val / b.val) << FP_SHIFT);
+	else
+		return _fp((a.val << FP_SHIFT) / b.val);
+#ifdef DO_UNDEF_UNLIKELY
+#undef unlikely
+#undef DO_UNDEF_UNLIKELY
+#endif
+}
+
+static inline fp_t _add(fp_t a, fp_t b)
+{
+	return _fp(a.val + b.val);
+}
+
+static inline fp_t _sub(fp_t a, fp_t b)
+{
+	return _fp(a.val - b.val);
+}
+
+static inline fp_t _neg(fp_t x)
+{
+	return _fp(-x.val);
+}
+
+static inline fp_t _abs(fp_t x)
+{
+	return _fp(abs(x.val));
+}
+
+/* works the same as casting float/double to integer */
+static inline fpbuf_t _fp_to_integer(fp_t x)
+{
+	return _floor(_abs(x)) * ((x.val > 0) ? 1 : -1);
+}
+
+static inline fp_t _integer_to_fp(fpbuf_t x)
+{
+	return _frac(x,1);
+}
+
+static inline int _leq(fp_t a, fp_t b)
+{
+	return a.val <= b.val;
+}
+
+static inline int _geq(fp_t a, fp_t b)
+{
+	return a.val >= b.val;
+}
+
+static inline int _lt(fp_t a, fp_t b)
+{
+	return a.val < b.val;
+}
+
+static inline int _gt(fp_t a, fp_t b)
+{
+	return a.val > b.val;
+}
+
+static inline int _eq(fp_t a, fp_t b)
+{
+	return a.val == b.val;
+}
+
+static inline fp_t _max(fp_t a, fp_t b)
+{
+	if (a.val < b.val)
+		return b;
+	else
+		return a;
+}
+#endif
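Because the header guards its kernel-only pieces with __KERNEL__ (the user-space branch pulls in <stdint.h> and defines abs), the same arithmetic can be compiled and sanity-checked from user space. The snippet below is not part of the patch; the file name, the main() harness, and the assumption that the litmus headers are on the include path are purely illustrative. It only demonstrates how the Q10 format (FP_SHIFT = 10, so 1.0 is stored as 1024) composes through _frac(), _mul(), and _round():

/* fpmath_demo.c -- hypothetical user-space exercise of the new header */
#include <stdio.h>
#include "litmus/fpmath.h"

int main(void)
{
	fp_t third  = _frac(1, 3);          /* 1/3 ~= 341/1024 in Q10 */
	fp_t budget = _mul(third, FP(90));  /* 90 * 1/3, still in Q10 */

	printf("1/3      = " _FP_ "\n", fp2str(third));
	printf("90 * 1/3 = %lld (rounded)\n", (long long) _round(budget));
	return 0;
}

On an LP64 user-space build (where int64_t is long, matching the %ld in _FP_) this prints 341/1024 and 30.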
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 338245abd6ed..807b7888695a 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -63,7 +63,7 @@ void litmus_exit_task(struct task_struct *tsk);
 #define get_exec_time(t) (tsk_rt(t)->job_params.exec_time)
 #define get_deadline(t) (tsk_rt(t)->job_params.deadline)
 #define get_release(t) (tsk_rt(t)->job_params.release)
-
+#define get_lateness(t) (tsk_rt(t)->job_params.lateness)
 
 #define is_hrt(t) \
 	(tsk_rt(t)->task_params.cls == RT_CLASS_HARD)
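The new get_lateness() accessor pairs naturally with the fixed-point helpers above: a plugin can keep per-task statistics about how early or late jobs complete without floating point. The following sketch is not part of the patch; the helper name and the 1/8 smoothing weight are assumptions made only for illustration.

/* Hypothetical: exponentially weighted moving average of lateness,
 * kept in Q10 fixed point; nothing here is introduced by this patch. */
#include <linux/sched.h>
#include <litmus/litmus.h>	/* get_lateness() */
#include <litmus/fpmath.h>

static fp_t update_lateness_estimate(fp_t avg, struct task_struct *t)
{
	fp_t alpha  = _frac(1, 8);				/* assumed smoothing weight */
	fp_t sample = _integer_to_fp(get_lateness(t));		/* lateness in ns, signed */

	/* avg <- (1 - alpha) * avg + alpha * sample */
	return _add(_mul(_sub(FP(1), alpha), avg), _mul(alpha, sample));
}

Keeping the estimate in Q10 means a millisecond-scale lateness sample (~10^6 ns) still fits comfortably in the 64-bit fpbuf_t after the << 10 scaling.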
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 89ac0dda7d3d..fac939dbd33a 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -110,6 +110,12 @@ struct rt_job {
 	/* How much service has this job received so far? */
 	lt_t exec_time;
 
+	/* By how much did the prior job miss its deadline?
+	 * This value differs from tardiness in that lateness may
+	 * be negative (when the job finishes before its deadline).
+	 */
+	long long lateness;
+
 	/* Which job is this. This is used to let user space
 	 * specify which job to wait for, which is important if jobs
 	 * overrun. If we just call sys_sleep_next_period() then we
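For completeness, the producer side: job_params.lateness has to be filled in when a job completes, which is outside this diff. The sketch below only illustrates the signed computation; the helper name and the assumption that a nanosecond-resolution clock such as litmus_clock() is available in the surrounding LITMUS^RT code are not established by this patch.

/* Hypothetical completion-time bookkeeping; not part of this patch. */
#include <linux/sched.h>
#include <litmus/litmus.h>	/* tsk_rt(), get_deadline(); litmus_clock() assumed */
#include <litmus/rt_param.h>

static void record_lateness(struct task_struct *t)
{
	lt_t now = litmus_clock();	/* assumed ns-resolution time source */

	/* Signed on purpose: negative if the job finished before its
	 * deadline, positive (equal to its tardiness) if it missed it. */
	tsk_rt(t)->job_params.lateness =
		(long long) now - (long long) get_deadline(t);
}

A plugin that already tracks completions could call such a helper just before setting up the next job, so that get_lateness() always reflects the job that most recently finished.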