1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
|
/*
* Constant definitions related to
* scheduling policy.
*/
#ifndef _LINUX_LITMUS_H_
#define _LINUX_LITMUS_H_
#include <litmus/debug_trace.h>
#ifdef CONFIG_RELEASE_MASTER
extern atomic_t release_master_cpu;
#endif
/* in_list - is a given list_head queued on some list?
 *
 * A node is considered enqueued iff it is neither poisoned
 * (i.e., just removed via list_del()) nor self-linked
 * (i.e., freshly initialized / never enqueued).
 */
static inline int in_list(struct list_head* list)
{
	int deleted = (list->next == LIST_POISON1 &&
	               list->prev == LIST_POISON2);
	int initialized = (list->next == list &&
	                   list->prev == list);
	return !deleted && !initialized;
}
struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq);
#define NO_CPU 0xffffffff
void litmus_fork(struct task_struct *tsk);
void litmus_post_fork_thread(struct task_struct *tsk);
void litmus_exec(void);
/* clean up real-time state of a task */
void exit_litmus(struct task_struct *dead_tsk);
long litmus_admit_task(struct task_struct *tsk);
void litmus_pre_exit_task(struct task_struct *tsk); // called before litmus_exit_task, but without run queue locks held
void litmus_exit_task(struct task_struct *tsk);
#define is_realtime(t) ((t)->policy == SCHED_LITMUS)
#define rt_transition_pending(t) \
((t)->rt_param.transition_pending)
#define tsk_rt(t) (&(t)->rt_param)
#define tsk_aux(t) (&(t)->aux_data)
/* Realtime utility macros */
#define is_priority_boosted(t) (tsk_rt(t)->priority_boosted)
#define get_boost_start(t) (tsk_rt(t)->boost_start_time)
/* task_params macros */
#define get_exec_cost(t) (tsk_rt(t)->task_params.exec_cost)
#define get_rt_period(t) (tsk_rt(t)->task_params.period)
#define get_rt_relative_deadline(t) (tsk_rt(t)->task_params.relative_deadline)
#define get_rt_phase(t) (tsk_rt(t)->task_params.phase)
#define get_partition(t) (tsk_rt(t)->task_params.cpu)
#define get_priority(t) (tsk_rt(t)->task_params.priority)
#define get_class(t) (tsk_rt(t)->task_params.cls)
#define get_release_policy(t) (tsk_rt(t)->task_params.release_policy)
#define get_drain_policy(t) (tsk_rt(t)->task_params.drain_policy)
/* job_param macros — accessors for the per-job (current invocation)
 * state in rt_param.job_params. */
#define get_exec_time(t) (tsk_rt(t)->job_params.exec_time)
#define get_deadline(t) (tsk_rt(t)->job_params.deadline)
/* NOTE(review): get_period() reads task_params.period and is therefore a
 * duplicate of get_rt_period() above, despite sitting in the job_param
 * section — confirm whether one of the two should be retired. */
#define get_period(t) (tsk_rt(t)->task_params.period)
#define get_release(t) (tsk_rt(t)->job_params.release)
#define get_lateness(t) (tsk_rt(t)->job_params.lateness)
#define get_backlog(t) (tsk_rt(t)->job_params.backlog)
#define has_backlog(t) (get_backlog(t) != 0)
#define get_budget_timer(t) (tsk_rt(t)->budget)
/* effective_priority: the task whose priority 't' currently acts with —
 * the inheritance donor if one is set, otherwise 't' itself. */
#define effective_priority(t) ((!(tsk_rt(t)->inh_task)) ? t : tsk_rt(t)->inh_task)
#define base_priority(t) (t)
/* release policy macros */
#define is_periodic(t) (get_release_policy(t) == PERIODIC)
#define is_sporadic(t) (get_release_policy(t) == SPORADIC)
#ifdef CONFIG_ALLOW_EARLY_RELEASE
#define is_early_releasing(t) (get_release_policy(t) == EARLY)
#else
#define is_early_releasing(t) (0)
#endif
#define is_hrt(t) \
(tsk_rt(t)->task_params.cls == RT_CLASS_HARD)
#define is_srt(t) \
(tsk_rt(t)->task_params.cls == RT_CLASS_SOFT)
#define is_be(t) \
(tsk_rt(t)->task_params.cls == RT_CLASS_BEST_EFFORT)
/* budget-related functions and macros */
/* budget_exhausted - has this job consumed its entire provisioned
 * execution budget (exec_time has reached exec_cost)? */
inline static int budget_exhausted(struct task_struct* t)
{
	return !(get_exec_time(t) < get_exec_cost(t));
}
/* budget_remaining - execution budget left for the current job;
 * zero once the budget is exhausted (never negative). */
inline static int budget_remaining(struct task_struct* t)
{
	if (budget_exhausted(t))
		return 0;
	return get_exec_cost(t) - get_exec_time(t);
}
#define budget_enforced(t) (\
tsk_rt(t)->task_params.budget_policy != NO_ENFORCEMENT)
#define budget_precisely_tracked(t) (\
tsk_rt(t)->task_params.budget_policy == PRECISE_ENFORCEMENT || \
tsk_rt(t)->task_params.budget_signal_policy == PRECISE_SIGNALS)
#define budget_quantum_tracked(t) (\
tsk_rt(t)->task_params.budget_policy == QUANTUM_ENFORCEMENT || \
tsk_rt(t)->task_params.budget_signal_policy == QUANTUM_SIGNALS)
#define budget_signalled(t) (\
tsk_rt(t)->task_params.budget_signal_policy != NO_SIGNALS)
/* True iff the task is to be signalled precisely when its budget is
 * exhausted (as opposed to at quantum granularity).
 * Fix: this must test budget_signal_policy — PRECISE_SIGNALS is a
 * signal-policy constant, and the original comparison against
 * task_params.budget_policy (whose values are the *_ENFORCEMENT
 * constants, cf. budget_enforced/budget_precisely_tracked above)
 * could never match the intended setting. Mirrors budget_signalled. */
#define budget_precisely_signalled(t) (\
	tsk_rt(t)->task_params.budget_signal_policy == PRECISE_SIGNALS)
#define bt_flag_is_set(t, flag_nr) (\
test_bit(flag_nr, &tsk_rt(t)->budget.flags))
#define bt_flag_test_and_set(t, flag_nr) (\
test_and_set_bit(flag_nr, &tsk_rt(t)->budget.flags))
#define bt_flag_set(t, flag_nr) (\
set_bit(flag_nr, &tsk_rt(t)->budget.flags))
#define bt_flag_clear(t, flag_nr) (\
clear_bit(flag_nr, &tsk_rt(t)->budget.flags))
#define bt_flags_reset(t) (\
tsk_rt(t)->budget.flags = 0)
#define requeue_preempted_job(t) \
(t && (!budget_exhausted(t) || !budget_enforced(t)))
#ifdef CONFIG_LITMUS_LOCKING
/* set_inh_task_linkback - record in 'linkto' a back-pointer to 't'.
 *
 * 'linkto' keeps an array inh_task_linkbacks[] of tasks pointing at it,
 * with occupancy tracked by the used_linkback_slots bitmask; 't'
 * remembers its slot in inh_task_linkback_idx (-1 when unset).
 * Idempotent if the linkback is already consistent; BUG()s on an
 * inconsistent pre-existing index.
 * NOTE(review): no locking is performed here — presumably callers
 * serialize updates to these fields; confirm against call sites. */
static inline void set_inh_task_linkback(struct task_struct* t, struct task_struct* linkto)
{
	const int MAX_IDX = BITS_PER_LONG - 1;
	int success = 0;
	int old_idx = tsk_rt(t)->inh_task_linkback_idx;

	/* is the linkback already set? */
	if (old_idx >= 0 && old_idx <= MAX_IDX) {
		/* a recorded slot must agree with linkto's mask and array */
		if ((BIT_MASK(old_idx) & tsk_rt(linkto)->used_linkback_slots) &&
			(tsk_rt(linkto)->inh_task_linkbacks[old_idx] == t)) {
			TRACE_TASK(t, "linkback is current.\n");
			return;
		}
		BUG();
	}

	/* kludge: upper limit on num linkbacks — all slots taken */
	BUG_ON(tsk_rt(linkto)->used_linkback_slots == ~0ul);

	/* claim a free slot; retry if we race with another claimant */
	while(!success) {
		int b = find_first_zero_bit(&tsk_rt(linkto)->used_linkback_slots,
			BITS_PER_BYTE*sizeof(tsk_rt(linkto)->used_linkback_slots));
		BUG_ON(b > MAX_IDX);
		/* set bit... */
		if (!test_and_set_bit(b, &tsk_rt(linkto)->used_linkback_slots)) {
			TRACE_TASK(t, "linking back to %s/%d in slot %d\n", linkto->comm, linkto->pid, b);
			if (tsk_rt(linkto)->inh_task_linkbacks[b])
				TRACE_TASK(t, "%s/%d already has %s/%d in slot %d\n",
					linkto->comm, linkto->pid,
					tsk_rt(linkto)->inh_task_linkbacks[b]->comm,
					tsk_rt(linkto)->inh_task_linkbacks[b]->pid,
					b);
			/* TODO: allow dirty data to remain in [b] after code is tested */
			BUG_ON(tsk_rt(linkto)->inh_task_linkbacks[b] != NULL);
			/* ...before setting slot */
			tsk_rt(linkto)->inh_task_linkbacks[b] = t;
			tsk_rt(t)->inh_task_linkback_idx = b;
			success = 1;
		}
	}
}
/* clear_inh_task_linkback - undo set_inh_task_linkback(): drop t's
 * back-pointer from 'linkedto' and release the slot bit.
 * No-op if t records no linkback (idx < 0); BUG()s if the recorded
 * slot does not actually point back to 't'. */
static inline void clear_inh_task_linkback(struct task_struct* t, struct task_struct* linkedto)
{
	const int MAX_IDX = BITS_PER_LONG - 1;
	int success = 0;
	int slot = tsk_rt(t)->inh_task_linkback_idx;

	if (slot < 0) {
		TRACE_TASK(t, "assuming linkback already cleared.\n");
		return;
	}
	BUG_ON(slot > MAX_IDX);
	BUG_ON(tsk_rt(linkedto)->inh_task_linkbacks[slot] != t);

	/* be safe - clear slot before clearing the bit */
	tsk_rt(t)->inh_task_linkback_idx = -1;
	tsk_rt(linkedto)->inh_task_linkbacks[slot] = NULL;
	/* the bit must still have been set — otherwise bookkeeping is corrupt */
	success = test_and_clear_bit(slot, &tsk_rt(linkedto)->used_linkback_slots);
	BUG_ON(!success);
}
#endif
/* Our notion of time within LITMUS: kernel monotonic time. */
static inline lt_t litmus_clock(void)
{
return ktime_to_ns(ktime_get());
}
/* A macro to convert from nanoseconds to ktime_t. */
#define ns_to_ktime(t) ktime_add_ns(ktime_set(0, 0), t)
#define get_domain(t) (tsk_rt(t)->domain)
/* Honor the flag in the preempt_count variable that is set
* when scheduling is in progress.
*/
#define is_running(t) \
((t)->state == TASK_RUNNING || \
task_thread_info(t)->preempt_count & PREEMPT_ACTIVE)
#define is_blocked(t) \
(!is_running(t))
#define is_released(t, now) \
(lt_before_eq(get_release(t), now))
#define is_tardy(t, now) \
(lt_before_eq(tsk_rt(t)->job_params.deadline, now))
/* real-time comparison macros */
#define earlier_deadline(a, b) (lt_before(\
(a)->rt_param.job_params.deadline,\
(b)->rt_param.job_params.deadline))
#define shorter_period(a, b) (lt_before(\
(a)->rt_param.task_params.period,\
(b)->rt_param.task_params.period))
#define earlier_release(a, b) (lt_before(\
(a)->rt_param.job_params.release,\
(b)->rt_param.job_params.release))
void preempt_if_preemptable(struct task_struct* t, int on_cpu);
#ifdef CONFIG_LITMUS_LOCKING
void srp_ceiling_block(void);
#else
#define srp_ceiling_block() /* nothing */
#endif
#define bheap2task(hn) ((struct task_struct*) hn->value)
#ifdef CONFIG_NP_SECTION
/* Is 't' inside a kernel-level non-preemptive section?
 * Returns the raw nesting counter (nonzero == non-preemptive),
 * not a normalized boolean — callers log this value. */
static inline int is_kernel_np(struct task_struct *t)
{
	return tsk_rt(t)->kernel_np;
}
/* Is 't' inside a user-signaled non-preemptive section?
 * The flag lives in the userspace-shared control page; a task
 * without a control page is never user-np. */
static inline int is_user_np(struct task_struct *t)
{
	if (!tsk_rt(t)->ctrl_page)
		return 0;
	return tsk_rt(t)->ctrl_page->sched.np.flag;
}
/* Ask a user-np task to yield: mark its control page so userspace
 * calls back into the kernel when its critical section ends.
 * No effect if the task is not currently in a user np-section. */
static inline void request_exit_np(struct task_struct *t)
{
	if (!is_user_np(t))
		return;

	/* Set the flag that tells user space to call
	 * into the kernel at the end of a critical section. */
	if (likely(tsk_rt(t)->ctrl_page)) {
		TRACE_TASK(t, "setting delayed_preemption flag\n");
		tsk_rt(t)->ctrl_page->sched.np.preempt = 1;
	}
}
/* Enter a kernel-level non-preemptive section. Nestable: this
 * increments a counter that take_np() later decrements. */
static inline void make_np(struct task_struct *t)
{
	tsk_rt(t)->kernel_np++;
}
/* Leave one nesting level of a kernel-level non-preemptive section.
 * Returns the remaining nesting depth. Caller should check if
 * preemption is necessary when the function returns 0 (i.e., the
 * outermost np-section just ended).
 */
static inline int take_np(struct task_struct *t)
{
	return --tsk_rt(t)->kernel_np;
}
/* returns 0 if remote CPU needs an IPI to preempt, 1 if no IPI is required */
/* Atomically request that a user-np task yield, racing against
 * userspace clearing its np flag.
 * Returns 1 when no IPI is needed (preempt request already/now
 * delivered via the control page), 0 when the remote CPU must be
 * IPI'd to preempt. */
static inline int request_exit_np_atomic(struct task_struct *t)
{
	union np_flag old, new;

	if (tsk_rt(t)->ctrl_page) {
		/* snapshot the shared flag word once; all decisions below
		 * are made against this consistent snapshot */
		old.raw = tsk_rt(t)->ctrl_page->sched.raw;
		if (old.np.flag == 0) {
			/* no longer non-preemptive */
			return 0;
		} else if (old.np.preempt) {
			/* already set, nothing for us to do */
			return 1;
		} else {
			/* non preemptive and flag not set */
			new.raw = old.raw;
			new.np.preempt = 1;
			/* if we get old back, then we atomically set the flag */
			return cmpxchg(&tsk_rt(t)->ctrl_page->sched.raw, old.raw, new.raw) == old.raw;
			/* If we raced with a concurrent change, then so be
			 * it. Deliver it by IPI. We don't want an unbounded
			 * retry loop here since tasks might exploit that to
			 * keep the kernel busy indefinitely. */
		}
	}
	else {
		/* no control page => not in a user np-section; fall back to IPI */
		return 0;
	}
}
#else
/* !CONFIG_NP_SECTION stub: kernel np-sections do not exist. */
static inline int is_kernel_np(struct task_struct* t)
{
	return 0;
}
/* !CONFIG_NP_SECTION stub: user np-sections do not exist. */
static inline int is_user_np(struct task_struct* t)
{
	return 0;
}
/* !CONFIG_NP_SECTION stub: reaching here is a logic error, since no
 * task can ever be np without np-section support. */
static inline void request_exit_np(struct task_struct *t)
{
	/* request_exit_np() shouldn't be called if !CONFIG_NP_SECTION */
	BUG();
}
/* !CONFIG_NP_SECTION stub: always report that an IPI is required. */
static inline int request_exit_np_atomic(struct task_struct *t)
{
	return 0;
}
#endif
static inline void clear_exit_np(struct task_struct *t)
{
if (likely(tsk_rt(t)->ctrl_page))
tsk_rt(t)->ctrl_page->sched.np.preempt = 0;
}
/* Is the task non-preemptive for any reason, kernel- or user-level?
 * The debug-trace build logs both components before combining them;
 * both variants return the same truth value. */
static inline int is_np(struct task_struct *t)
{
#ifdef CONFIG_SCHED_DEBUG_TRACE
	int kernel, user;
	kernel = is_kernel_np(t);
	user = is_user_np(t);
	if (kernel || user)
		TRACE_TASK(t, " is non-preemptive: kernel=%d user=%d\n",
			kernel, user);
	return kernel || user;
#else
	return unlikely(is_kernel_np(t) || is_user_np(t));
#endif
}
/* A task is "present" when it exists and its rt_param 'present'
 * flag is set. Safe to call with t == NULL. */
static inline int is_present(struct task_struct* t)
{
	if (!t)
		return 0;
	return tsk_rt(t)->present != 0;
}
/* A task is "completed" when it exists and its rt_param 'completed'
 * flag is set. Safe to call with t == NULL. */
static inline int is_completed(struct task_struct* t)
{
	if (!t)
		return 0;
	return tsk_rt(t)->completed != 0;
}
/* make the unit explicit */
typedef unsigned long quanta_t;
enum round {
FLOOR,
CEIL
};
/* Tick period is used to convert ns-specified execution
* costs and periods into tick-based equivalents.
*/
extern ktime_t tick_period;
/* Convert a nanosecond time value into scheduling quanta, rounding
 * down (FLOOR) or up (CEIL) as requested.
 * do_div() leaves the quotient in 'time' and returns the remainder;
 * a nonzero remainder triggers the round-up for CEIL.
 * NOTE(review): do_div() conventionally takes a 32-bit divisor, but
 * quantum_length is declared s64 here — confirm the tick period
 * always fits in 32 bits on all supported configs. */
static inline quanta_t time2quanta(lt_t time, enum round round)
{
	s64 quantum_length = ktime_to_ns(tick_period);

	if (do_div(time, quantum_length) && round == CEIL)
		time++;
	return (quanta_t) time;
}
/* By how much is cpu staggered behind CPU 0? */
u64 cpu_stagger_offset(int cpu);
/* Return the task's userspace-shared control page, or NULL if the
 * task has not registered one. */
static inline struct control_page* get_control_page(struct task_struct *t)
{
	return tsk_rt(t)->ctrl_page;
}
/* Does the task have a userspace-shared control page registered? */
static inline int has_control_page(struct task_struct* t)
{
	struct control_page* cp = tsk_rt(t)->ctrl_page;
	return cp != NULL;
}
#ifdef CONFIG_SCHED_OVERHEAD_TRACE
#define TS_SYSCALL_IN_START \
if (has_control_page(current)) { \
__TS_SYSCALL_IN_START(&get_control_page(current)->ts_syscall_start); \
}
#define TS_SYSCALL_IN_END \
if (has_control_page(current)) { \
uint64_t irqs; \
local_irq_disable(); \
irqs = get_control_page(current)->irq_count - \
get_control_page(current)->irq_syscall_start; \
__TS_SYSCALL_IN_END(&irqs); \
local_irq_enable(); \
}
#else
#define TS_SYSCALL_IN_START
#define TS_SYSCALL_IN_END
#endif
#endif
|