author     Glenn Elliott <gelliott@cs.unc.edu>   2013-11-05 13:56:04 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>   2014-03-03 10:15:39 -0500
commit     7cb6b656924ee4a8e2887160097d0237365e40d1 (patch)
tree       282109773105988246ded616ddec6f12ffb57131
parent     1b451ec595b80e5032ae99958393f2ffec3c3fc8 (diff)
Add more robust budget tracking/enforcement.

This patch adds more feature-rich budget tracking and enforcement.
Budget tracking is now controlled by a state machine, and each task
can elect to use a different budget policy. Hooks are in place to
implement bandwidth inheritance (BWI) and virtually exclusive
resources.
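For illustration, a task opts into these behaviors through the new fields
added to struct rt_task below (drain_policy, budget_signal_policy) alongside
the existing budget_policy. A minimal sketch, assuming a liblitmus-style
set_rt_task_param() wrapper that is not part of this patch:

    #include <litmus/rt_param.h>

    /* Hypothetical parameter block: precise enforcement, suspension-
     * oblivious draining, and a SIG_BUDGET signal on exhaustion.
     * Only the field names come from this patch. */
    struct rt_task params = {
            .exec_cost            =  10000000ULL,  /*  10 ms */
            .period               = 100000000ULL,  /* 100 ms */
            .budget_policy        = PRECISE_ENFORCEMENT,
            .drain_policy         = DRAIN_SOBLIV,
            .budget_signal_policy = PRECISE_SIGNALS,
            .release_policy       = TASK_SPORADIC,
    };
    /* set_rt_task_param(gettid(), &params);  -- assumed user-space helper */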
-rw-r--r--   include/litmus/budget.h     | 161
-rw-r--r--   include/litmus/litmus.h     |  43
-rw-r--r--   include/litmus/locking.h    |   4
-rw-r--r--   include/litmus/rt_param.h   |  54
-rw-r--r--   include/litmus/signal.h     |  47
-rw-r--r--   kernel/sched/core.c         |   6
-rw-r--r--   kernel/sched/litmus.c       |  75
-rw-r--r--   litmus/Makefile             |   3
-rw-r--r--   litmus/aux_tasks.c          |   2
-rw-r--r--   litmus/budget.c             | 486
-rw-r--r--   litmus/jobs.c               |   4
-rw-r--r--   litmus/klmirqd.c            |   2
-rw-r--r--   litmus/locking.c            |  71
-rw-r--r--   litmus/sched_task_trace.c   |  13
-rw-r--r--   litmus/sync.c               |  12
15 files changed, 855 insertions, 128 deletions
diff --git a/include/litmus/budget.h b/include/litmus/budget.h index bd2d5c964f92..aff48d9218fa 100644 --- a/include/litmus/budget.h +++ b/include/litmus/budget.h | |||
@@ -1,36 +1,151 @@ | |||
1 | #ifndef _LITMUS_BUDGET_H_ | 1 | #ifndef _LITMUS_BUDGET_H_ |
2 | #define _LITMUS_BUDGET_H_ | 2 | #define _LITMUS_BUDGET_H_ |
3 | 3 | ||
4 | /* Update the per-processor enforcement timer (arm/reproram/cancel) for | 4 | #include <linux/hrtimer.h> |
5 | * the next task. */ | 5 | #include <linux/semaphore.h> |
6 | void update_enforcement_timer(struct task_struct* t); | ||
7 | 6 | ||
8 | inline static int budget_exhausted(struct task_struct* t) | 7 | #include <litmus/binheap.h> |
8 | |||
9 | struct enforcement_timer | ||
9 | { | 10 | { |
10 | return get_exec_time(t) >= get_exec_cost(t); | 11 | raw_spinlock_t lock; |
11 | } | 12 | struct hrtimer timer; |
13 | unsigned int job_when_armed; | ||
14 | unsigned int armed:1; | ||
15 | }; | ||
16 | |||
17 | int cancel_enforcement_timer(struct task_struct* t); | ||
18 | |||
19 | typedef void (*scheduled_t)(struct task_struct* t); | ||
20 | typedef void (*blocked_t)(struct task_struct* t); | ||
21 | typedef void (*preempt_t)(struct task_struct* t); | ||
22 | typedef void (*sleep_t)(struct task_struct* t); | ||
23 | typedef void (*wakeup_t)(struct task_struct* t); | ||
24 | |||
25 | #define IN_SCHEDULE 1 | ||
26 | |||
27 | typedef enum hrtimer_restart (*exhausted_t)(struct task_struct* t, | ||
28 | int in_schedule); | ||
29 | typedef void (*exit_t)(struct task_struct* t); | ||
30 | typedef void (*inherit_t)(struct task_struct* t, struct task_struct* prio_inh); | ||
31 | typedef void (*disinherit_t)(struct task_struct* t, | ||
32 | struct task_struct* prio_inh); | ||
33 | |||
34 | typedef void (*enter_top_m_t)(struct task_struct* t); | ||
35 | typedef void (*exit_top_m_t)(struct task_struct* t); | ||
12 | 36 | ||
13 | inline static lt_t budget_remaining(struct task_struct* t) | 37 | struct budget_tracker_ops |
14 | { | 38 | { |
15 | if (!budget_exhausted(t)) | 39 | scheduled_t on_scheduled; /* called from litmus_schedule(). */ |
16 | return get_exec_cost(t) - get_exec_time(t); | 40 | blocked_t on_blocked; /* called from plugin::schedule() */ |
17 | else | 41 | preempt_t on_preempt; /* called from plugin::schedule() */ |
18 | /* avoid overflow */ | 42 | sleep_t on_sleep; /* called from plugin::schedule() */ |
19 | return 0; | 43 | wakeup_t on_wakeup; |
20 | } | ||
21 | 44 | ||
22 | #define budget_enforced(t) (tsk_rt(t)->task_params.budget_policy != NO_ENFORCEMENT) | 45 | exit_t on_exit; /* task exiting rt mode */ |
23 | 46 | ||
24 | #define budget_precisely_enforced(t) (tsk_rt(t)->task_params.budget_policy \ | 47 | /* called by plugin::tick() or timer interrupt */ |
25 | == PRECISE_ENFORCEMENT) | 48 | exhausted_t on_exhausted; |
26 | 49 | ||
27 | static inline int requeue_preempted_job(struct task_struct* t) | 50 | /* inheritance callbacks for bandwidth inheritance-related |
51 | budget tracking/enforcement methods */ | ||
52 | inherit_t on_inherit; | ||
53 | disinherit_t on_disinherit; | ||
54 | |||
55 | enter_top_m_t on_enter_top_m; /* task enters top-m priority tasks */ | ||
56 | exit_top_m_t on_exit_top_m; /* task exits top-m priority tasks */ | ||
57 | }; | ||
58 | |||
59 | struct budget_tracker | ||
60 | { | ||
61 | struct enforcement_timer timer; | ||
62 | const struct budget_tracker_ops* ops; | ||
63 | unsigned long flags; | ||
64 | |||
65 | struct binheap_node top_m_node; | ||
66 | lt_t suspend_timestamp; | ||
67 | }; | ||
68 | |||
69 | /* budget tracker flags */ | ||
70 | enum BT_FLAGS | ||
28 | { | 71 | { |
29 | /* Add task to ready queue only if not subject to budget enforcement or | 72 | BTF_BUDGET_EXHAUSTED = 0, |
30 | * if the job has budget remaining. t may be NULL. | 73 | BTF_SIG_BUDGET_SENT = 1, |
31 | */ | 74 | BTF_IS_TOP_M = 2, |
32 | return t && !is_completed(t) && | 75 | BTF_WAITING_FOR_RELEASE = 3, |
33 | (!budget_exhausted(t) || !budget_enforced(t)); | 76 | }; |
34 | } | 77 | |
78 | /* Functions for simple DRAIN_SIMPLE policy common | ||
79 | * to every scheduler. Scheduler must provide | ||
80 | * implementation for simple_on_exhausted(). | ||
81 | */ | ||
82 | void simple_on_scheduled(struct task_struct* t); | ||
83 | void simple_on_blocked(struct task_struct* t); | ||
84 | void simple_on_preempt(struct task_struct* t); | ||
85 | void simple_on_sleep(struct task_struct* t); | ||
86 | void simple_on_exit(struct task_struct* t); | ||
87 | |||
88 | |||
89 | /* Functions for DRAIN_SIMPLE_IO policy common | ||
90 | * to every scheduler. Scheduler must provide | ||
91 | * implementation for simple_io_on_exhausted(). | ||
92 | */ | ||
93 | #define simple_io_on_scheduled simple_on_scheduled | ||
94 | void simple_io_on_blocked(struct task_struct* t); | ||
95 | void simple_io_on_wakeup(struct task_struct* t); | ||
96 | #define simple_io_on_preempt simple_on_preempt | ||
97 | #define simple_io_on_sleep simple_on_sleep | ||
98 | #define simple_io_on_exit simple_on_exit | ||
99 | |||
100 | |||
101 | /* Functions for DRAIN_SOBLIV policy common | ||
102 | * to every scheduler. Scheduler must provide | ||
103 | * implementation for sobliv_on_exhausted(). | ||
104 | * | ||
105 | * Limitation: Quantum budget tracking is unsupported. | ||
106 | */ | ||
107 | void sobliv_on_blocked(struct task_struct* t); | ||
108 | void sobliv_on_wakeup(struct task_struct* t); | ||
109 | #define sobliv_on_exit simple_on_exit | ||
110 | void sobliv_on_inherit(struct task_struct* t, struct task_struct* prio_inh); | ||
111 | void sobliv_on_disinherit(struct task_struct* t, struct task_struct* prio_inh); | ||
112 | void sobliv_on_enter_top_m(struct task_struct* t); | ||
113 | void sobliv_on_exit_top_m(struct task_struct* t); | ||
114 | |||
115 | void reevaluate_inheritance(struct task_struct* t); | ||
116 | |||
117 | #define budget_state_machine(t, evt) \ | ||
118 | do { \ | ||
119 | if (get_budget_timer(t).ops && \ | ||
120 | get_budget_timer(t).ops->evt != NULL) { \ | ||
121 | get_budget_timer(t).ops->evt(t); \ | ||
122 | } \ | ||
123 | }while(0) | ||
124 | |||
125 | #define budget_state_machine2(t, evt, param) \ | ||
126 | do { \ | ||
127 | if (get_budget_timer(t).ops && \ | ||
128 | get_budget_timer(t).ops->evt != NULL) { \ | ||
129 | get_budget_timer(t).ops->evt(t, param); \ | ||
130 | } \ | ||
131 | }while(0) | ||
132 | |||
133 | #define budget_state_machine_chgprio(a, b, evt) \ | ||
134 | do { \ | ||
135 | if (get_budget_timer(a).ops && \ | ||
136 | get_budget_timer(b).ops && \ | ||
137 | get_budget_timer(a).ops->evt != NULL && \ | ||
138 | get_budget_timer(b).ops->evt != NULL) {\ | ||
139 | get_budget_timer(a).ops->evt(a, b); \ | ||
140 | } \ | ||
141 | }while(0) | ||
142 | |||
143 | |||
144 | void init_budget_tracker(struct budget_tracker* bt, | ||
145 | const struct budget_tracker_ops* ops); | ||
146 | |||
147 | |||
148 | /* Send SIG_BUDGET to a real-time task. */ | ||
149 | void send_sigbudget(struct task_struct* t); | ||
35 | 150 | ||
36 | #endif | 151 | #endif |
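As a rough sketch of how a scheduler plugin might wire these hooks together:
the DRAIN_SIMPLE helpers declared above are paired with a plugin-supplied
on_exhausted handler and installed with init_budget_tracker(). The my_plugin_*
names are assumptions for illustration, not part of this patch:

    /* Assumed plugin-specific exhaustion handler; the real policy
     * (force job completion, reschedule, etc.) is up to the plugin. */
    static enum hrtimer_restart my_plugin_simple_on_exhausted(
            struct task_struct* t, int in_schedule)
    {
            return HRTIMER_NORESTART;
    }

    static const struct budget_tracker_ops my_plugin_drain_simple_ops = {
            .on_scheduled = simple_on_scheduled,
            .on_blocked   = simple_on_blocked,
            .on_preempt   = simple_on_preempt,
            .on_sleep     = simple_on_sleep,
            .on_exit      = simple_on_exit,
            .on_exhausted = my_plugin_simple_on_exhausted,
    };

    /* e.g. from plugin::task_new(), when a task is admitted: */
    /* init_budget_tracker(&tsk_rt(t)->budget, &my_plugin_drain_simple_ops); */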
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h index 74d9426b66d9..861ffd20df9e 100644 --- a/include/litmus/litmus.h +++ b/include/litmus/litmus.h | |||
@@ -68,16 +68,59 @@ void litmus_do_exit(struct task_struct *tsk); | |||
68 | #define get_priority(t) (tsk_rt(t)->task_params.priority) | 68 | #define get_priority(t) (tsk_rt(t)->task_params.priority) |
69 | #define get_class(t) (tsk_rt(t)->task_params.cls) | 69 | #define get_class(t) (tsk_rt(t)->task_params.cls) |
70 | #define get_release_policy(t) (tsk_rt(t)->task_params.release_policy) | 70 | #define get_release_policy(t) (tsk_rt(t)->task_params.release_policy) |
71 | #define get_drain_policy(t) (tsk_rt(t)->task_params.drain_policy) | ||
71 | 72 | ||
72 | /* job_param macros */ | 73 | /* job_param macros */ |
73 | #define get_exec_time(t) (tsk_rt(t)->job_params.exec_time) | 74 | #define get_exec_time(t) (tsk_rt(t)->job_params.exec_time) |
74 | #define get_deadline(t) (tsk_rt(t)->job_params.deadline) | 75 | #define get_deadline(t) (tsk_rt(t)->job_params.deadline) |
75 | #define get_release(t) (tsk_rt(t)->job_params.release) | 76 | #define get_release(t) (tsk_rt(t)->job_params.release) |
76 | #define get_lateness(t) (tsk_rt(t)->job_params.lateness) | 77 | #define get_lateness(t) (tsk_rt(t)->job_params.lateness) |
78 | #define get_backlog(t) (tsk_rt(t)->job_params.backlog) | ||
77 | 79 | ||
78 | #define effective_priority(t) ((!(tsk_rt(t)->inh_task)) ? t : tsk_rt(t)->inh_task) | 80 | #define effective_priority(t) ((!(tsk_rt(t)->inh_task)) ? t : tsk_rt(t)->inh_task) |
79 | #define base_priority(t) (t) | 81 | #define base_priority(t) (t) |
80 | 82 | ||
83 | /* budget-related functions and macros */ | ||
84 | |||
85 | inline static int budget_exhausted(struct task_struct* t) { | ||
86 | return get_exec_time(t) >= get_exec_cost(t); | ||
87 | } | ||
88 | |||
89 | inline static lt_t budget_remaining(struct task_struct* t) { | ||
90 | return (!budget_exhausted(t)) ? (get_exec_cost(t) - get_exec_time(t)) : 0; | ||
91 | } | ||
92 | |||
93 | #define has_backlog(t) (get_backlog(t) != 0) | ||
94 | #define get_budget_timer(t) (tsk_rt(t)->budget) | ||
95 | #define budget_enforced(t) (\ | ||
96 | tsk_rt(t)->task_params.budget_policy != NO_ENFORCEMENT) | ||
97 | #define budget_precisely_tracked(t) (\ | ||
98 | tsk_rt(t)->task_params.budget_policy == PRECISE_ENFORCEMENT || \ | ||
99 | tsk_rt(t)->task_params.budget_signal_policy == PRECISE_SIGNALS) | ||
100 | #define budget_quantum_tracked(t) (\ | ||
101 | tsk_rt(t)->task_params.budget_policy == QUANTUM_ENFORCEMENT || \ | ||
102 | tsk_rt(t)->task_params.budget_signal_policy == QUANTUM_SIGNALS) | ||
103 | #define budget_signalled(t) (\ | ||
104 | tsk_rt(t)->task_params.budget_signal_policy != NO_SIGNALS) | ||
105 | #define budget_precisely_signalled(t) (\ | ||
106 | tsk_rt(t)->task_params.budget_signal_policy == PRECISE_SIGNALS) | ||
107 | #define bt_flag_is_set(t, flag_nr) (\ | ||
108 | test_bit(flag_nr, &tsk_rt(t)->budget.flags)) | ||
109 | #define bt_flag_test_and_set(t, flag_nr) (\ | ||
110 | test_and_set_bit(flag_nr, &tsk_rt(t)->budget.flags)) | ||
111 | #define bt_flag_test_and_clear(t, flag_nr) (\ | ||
112 | test_and_clear_bit(flag_nr, &tsk_rt(t)->budget.flags)) | ||
113 | #define bt_flag_set(t, flag_nr) (\ | ||
114 | set_bit(flag_nr, &tsk_rt(t)->budget.flags)) | ||
115 | #define bt_flag_clear(t, flag_nr) (\ | ||
116 | clear_bit(flag_nr, &tsk_rt(t)->budget.flags)) | ||
117 | #define bt_flags_reset(t) (\ | ||
118 | tsk_rt(t)->budget.flags = 0) | ||
119 | //#define should_requeue_preempted_job(t) | ||
120 | #define requeue_preempted_job(t) \ | ||
121 | (t && !is_completed(t) && !tsk_rt(t)->dont_requeue && \ | ||
122 | (!budget_exhausted(t) || !budget_enforced(t))) | ||
123 | |||
81 | /* release policy macros */ | 124 | /* release policy macros */ |
82 | #define is_periodic(t) (get_release_policy(t) == TASK_PERIODIC) | 125 | #define is_periodic(t) (get_release_policy(t) == TASK_PERIODIC) |
83 | #define is_sporadic(t) (get_release_policy(t) == TASK_SPORADIC) | 126 | #define is_sporadic(t) (get_release_policy(t) == TASK_SPORADIC) |
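The requeue_preempted_job() macro above replaces the inline helper formerly in
budget.h and adds a dont_requeue check: a preempted task goes back on the
ready queue only if it is incomplete, allowed to requeue, and either has
remaining budget or is not subject to enforcement. A hedged sketch of its
typical call site in a plugin's schedule() path (requeue() stands in for the
plugin's own ready-queue insertion and is not defined by this patch):

    /* inside plugin::schedule(), after deciding to preempt 'prev' */
    if (requeue_preempted_job(prev))
            requeue(prev);   /* plugin-specific ready-queue insertion */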
diff --git a/include/litmus/locking.h b/include/litmus/locking.h index 04f4e7710a4e..a49099cc84dd 100644 --- a/include/litmus/locking.h +++ b/include/litmus/locking.h | |||
@@ -280,4 +280,8 @@ void init_wake_queues(void); | |||
280 | !binheap_empty(&tsk_rt(t)->hp_blocked_tasks)) | 280 | !binheap_empty(&tsk_rt(t)->hp_blocked_tasks)) |
281 | #endif | 281 | #endif |
282 | 282 | ||
283 | void set_inh_task_linkback(struct task_struct* t, struct task_struct* linkto); | ||
284 | void clear_inh_task_linkback(struct task_struct* t, | ||
285 | struct task_struct* linkedto); | ||
286 | |||
283 | #endif | 287 | #endif |
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h index efae2fbf8371..f28afc43e801 100644 --- a/include/litmus/rt_param.h +++ b/include/litmus/rt_param.h | |||
@@ -33,6 +33,23 @@ typedef enum { | |||
33 | PRECISE_ENFORCEMENT /* budgets are enforced with hrtimers */ | 33 | PRECISE_ENFORCEMENT /* budgets are enforced with hrtimers */ |
34 | } budget_policy_t; | 34 | } budget_policy_t; |
35 | 35 | ||
36 | /* budget draining policy (ignored if neither budget enforcement nor | ||
37 | signalling are used). */ | ||
38 | typedef enum { | ||
39 | DRAIN_SIMPLE, /* drains while task is linked */ | ||
40 | DRAIN_SIMPLE_IO, /* drains while task is linked or blocked | ||
41 | (not waiting for Litmus lock) */ | ||
42 | DRAIN_SAWARE, /* drains according to suspension-aware analysis */ | ||
43 | DRAIN_SOBLIV /* drains according to suspension-obliv analysis */ | ||
44 | } budget_drain_policy_t; | ||
45 | |||
46 | /* signal policy for budget exhaustion */ | ||
47 | typedef enum { | ||
48 | NO_SIGNALS, /* job receives no signals when it exhausts its budget */ | ||
49 | QUANTUM_SIGNALS, /*budget signals are only sent on quantum boundaries */ | ||
50 | PRECISE_SIGNALS /* budget signals are triggered with hrtimers */ | ||
51 | } budget_signal_policy_t; | ||
52 | |||
36 | /* Release behaviors for jobs. PERIODIC and EARLY jobs | 53 | /* Release behaviors for jobs. PERIODIC and EARLY jobs |
37 | must end by calling sys_complete_job() (or equivalent) | 54 | must end by calling sys_complete_job() (or equivalent) |
38 | to set up their next release and deadline. */ | 55 | to set up their next release and deadline. */ |
@@ -90,6 +107,8 @@ struct rt_task { | |||
90 | unsigned int priority; | 107 | unsigned int priority; |
91 | task_class_t cls; | 108 | task_class_t cls; |
92 | budget_policy_t budget_policy; /* ignored by pfair */ | 109 | budget_policy_t budget_policy; /* ignored by pfair */ |
110 | budget_drain_policy_t drain_policy; | ||
111 | budget_signal_policy_t budget_signal_policy; /* ignored by pfair */ | ||
93 | release_policy_t release_policy; | 112 | release_policy_t release_policy; |
94 | }; | 113 | }; |
95 | 114 | ||
@@ -171,6 +190,7 @@ struct control_page { | |||
171 | 190 | ||
172 | #include <linux/semaphore.h> | 191 | #include <linux/semaphore.h> |
173 | #include <litmus/binheap.h> | 192 | #include <litmus/binheap.h> |
193 | #include <litmus/budget.h> | ||
174 | 194 | ||
175 | #ifdef CONFIG_LITMUS_SOFTIRQD | 195 | #ifdef CONFIG_LITMUS_SOFTIRQD |
176 | #include <linux/interrupt.h> | 196 | #include <linux/interrupt.h> |
@@ -263,6 +283,19 @@ struct rt_job { | |||
263 | * Increase this sequence number when a job is released. | 283 | * Increase this sequence number when a job is released. |
264 | */ | 284 | */ |
265 | unsigned int job_no; | 285 | unsigned int job_no; |
286 | |||
287 | /* Increments each time a job is forced to complete by budget exhaustion. | ||
288 | * If a job completes without remaining budget, the next job will be early- | ||
289 | * released __without__ pushing back its deadline. job_backlog is | ||
290 | * decremented once per early release. This behavior continues until | ||
291 | * backlog == 0. | ||
292 | */ | ||
293 | unsigned int backlog; | ||
294 | |||
295 | /* Denotes if the current job is a backlogged job that was early released | ||
296 | * due to budget enforcement behaviors. | ||
297 | */ | ||
298 | unsigned int is_backlogged_job:1; | ||
266 | }; | 299 | }; |
267 | 300 | ||
268 | struct pfair_param; | 301 | struct pfair_param; |
@@ -344,6 +377,8 @@ struct rt_param { | |||
344 | unsigned int sporadic_release:1; | 377 | unsigned int sporadic_release:1; |
345 | lt_t sporadic_release_time; | 378 | lt_t sporadic_release_time; |
346 | 379 | ||
380 | /* budget tracking/enforcement method and data assigned to this task */ | ||
381 | struct budget_tracker budget; | ||
347 | 382 | ||
348 | /* task representing the current "inherited" task | 383 | /* task representing the current "inherited" task |
349 | * priority, assigned by inherit_priority and | 384 | * priority, assigned by inherit_priority and |
@@ -353,6 +388,17 @@ struct rt_param { | |||
353 | */ | 388 | */ |
354 | struct task_struct* inh_task; | 389 | struct task_struct* inh_task; |
355 | 390 | ||
391 | /* budget enforcement methods may require knowledge of tasks that | ||
392 | * inherit this task's priority. There may be more than one such | ||
393 | * task w/ priority inheritance chains. | ||
394 | */ | ||
395 | int inh_task_linkback_idx; /* idx in inh_task's | ||
396 | inh_task_linkbacks array. */ | ||
397 | |||
398 | struct task_struct** inh_task_linkbacks; /* array w/ BITS_PER_LONG elm */ | ||
399 | unsigned long used_linkback_slots; /* nr used slots | ||
400 | in inh_task_linkbacks */ | ||
401 | |||
356 | #ifdef CONFIG_NP_SECTION | 402 | #ifdef CONFIG_NP_SECTION |
357 | /* For the FMLP under PSN-EDF, it is required to make the task | 403 | /* For the FMLP under PSN-EDF, it is required to make the task |
358 | * non-preemptive from kernel space. In order not to interfere with | 404 | * non-preemptive from kernel space. In order not to interfere with |
@@ -426,17 +472,19 @@ struct rt_param { | |||
426 | #endif /* end LITMUS_SOFTIRQD */ | 472 | #endif /* end LITMUS_SOFTIRQD */ |
427 | 473 | ||
428 | #ifdef CONFIG_REALTIME_AUX_TASKS | 474 | #ifdef CONFIG_REALTIME_AUX_TASKS |
475 | /* Real-time data for auxiliary tasks */ | ||
476 | struct list_head aux_task_node; | ||
477 | struct binheap_node aux_task_owner_node; | ||
478 | |||
429 | unsigned int is_aux_task:1; | 479 | unsigned int is_aux_task:1; |
430 | unsigned int aux_ready:1; | 480 | unsigned int aux_ready:1; |
431 | unsigned int has_aux_tasks:1; | 481 | unsigned int has_aux_tasks:1; |
432 | unsigned int hide_from_aux_tasks:1; | 482 | unsigned int hide_from_aux_tasks:1; |
433 | |||
434 | struct list_head aux_task_node; | ||
435 | struct binheap_node aux_task_owner_node; | ||
436 | #endif | 483 | #endif |
437 | }; | 484 | }; |
438 | 485 | ||
439 | #ifdef CONFIG_REALTIME_AUX_TASKS | 486 | #ifdef CONFIG_REALTIME_AUX_TASKS |
487 | /* Auxiliary task data. Appears in task_struct, like rt_param */ | ||
440 | struct aux_data { | 488 | struct aux_data { |
441 | struct list_head aux_tasks; | 489 | struct list_head aux_tasks; |
442 | struct binheap aux_task_owners; | 490 | struct binheap aux_task_owners; |
diff --git a/include/litmus/signal.h b/include/litmus/signal.h new file mode 100644 index 000000000000..38c3207951e0 --- /dev/null +++ b/include/litmus/signal.h | |||
@@ -0,0 +1,47 @@ | |||
1 | #ifndef LITMUS_SIGNAL_H | ||
2 | #define LITMUS_SIGNAL_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | #include <linux/signal.h> | ||
6 | #else | ||
7 | #include <signal.h> | ||
8 | #endif | ||
9 | |||
10 | /* Signals used by Litmus to asynchronously communicate events | ||
11 | * to real-time tasks. | ||
12 | * | ||
13 | * Signal values overlap with [SIGRTMIN, SIGRTMAX], so beware of | ||
14 | * application-level conflicts when dealing with COTS user-level | ||
15 | * code. | ||
16 | */ | ||
17 | |||
18 | /* Sent to a Litmus task when all of the following conditions are true: | ||
19 | * (1) The task has exhausted its budget. | ||
20 | * (2) budget_signal_policy is QUANTUM_SIGNALS or PRECISE_SIGNALS. | ||
21 | * | ||
22 | * Note: If a task does not have a registered handler for SIG_BUDGET, | ||
23 | * the signal will cause the task to terminate (default action). | ||
24 | */ | ||
25 | |||
26 | /* Assigned values start at SIGRTMAX and decrease, hopefully reducing | ||
27 | * likelihood of user-level conflicts. | ||
28 | */ | ||
29 | #define SIG_BUDGET (SIGRTMAX - 0) | ||
30 | |||
31 | /* | ||
32 | Future signals could include: | ||
33 | |||
34 | #define SIG_DEADLINE_MISS (SIGRTMAX - 1) | ||
35 | #define SIG_CRIT_LEVEL_CHANGE (SIGRTMAX - 2) | ||
36 | */ | ||
37 | |||
38 | #define SIGLITMUSMIN SIG_BUDGET | ||
39 | |||
40 | #ifdef __KERNEL__ | ||
41 | #if (SIGLITMUSMIN < SIGRTMIN) | ||
42 | /* no compile-time check in user-space since SIGRTMIN may be a variable. */ | ||
43 | #error "Too many LITMUS^RT signals!" | ||
44 | #endif | ||
45 | #endif | ||
46 | |||
47 | #endif | ||
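Since an unhandled SIG_BUDGET falls through to the default action and kills
the task, a user-space sketch of registering a handler may help. The
liblitmus export of this header and the handler body are assumptions:

    #include <signal.h>
    #include <stdio.h>
    #include <litmus/signal.h>   /* user-space copy of this header (assumed) */

    static volatile sig_atomic_t budget_overruns;

    static void on_sig_budget(int sig)
    {
            /* async-signal-safe work only: just note the overrun */
            budget_overruns++;
    }

    int main(void)
    {
            struct sigaction sa = { .sa_handler = on_sig_budget };
            sigemptyset(&sa.sa_mask);
            if (sigaction(SIG_BUDGET, &sa, NULL) != 0)
                    perror("sigaction(SIG_BUDGET)");
            /* ... become a real-time task with QUANTUM_SIGNALS or
             *     PRECISE_SIGNALS and run the job loop ... */
            return 0;
    }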
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 8fbdf5b639fa..c62ed018d994 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -92,6 +92,7 @@ | |||
92 | #include <litmus/sched_plugin.h> | 92 | #include <litmus/sched_plugin.h> |
93 | 93 | ||
94 | void litmus_tick(struct rq*, struct task_struct*); | 94 | void litmus_tick(struct rq*, struct task_struct*); |
95 | void litmus_handle_budget_exhaustion(struct task_struct*); | ||
95 | 96 | ||
96 | #define CREATE_TRACE_POINTS | 97 | #define CREATE_TRACE_POINTS |
97 | #include <trace/events/sched.h> | 98 | #include <trace/events/sched.h> |
@@ -3107,6 +3108,11 @@ litmus_need_resched_nonpreemptible: | |||
3107 | 3108 | ||
3108 | post_schedule(rq); | 3109 | post_schedule(rq); |
3109 | 3110 | ||
3111 | if (is_realtime(current) && | ||
3112 | unlikely(budget_enforced(current) && budget_exhausted(current))) { | ||
3113 | litmus_handle_budget_exhaustion(current); | ||
3114 | } | ||
3115 | |||
3110 | if (sched_state_validate_switch()) { | 3116 | if (sched_state_validate_switch()) { |
3111 | TS_SCHED2_END(prev); | 3117 | TS_SCHED2_END(prev); |
3112 | goto litmus_need_resched_nonpreemptible; | 3118 | goto litmus_need_resched_nonpreemptible; |
diff --git a/kernel/sched/litmus.c b/kernel/sched/litmus.c index 857a8c8443bd..23ce659dc62c 100644 --- a/kernel/sched/litmus.c +++ b/kernel/sched/litmus.c | |||
@@ -93,7 +93,7 @@ litmus_schedule(struct rq *rq, struct task_struct *prev) | |||
93 | TRACE_TASK(next,"descheduled. Proceeding.\n"); | 93 | TRACE_TASK(next,"descheduled. Proceeding.\n"); |
94 | 94 | ||
95 | if (lt_before(_maybe_deadlock + 1000000000L, | 95 | if (lt_before(_maybe_deadlock + 1000000000L, |
96 | litmus_clock())) { | 96 | litmus_clock())) { |
97 | /* We've been spinning for 1s. | 97 | /* We've been spinning for 1s. |
98 | * Something can't be right! | 98 | * Something can't be right! |
99 | * Let's abandon the task and bail out; at least | 99 | * Let's abandon the task and bail out; at least |
@@ -138,8 +138,6 @@ litmus_schedule(struct rq *rq, struct task_struct *prev) | |||
138 | } | 138 | } |
139 | } | 139 | } |
140 | 140 | ||
141 | set_task_cpu(next, smp_processor_id()); | ||
142 | |||
143 | /* DEBUG: now that we have the lock we need to make sure a | 141 | /* DEBUG: now that we have the lock we need to make sure a |
144 | * couple of things still hold: | 142 | * couple of things still hold: |
145 | * - it is still a real-time task | 143 | * - it is still a real-time task |
@@ -147,7 +145,7 @@ litmus_schedule(struct rq *rq, struct task_struct *prev) | |||
147 | * If either is violated, then the active plugin is | 145 | * If either is violated, then the active plugin is |
148 | * doing something wrong. | 146 | * doing something wrong. |
149 | */ | 147 | */ |
150 | if (!is_realtime(next) || !is_running(next)) { | 148 | if (unlikely(!is_realtime(next) || !is_running(next))) { |
151 | /* BAD BAD BAD */ | 149 | /* BAD BAD BAD */ |
152 | TRACE_TASK(next,"BAD: migration invariant FAILED: " | 150 | TRACE_TASK(next,"BAD: migration invariant FAILED: " |
153 | "rt=%d running=%d\n", | 151 | "rt=%d running=%d\n", |
@@ -156,6 +154,10 @@ litmus_schedule(struct rq *rq, struct task_struct *prev) | |||
156 | /* drop the task */ | 154 | /* drop the task */ |
157 | next = NULL; | 155 | next = NULL; |
158 | } | 156 | } |
157 | else { | ||
158 | set_task_cpu(next, smp_processor_id()); | ||
159 | } | ||
160 | |||
159 | /* release the other CPU's runqueue, but keep ours */ | 161 | /* release the other CPU's runqueue, but keep ours */ |
160 | raw_spin_unlock(&other_rq->lock); | 162 | raw_spin_unlock(&other_rq->lock); |
161 | } | 163 | } |
@@ -168,12 +170,71 @@ litmus_schedule(struct rq *rq, struct task_struct *prev) | |||
168 | next->rt_param.stack_in_use = 0; | 170 | next->rt_param.stack_in_use = 0; |
169 | #endif | 171 | #endif |
170 | next->se.exec_start = rq->clock; | 172 | next->se.exec_start = rq->clock; |
173 | |||
174 | if (is_realtime(next)) { | ||
175 | budget_state_machine(next, on_scheduled); | ||
176 | |||
177 | #ifdef CONFIG_LITMUS_AFFINITY_AWARE_GPU_ASSINGMENT | ||
178 | /* turn GPU tracking back on if needed */ | ||
179 | if(tsk_rt(next)->held_gpus) { | ||
180 | if(0 == tsk_rt(next)->gpu_time_stamp) { | ||
181 | start_gpu_tracker(next); | ||
182 | } | ||
183 | } | ||
184 | #endif | ||
185 | } | ||
171 | } | 186 | } |
172 | 187 | ||
173 | update_enforcement_timer(next); | ||
174 | return next; | 188 | return next; |
175 | } | 189 | } |
176 | 190 | ||
191 | void litmus_handle_budget_exhaustion(struct task_struct *t) | ||
192 | { | ||
193 | /* We're unlikely to pick a task that has an exhausted budget, so this | ||
194 | * provides a failsafe. */ | ||
195 | |||
196 | /* BUG: Virtual unlock of OMLP-family locking protocols is not triggered. | ||
197 | * | ||
198 | * TODO-FIX: Add a new virtual-unlock call to budget state machine and do | ||
199 | * the virtual unlock in plugin::schedule(), instead of in budget | ||
200 | * timer handler. This bug should only be raised EXTREMELY infrequently. | ||
201 | */ | ||
202 | |||
203 | int handle_exhaustion = 1; | ||
204 | |||
205 | BUG_ON(current != t); | ||
206 | |||
207 | if (is_np(t) && bt_flag_is_set(t, BTF_BUDGET_EXHAUSTED)) { | ||
208 | /* ignore. will handle exhaustion in the future. */ | ||
209 | TRACE_TASK(t, "Task is np and already flagged as exhausted. " | ||
210 | "Allow scheduling.\n"); | ||
211 | return; | ||
212 | } | ||
213 | |||
214 | if (unlikely(bt_flag_is_set(t, BTF_WAITING_FOR_RELEASE))) { | ||
215 | TRACE_TASK(t, "Waiting for release. Skipping exhaustion.\n"); | ||
216 | return; | ||
217 | } | ||
218 | |||
219 | if (budget_precisely_tracked(t)) { | ||
220 | if (cancel_enforcement_timer(t) < 0) { | ||
221 | TRACE_TASK(t, "schedule() raced with timer. Deferring to timer.\n"); | ||
222 | handle_exhaustion = 0; | ||
223 | } | ||
224 | } | ||
225 | |||
226 | if (handle_exhaustion) { | ||
227 | if (likely(!is_np(t))) { | ||
228 | TRACE_TASK(t, "picked task without budget => FORCE_RESCHED.\n"); | ||
229 | litmus_reschedule_local(); | ||
230 | } | ||
231 | else if (is_user_np(t)) { | ||
232 | TRACE_TASK(t, "is non-preemptable, preemption delayed.\n"); | ||
233 | request_exit_np(t); | ||
234 | } | ||
235 | } | ||
236 | } | ||
237 | |||
177 | static void enqueue_task_litmus(struct rq *rq, struct task_struct *p, | 238 | static void enqueue_task_litmus(struct rq *rq, struct task_struct *p, |
178 | int flags) | 239 | int flags) |
179 | { | 240 | { |
@@ -187,7 +248,7 @@ static void enqueue_task_litmus(struct rq *rq, struct task_struct *p, | |||
187 | * state already here. | 248 | * state already here. |
188 | * | 249 | * |
189 | * WARNING: this needs to be re-evaluated when porting | 250 | * WARNING: this needs to be re-evaluated when porting |
190 | * to newer kernel versions. | 251 | * to newer kernel versions. |
191 | */ | 252 | */ |
192 | p->state = TASK_RUNNING; | 253 | p->state = TASK_RUNNING; |
193 | litmus->task_wake_up(p); | 254 | litmus->task_wake_up(p); |
@@ -340,7 +401,7 @@ const struct sched_class litmus_sched_class = { | |||
340 | .pre_schedule = pre_schedule_litmus, | 401 | .pre_schedule = pre_schedule_litmus, |
341 | #endif | 402 | #endif |
342 | 403 | ||
343 | .set_curr_task = set_curr_task_litmus, | 404 | .set_curr_task = set_curr_task_litmus, |
344 | .task_tick = task_tick_litmus, | 405 | .task_tick = task_tick_litmus, |
345 | 406 | ||
346 | .get_rr_interval = get_rr_interval_litmus, | 407 | .get_rr_interval = get_rr_interval_litmus, |
diff --git a/litmus/Makefile b/litmus/Makefile index a312ef4dd52a..bb28045a5221 100644 --- a/litmus/Makefile +++ b/litmus/Makefile | |||
@@ -6,11 +6,12 @@ obj-y = sched_plugin.o litmus.o \ | |||
6 | preempt.o \ | 6 | preempt.o \ |
7 | litmus_proc.o \ | 7 | litmus_proc.o \ |
8 | budget.o \ | 8 | budget.o \ |
9 | aux_tasks.o \ | 9 | aux_tasks.o \ |
10 | clustered.o \ | 10 | clustered.o \ |
11 | jobs.o \ | 11 | jobs.o \ |
12 | sync.o \ | 12 | sync.o \ |
13 | rt_domain.o \ | 13 | rt_domain.o \ |
14 | budget.o \ | ||
14 | edf_common.o \ | 15 | edf_common.o \ |
15 | fp_common.o \ | 16 | fp_common.o \ |
16 | fdso.o \ | 17 | fdso.o \ |
diff --git a/litmus/aux_tasks.c b/litmus/aux_tasks.c index 5b34b9b99941..a5d44775b41d 100644 --- a/litmus/aux_tasks.c +++ b/litmus/aux_tasks.c | |||
@@ -35,10 +35,8 @@ static int admit_aux_task(struct task_struct *t) | |||
35 | .priority = LITMUS_LOWEST_PRIORITY, | 35 | .priority = LITMUS_LOWEST_PRIORITY, |
36 | .cls = RT_CLASS_BEST_EFFORT, | 36 | .cls = RT_CLASS_BEST_EFFORT, |
37 | .budget_policy = QUANTUM_ENFORCEMENT, | 37 | .budget_policy = QUANTUM_ENFORCEMENT, |
38 | #if 0 /* PORT RECHECK */ | ||
39 | .drain_policy = DRAIN_SIMPLE, | 38 | .drain_policy = DRAIN_SIMPLE, |
40 | .budget_signal_policy = NO_SIGNALS, | 39 | .budget_signal_policy = NO_SIGNALS, |
41 | #endif | ||
42 | /* use SPORADIC instead of EARLY since util = 1.0 */ | 40 | /* use SPORADIC instead of EARLY since util = 1.0 */ |
43 | .release_policy = TASK_SPORADIC, | 41 | .release_policy = TASK_SPORADIC, |
44 | }; | 42 | }; |
diff --git a/litmus/budget.c b/litmus/budget.c index f7712be29adb..9e36ce34e417 100644 --- a/litmus/budget.c +++ b/litmus/budget.c | |||
@@ -1,113 +1,459 @@ | |||
1 | #include <linux/sched.h> | 1 | #include <linux/sched.h> |
2 | #include <linux/percpu.h> | 2 | #include <linux/percpu.h> |
3 | #include <linux/hrtimer.h> | 3 | #include <linux/hrtimer.h> |
4 | #include <linux/signal.h> | ||
4 | 5 | ||
5 | #include <litmus/litmus.h> | 6 | #include <litmus/litmus.h> |
6 | #include <litmus/preempt.h> | 7 | #include <litmus/preempt.h> |
7 | 8 | #include <litmus/sched_plugin.h> | |
8 | #include <litmus/budget.h> | 9 | #include <litmus/budget.h> |
10 | #include <litmus/signal.h> | ||
11 | |||
12 | int cancel_enforcement_timer(struct task_struct* t) | ||
13 | { | ||
14 | struct enforcement_timer* et; | ||
15 | int ret = 0; | ||
16 | unsigned long flags; | ||
17 | |||
18 | BUG_ON(!t); | ||
19 | BUG_ON(!is_realtime(t)); | ||
20 | |||
21 | et = &tsk_rt(t)->budget.timer; | ||
9 | 22 | ||
10 | struct enforcement_timer { | 23 | TRACE_TASK(t, "canceling enforcement timer.\n"); |
11 | /* The enforcement timer is used to accurately police | ||
12 | * slice budgets. */ | ||
13 | struct hrtimer timer; | ||
14 | int armed; | ||
15 | }; | ||
16 | 24 | ||
17 | DEFINE_PER_CPU(struct enforcement_timer, budget_timer); | 25 | if (et->armed) { |
26 | raw_spin_lock_irqsave(&et->lock, flags); | ||
27 | if (et->armed) { | ||
28 | ret = hrtimer_try_to_cancel(&et->timer); | ||
29 | if (ret < 0) | ||
30 | TRACE_TASK(t, "timer already running. failed to cancel.\n"); | ||
31 | else { | ||
32 | TRACE_TASK(t, "canceled timer with %lld ns remaining.\n", | ||
33 | ktime_to_ns(hrtimer_expires_remaining(&et->timer))); | ||
34 | et->armed = 0; | ||
35 | } | ||
36 | } | ||
37 | else | ||
38 | TRACE_TASK(t, "timer was not armed (race).\n"); | ||
39 | raw_spin_unlock_irqrestore(&et->lock, flags); | ||
40 | } | ||
41 | else | ||
42 | TRACE_TASK(t, "timer was not armed.\n"); | ||
43 | |||
44 | return ret; | ||
45 | } | ||
18 | 46 | ||
19 | static enum hrtimer_restart on_enforcement_timeout(struct hrtimer *timer) | 47 | inline static void arm_enforcement_timer(struct task_struct* t, int force) |
20 | { | 48 | { |
21 | struct enforcement_timer* et = container_of(timer, | 49 | struct enforcement_timer* et; |
22 | struct enforcement_timer, | 50 | lt_t when_to_fire, remaining_budget; |
23 | timer); | 51 | lt_t now; |
24 | unsigned long flags; | 52 | unsigned long flags; |
25 | 53 | ||
26 | local_irq_save(flags); | 54 | BUG_ON(!t); |
27 | TRACE("enforcement timer fired.\n"); | 55 | BUG_ON(!is_realtime(t)); |
28 | et->armed = 0; | 56 | |
29 | /* activate scheduler */ | 57 | et = &tsk_rt(t)->budget.timer; |
30 | litmus_reschedule_local(); | 58 | if (et->armed) { |
31 | local_irq_restore(flags); | 59 | TRACE_TASK(t, "timer already armed!\n"); |
60 | return; | ||
61 | } | ||
62 | |||
63 | if (!force) { | ||
64 | if ( (!budget_enforced(t) || | ||
65 | (budget_enforced(t) && | ||
66 | bt_flag_is_set(t, BTF_BUDGET_EXHAUSTED))) | ||
67 | && | ||
68 | (!budget_signalled(t) || | ||
69 | (budget_signalled(t) && | ||
70 | bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)))) { | ||
71 | TRACE_TASK(t, | ||
72 | "trying to arm timer when budget " | ||
73 | "has already been exhausted.\n"); | ||
74 | return; | ||
75 | } | ||
76 | } | ||
77 | |||
78 | TRACE_TASK(t, "arming enforcement timer.\n"); | ||
79 | |||
80 | /* __hrtimer_start_range_ns() cancels the timer | ||
81 | * anyway, so we don't have to check whether it is still armed */ | ||
82 | raw_spin_lock_irqsave(&et->lock, flags); | ||
83 | |||
84 | if (et->armed) { | ||
85 | TRACE_TASK(t, "timer already armed (race)!\n"); | ||
86 | goto out; | ||
87 | } | ||
88 | |||
89 | now = litmus_clock(); | ||
90 | remaining_budget = budget_remaining(t); | ||
91 | when_to_fire = now + remaining_budget; | ||
92 | |||
93 | TRACE_TASK(t, "budget remaining: %ld, when_to_fire: %ld\n", | ||
94 | remaining_budget, when_to_fire); | ||
32 | 95 | ||
33 | return HRTIMER_NORESTART; | 96 | __hrtimer_start_range_ns(&et->timer, |
97 | ns_to_ktime(when_to_fire), | ||
98 | 0 /* delta */, | ||
99 | HRTIMER_MODE_ABS_PINNED, /* TODO: need to use non-pinned? */ | ||
100 | 0 /* no wakeup */); | ||
101 | et->armed = 1; | ||
102 | |||
103 | out: | ||
104 | raw_spin_unlock_irqrestore(&et->lock, flags); | ||
34 | } | 105 | } |
35 | 106 | ||
36 | /* assumes called with IRQs off */ | 107 | void send_sigbudget(struct task_struct* t) |
37 | static void cancel_enforcement_timer(struct enforcement_timer* et) | ||
38 | { | 108 | { |
39 | int ret; | 109 | if (!bt_flag_test_and_set(t, BTF_SIG_BUDGET_SENT)) { |
110 | /* signal has not yet been sent and we are responsible for sending | ||
111 | * since we just set the sent-bit when it was previously 0. */ | ||
40 | 112 | ||
41 | TRACE("cancelling enforcement timer.\n"); | 113 | TRACE_TASK(t, "SIG_BUDGET being sent!\n"); |
114 | send_sig(SIG_BUDGET, t, 1); /* '1' denotes signal sent from kernel */ | ||
115 | } | ||
116 | } | ||
42 | 117 | ||
43 | /* Since interrupts are disabled and et->armed is only | 118 | /* |
44 | * modified locally, we do not need any locks. | 119 | * DRAIN_SIMPLE |
45 | */ | 120 | */ |
46 | 121 | ||
47 | if (et->armed) { | 122 | void simple_on_scheduled(struct task_struct* t) |
48 | ret = hrtimer_try_to_cancel(&et->timer); | 123 | { |
49 | /* Should never be inactive. */ | 124 | BUG_ON(!t); |
50 | BUG_ON(ret == 0); | 125 | |
51 | /* Should never be running concurrently. */ | 126 | if(budget_precisely_tracked(t) && !bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) |
52 | BUG_ON(ret == -1); | 127 | if (!tsk_rt(t)->budget.timer.armed) |
128 | arm_enforcement_timer(t, 0); | ||
129 | } | ||
130 | |||
131 | inline static void __simple_on_unscheduled(struct task_struct* t) | ||
132 | { | ||
133 | BUG_ON(!t); | ||
134 | |||
135 | if (budget_precisely_tracked(t)) | ||
136 | cancel_enforcement_timer(t); | ||
137 | } | ||
138 | |||
139 | void simple_on_blocked(struct task_struct* t) | ||
140 | { | ||
141 | __simple_on_unscheduled(t); | ||
142 | } | ||
143 | |||
144 | void simple_on_preempt(struct task_struct* t) | ||
145 | { | ||
146 | __simple_on_unscheduled(t); | ||
147 | } | ||
148 | |||
149 | void simple_on_sleep(struct task_struct* t) | ||
150 | { | ||
151 | __simple_on_unscheduled(t); | ||
152 | } | ||
153 | |||
154 | void simple_on_exit(struct task_struct* t) | ||
155 | { | ||
156 | __simple_on_unscheduled(t); | ||
157 | } | ||
158 | |||
159 | /* | ||
160 | * DRAIN_SIMPLE_IO | ||
161 | */ | ||
162 | |||
163 | void simple_io_on_blocked(struct task_struct* t) | ||
164 | { | ||
165 | /* hiding is turned on by locking protocols, so if there isn't any | ||
166 | hiding, then we're blocking for some other reason. assume it's I/O. */ | ||
167 | |||
168 | int for_io = !tsk_rt(t)->blocked_lock || (0 | ||
169 | #ifdef CONFIG_REALTIME_AUX_TASKS | ||
170 | || (tsk_rt(t)->has_aux_tasks && !tsk_rt(t)->hide_from_aux_tasks) | ||
171 | #endif | ||
172 | #ifdef CONFIG_LITMUS_NVIDIA | ||
173 | || (tsk_rt(t)->held_gpus && !tsk_rt(t)->hide_from_gpu) | ||
174 | #endif | ||
175 | ); | ||
53 | 176 | ||
54 | et->armed = 0; | 177 | /* we drain budget for io-based suspensions */ |
178 | if (for_io) { | ||
179 | /* there is a fraction of time where we're double-counting the | ||
180 | * time tracked by the rq and suspension time. | ||
181 | * TODO: Do this recording closer to suspension time. */ | ||
182 | tsk_rt(t)->budget.suspend_timestamp = litmus_clock(); | ||
183 | |||
184 | TRACE_TASK(t, "blocking for I/O.\n"); | ||
185 | |||
186 | if (!tsk_rt(t)->budget.timer.armed) { | ||
187 | bt_flag_clear(t, BTF_BUDGET_EXHAUSTED); | ||
188 | |||
189 | if (likely(!bt_flag_is_set(t, BTF_WAITING_FOR_RELEASE))) { | ||
190 | TRACE_TASK(t, "budget timer not armed. " | ||
191 | "Raced with exhaustion-resched? Re-arming.\n"); | ||
192 | arm_enforcement_timer(t, 1); | ||
193 | } | ||
194 | else { | ||
195 | TRACE_TASK(t, "not arming timer because task is waiting " | ||
196 | "for release.\n"); | ||
197 | } | ||
198 | } | ||
199 | } | ||
200 | else { | ||
201 | TRACE_TASK(t, "blocking for litmus lock. stop draining.\n"); | ||
202 | simple_on_blocked(t); | ||
55 | } | 203 | } |
56 | } | 204 | } |
57 | 205 | ||
58 | /* assumes called with IRQs off */ | 206 | void simple_io_on_wakeup(struct task_struct* t) |
59 | static void arm_enforcement_timer(struct enforcement_timer* et, | ||
60 | struct task_struct* t) | ||
61 | { | 207 | { |
62 | lt_t when_to_fire; | 208 | /* we're waking up from an io-based suspension */ |
63 | TRACE_TASK(t, "arming enforcement timer.\n"); | 209 | if (tsk_rt(t)->budget.suspend_timestamp) { |
210 | lt_t suspend_cost = litmus_clock() - | ||
211 | tsk_rt(t)->budget.suspend_timestamp; | ||
212 | tsk_rt(t)->budget.suspend_timestamp = 0; | ||
213 | TRACE_TASK(t, "budget consumed while io-suspended: %llu\n", | ||
214 | suspend_cost); | ||
215 | get_exec_time(t) += suspend_cost; | ||
216 | } | ||
217 | else { | ||
218 | TRACE_TASK(t, "waking from non-io blocking\n"); | ||
219 | } | ||
220 | } | ||
64 | 221 | ||
65 | /* Calling this when there is no budget left for the task | ||
66 | * makes no sense, unless the task is non-preemptive. */ | ||
67 | BUG_ON(budget_exhausted(t) && (!is_np(t))); | ||
68 | 222 | ||
69 | /* __hrtimer_start_range_ns() cancels the timer | 223 | /* |
70 | * anyway, so we don't have to check whether it is still armed */ | 224 | * DRAIN_SOBLIV |
225 | */ | ||
226 | |||
227 | void sobliv_on_blocked(struct task_struct* t) | ||
228 | { | ||
229 | if (bt_flag_is_set(t, BTF_IS_TOP_M)) { | ||
230 | /* there is a fraction of time where we're double-counting the | ||
231 | * time tracked by the rq and suspension time. | ||
232 | * TODO: Do this recording closer to suspension time. */ | ||
233 | tsk_rt(t)->budget.suspend_timestamp = litmus_clock(); | ||
234 | |||
235 | if (!tsk_rt(t)->budget.timer.armed) { | ||
236 | /* budget exhaustion timer fired as t was waking up, so budget | ||
237 | * routine thought t was running. We need to re-trigger the budget | ||
238 | * exhaustion routine via timer. Schedulers do not call | ||
239 | * job_completion() when a task blocks, even if t's budget has been | ||
240 | * exhausted. Unfortunately, we cannot rerun the exhaustion routine | ||
241 | * here due to spinlock ordering issues. Just re-arm the timer with | ||
242 | * the exhausted time, re-running the timer routine immediately once | ||
243 | * interrupts have been re-enabled. */ | ||
244 | |||
245 | /* clear the exhausted flag so handle will re-run. this will not | ||
246 | * trigger another exhaustion signal since signals are controlled by | ||
247 | * BTF_SIG_BUDGET_SENT. */ | ||
248 | bt_flag_clear(t, BTF_BUDGET_EXHAUSTED); | ||
71 | 249 | ||
72 | if (likely(!is_np(t))) { | 250 | if (likely(!bt_flag_is_set(t, BTF_WAITING_FOR_RELEASE))) { |
73 | when_to_fire = litmus_clock() + budget_remaining(t); | 251 | TRACE_TASK(t, "budget timer not armed. " |
74 | __hrtimer_start_range_ns(&et->timer, | 252 | "Raced with exhaustion-resched? Re-arming.\n"); |
75 | ns_to_ktime(when_to_fire), | 253 | arm_enforcement_timer(t, 1); |
76 | 0 /* delta */, | 254 | } |
77 | HRTIMER_MODE_ABS_PINNED, | 255 | else { |
78 | 0 /* no wakeup */); | 256 | TRACE_TASK(t, "not arming timer because task is waiting " |
79 | et->armed = 1; | 257 | "for release.\n"); |
258 | } | ||
259 | } | ||
80 | } | 260 | } |
81 | } | 261 | } |
82 | 262 | ||
263 | void sobliv_on_wakeup(struct task_struct* t) | ||
264 | { | ||
265 | if (bt_flag_is_set(t, BTF_IS_TOP_M)) { | ||
266 | /* we're waking up while in top-m. record the time spent | ||
267 | * suspended while draining in exec_cost. suspend_timestamp was | ||
268 | * either set when we entered top-m while asleep, or when we | ||
269 | * blocked. */ | ||
270 | if (tsk_rt(t)->budget.suspend_timestamp) { | ||
271 | lt_t suspend_cost = litmus_clock() - | ||
272 | tsk_rt(t)->budget.suspend_timestamp; | ||
273 | tsk_rt(t)->budget.suspend_timestamp = 0; | ||
274 | TRACE_TASK(t, "budget consumed while suspended: %llu\n", | ||
275 | suspend_cost); | ||
276 | get_exec_time(t) += suspend_cost; | ||
277 | } | ||
278 | else { | ||
279 | WARN_ON(!bt_flag_is_set(t, BTF_WAITING_FOR_RELEASE)); | ||
280 | } | ||
281 | } | ||
282 | } | ||
83 | 283 | ||
84 | /* expects to be called with IRQs off */ | 284 | void sobliv_on_inherit(struct task_struct* t, struct task_struct* prio_inh) |
85 | void update_enforcement_timer(struct task_struct* t) | ||
86 | { | 285 | { |
87 | struct enforcement_timer* et = &__get_cpu_var(budget_timer); | 286 | /* TODO: Budget credit accounting. */ |
287 | BUG_ON(!prio_inh); | ||
288 | } | ||
289 | |||
290 | void sobliv_on_disinherit(struct task_struct* t, struct task_struct* prio_inh) | ||
291 | { | ||
292 | /* TODO: Budget credit accounting. */ | ||
293 | } | ||
88 | 294 | ||
89 | if (t && budget_precisely_enforced(t)) { | 295 | void sobliv_on_enter_top_m(struct task_struct* t) |
90 | /* Make sure we call into the scheduler when this budget | 296 | { |
91 | * expires. */ | 297 | if (!bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) { |
92 | arm_enforcement_timer(et, t); | 298 | if (tsk_rt(t)->budget.timer.armed) |
93 | } else if (et->armed) { | 299 | TRACE_TASK(t, "budget timer already armed.\n"); |
94 | /* Make sure we don't cause unnecessary interrupts. */ | 300 | else { |
95 | cancel_enforcement_timer(et); | 301 | /* if we're blocked, then record the time at which we |
302 | started measuring */ | ||
303 | if (!is_running(t)) | ||
304 | tsk_rt(t)->budget.suspend_timestamp = litmus_clock(); | ||
305 | |||
306 | /* the callback will handle it if it is executing */ | ||
307 | if (!hrtimer_callback_running(&tsk_rt(t)->budget.timer.timer)) { | ||
308 | arm_enforcement_timer(t, 0); | ||
309 | } | ||
310 | else { | ||
311 | TRACE_TASK(t, | ||
312 | "within callback context. deferring timer arm.\n"); | ||
313 | } | ||
314 | } | ||
96 | } | 315 | } |
97 | } | 316 | } |
98 | 317 | ||
318 | void sobliv_on_exit_top_m(struct task_struct* t) | ||
319 | { | ||
320 | if (budget_precisely_tracked(t)) { | ||
321 | if (tsk_rt(t)->budget.timer.armed) { | ||
322 | |||
323 | if (!is_running(t)) { | ||
324 | /* the time at which we started draining budget while | ||
325 | * suspended is recorded in suspend_timestamp. suspend_timestamp | ||
326 | * was set either when 't' entered the top-m while suspended | ||
327 | * or when 't' blocked. */ | ||
328 | lt_t suspend_cost; | ||
329 | BUG_ON(!tsk_rt(t)->budget.suspend_timestamp); | ||
330 | suspend_cost = litmus_clock() - | ||
331 | tsk_rt(t)->budget.suspend_timestamp; | ||
332 | TRACE_TASK(t, "budget consumed while suspended: %llu\n", | ||
333 | suspend_cost); | ||
334 | get_exec_time(t) += suspend_cost; | ||
335 | |||
336 | /* timer should have fired before now */ | ||
337 | if (get_exec_time(t) + 1000000/10 > get_exec_cost(t)) { | ||
338 | TRACE_TASK(t, | ||
339 | "budget overrun while suspended by over 1/10 " | ||
340 | "millisecond! timer should have already fired!\n"); | ||
341 | WARN_ON(1); | ||
342 | } | ||
343 | } | ||
99 | 344 | ||
100 | static int __init init_budget_enforcement(void) | 345 | TRACE_TASK(t, "stops draining budget\n"); |
346 | /* the callback will handle it if it is executing */ | ||
347 | if (!hrtimer_callback_running(&tsk_rt(t)->budget.timer.timer)) { | ||
348 | /* TODO: record a timestamp if the task isn't running */ | ||
349 | cancel_enforcement_timer(t); | ||
350 | } | ||
351 | else { | ||
352 | TRACE_TASK(t, | ||
353 | "within callback context. skipping operation.\n"); | ||
354 | } | ||
355 | } | ||
356 | else { | ||
357 | TRACE_TASK(t, "was not draining budget\n"); | ||
358 | } | ||
359 | } | ||
360 | } | ||
361 | |||
362 | void reevaluate_inheritance(struct task_struct* t) | ||
101 | { | 363 | { |
102 | int cpu; | 364 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
103 | struct enforcement_timer* et; | 365 | struct litmus_lock *blocked_lock = NULL; |
366 | |||
367 | TRACE_TASK(t, "reevaluating locks in light of budget exhaustion.\n"); | ||
368 | |||
369 | /* do we need to inherit from any tasks now that our own | ||
370 | * priority has decreased? */ | ||
371 | raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
372 | if (holds_locks(t)) { | ||
373 | struct task_struct* hp_blocked = | ||
374 | top_priority(&tsk_rt(t)->hp_blocked_tasks); | ||
375 | |||
376 | if (litmus->compare(hp_blocked, t)) | ||
377 | litmus->increase_prio(t, effective_priority(hp_blocked)); | ||
378 | } | ||
379 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
380 | |||
381 | /* do we need to tell the lock we're blocked on about our | ||
382 | * changed priority? */ | ||
383 | blocked_lock = tsk_rt(t)->blocked_lock; | ||
384 | if(blocked_lock) { | ||
385 | if(blocked_lock->ops->supports_budget_exhaustion) { | ||
386 | TRACE_TASK(t, "Lock %d supports budget exhaustion.\n", | ||
387 | blocked_lock->ident); | ||
388 | blocked_lock->ops->budget_exhausted(blocked_lock, t); | ||
389 | } | ||
390 | } | ||
391 | else { | ||
392 | TRACE_TASK(t, | ||
393 | "Budget exhausted while task not blocked on Litmus lock.\n"); | ||
394 | } | ||
395 | #else | ||
396 | /* prio-reeval currently relies upon nested locking infrastructure */ | ||
397 | TRACE_TASK(t, | ||
398 | "Unable to check if sleeping task is blocked " | ||
399 | "on Litmus lock without " | ||
400 | "CONFIG_LITMUS_NESTED_LOCKING enabled.\n"); | ||
401 | #endif | ||
402 | } | ||
403 | |||
404 | |||
104 | 405 | ||
105 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | 406 | static enum hrtimer_restart __on_timeout(struct hrtimer *timer) |
106 | et = &per_cpu(budget_timer, cpu); | 407 | { |
107 | hrtimer_init(&et->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | 408 | enum hrtimer_restart restart = HRTIMER_NORESTART; |
108 | et->timer.function = on_enforcement_timeout; | 409 | unsigned long flags; |
410 | |||
411 | struct budget_tracker* bt = | ||
412 | container_of( | ||
413 | container_of(timer, | ||
414 | struct enforcement_timer, | ||
415 | timer), | ||
416 | struct budget_tracker, | ||
417 | timer); | ||
418 | |||
419 | struct task_struct* t = | ||
420 | container_of( | ||
421 | container_of(bt, struct rt_param, budget), | ||
422 | struct task_struct, | ||
423 | rt_param); | ||
424 | |||
425 | TRACE_TASK(t, "budget timer interrupt fired at time %lu\n", | ||
426 | litmus_clock()); | ||
427 | |||
428 | raw_spin_lock_irqsave(&bt->timer.lock, flags); | ||
429 | tsk_rt(t)->budget.timer.armed = 0; | ||
430 | raw_spin_unlock_irqrestore(&bt->timer.lock, flags); | ||
431 | |||
432 | if (unlikely(bt_flag_is_set(t, BTF_WAITING_FOR_RELEASE))) { | ||
433 | TRACE_TASK(t, | ||
434 | "spurious exhastion while waiting for release. dropping.\n"); | ||
435 | goto out; | ||
109 | } | 436 | } |
110 | return 0; | 437 | |
438 | restart = bt->ops->on_exhausted(t,!IN_SCHEDULE); | ||
439 | |||
440 | raw_spin_lock_irqsave(&bt->timer.lock, flags); | ||
441 | tsk_rt(t)->budget.timer.armed = (restart == HRTIMER_RESTART); | ||
442 | raw_spin_unlock_irqrestore(&bt->timer.lock, flags); | ||
443 | |||
444 | out: | ||
445 | return restart; | ||
111 | } | 446 | } |
112 | 447 | ||
113 | module_init(init_budget_enforcement); | 448 | void init_budget_tracker(struct budget_tracker* bt, |
449 | const struct budget_tracker_ops* ops) | ||
450 | { | ||
451 | BUG_ON(!bt); | ||
452 | |||
453 | memset(bt, 0, sizeof(*bt)); | ||
454 | raw_spin_lock_init(&bt->timer.lock); | ||
455 | hrtimer_init(&bt->timer.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
456 | bt->timer.timer.function = __on_timeout; | ||
457 | bt->ops = ops; | ||
458 | INIT_BINHEAP_NODE(&bt->top_m_node); | ||
459 | } | ||
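The __on_timeout() handler above defines the contract for the plugin-supplied
on_exhausted() callback: the timer stays armed only if the callback returns
HRTIMER_RESTART (after pushing the expiry forward), and signal delivery stays
idempotent through send_sigbudget(). A hedged sketch of a minimal callback
under those assumptions (my_plugin_on_exhausted is not part of this patch):

    static enum hrtimer_restart my_plugin_on_exhausted(struct task_struct* t,
                                                       int in_schedule)
    {
            if (budget_signalled(t))
                    send_sigbudget(t);  /* no-op if already sent */

            /* A real plugin would force job completion or trigger a
             * reschedule here; in_schedule says whether it was invoked from
             * scheduler context or from the enforcement-timer interrupt. */
            return HRTIMER_NORESTART;   /* leave the timer disarmed */
    }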
diff --git a/litmus/jobs.c b/litmus/jobs.c index ce7d591b82c5..d955bbde8d57 100644 --- a/litmus/jobs.c +++ b/litmus/jobs.c | |||
@@ -13,11 +13,9 @@ void setup_release(struct task_struct *t, lt_t release) | |||
13 | t->rt_param.job_params.deadline = release + get_rt_relative_deadline(t); | 13 | t->rt_param.job_params.deadline = release + get_rt_relative_deadline(t); |
14 | t->rt_param.job_params.exec_time = 0; | 14 | t->rt_param.job_params.exec_time = 0; |
15 | 15 | ||
16 | #if 0 /* PORT CHECK */ | 16 | /* TODO: Move this to budget.h/.c */ |
17 | /* kludge - TODO: Move this to budget.h/.c */ | ||
18 | if (t->rt_param.budget.ops) | 17 | if (t->rt_param.budget.ops) |
19 | bt_flags_reset(t); | 18 | bt_flags_reset(t); |
20 | #endif | ||
21 | 19 | ||
22 | /* update job sequence number */ | 20 | /* update job sequence number */ |
23 | t->rt_param.job_params.job_no++; | 21 | t->rt_param.job_params.job_no++; |
diff --git a/litmus/klmirqd.c b/litmus/klmirqd.c index c1f4aef97063..b16e9c271118 100644 --- a/litmus/klmirqd.c +++ b/litmus/klmirqd.c | |||
@@ -286,10 +286,8 @@ static int become_litmus_daemon(struct task_struct* tsk) | |||
286 | .priority = LITMUS_LOWEST_PRIORITY, | 286 | .priority = LITMUS_LOWEST_PRIORITY, |
287 | .cls = RT_CLASS_BEST_EFFORT, | 287 | .cls = RT_CLASS_BEST_EFFORT, |
288 | .budget_policy = NO_ENFORCEMENT, | 288 | .budget_policy = NO_ENFORCEMENT, |
289 | #if 0 /* PORT RECHECK */ | ||
290 | .drain_policy = DRAIN_SIMPLE, | 289 | .drain_policy = DRAIN_SIMPLE, |
291 | .budget_signal_policy = NO_SIGNALS, | 290 | .budget_signal_policy = NO_SIGNALS, |
292 | #endif | ||
293 | /* use SPORADIC instead of EARLY since util = 1.0 */ | 291 | /* use SPORADIC instead of EARLY since util = 1.0 */ |
294 | .release_policy = TASK_SPORADIC, | 292 | .release_policy = TASK_SPORADIC, |
295 | }; | 293 | }; |
diff --git a/litmus/locking.c b/litmus/locking.c index a2f38900e02f..0de007cc9732 100644 --- a/litmus/locking.c +++ b/litmus/locking.c | |||
@@ -956,6 +956,77 @@ int flush_pending_wakes() | |||
956 | return count; | 956 | return count; |
957 | } | 957 | } |
958 | 958 | ||
959 | void set_inh_task_linkback(struct task_struct* t, struct task_struct* linkto) | ||
960 | { | ||
961 | const int MAX_IDX = BITS_PER_LONG - 1; | ||
962 | |||
963 | int success = 0; | ||
964 | int old_idx = tsk_rt(t)->inh_task_linkback_idx; | ||
965 | |||
966 | /* is the linkback already set? */ | ||
967 | if (old_idx >= 0 && old_idx <= MAX_IDX) { | ||
968 | if ((BIT_MASK(old_idx) & tsk_rt(linkto)->used_linkback_slots) && | ||
969 | (tsk_rt(linkto)->inh_task_linkbacks[old_idx] == t)) { | ||
970 | TRACE_TASK(t, "linkback is current.\n"); | ||
971 | return; | ||
972 | } | ||
973 | BUG(); | ||
974 | } | ||
975 | |||
976 | /* kludge: upper limit on num linkbacks */ | ||
977 | BUG_ON(tsk_rt(linkto)->used_linkback_slots == ~0ul); | ||
978 | |||
979 | while(!success) { | ||
980 | int b = find_first_zero_bit(&tsk_rt(linkto)->used_linkback_slots, | ||
981 | BITS_PER_BYTE*sizeof(tsk_rt(linkto)->used_linkback_slots)); | ||
982 | |||
983 | BUG_ON(b > MAX_IDX); | ||
984 | |||
985 | /* set bit... */ | ||
986 | if (!test_and_set_bit(b, &tsk_rt(linkto)->used_linkback_slots)) { | ||
987 | TRACE_TASK(t, "linking back to %s/%d in slot %d\n", | ||
988 | linkto->comm, linkto->pid, b); | ||
989 | if (tsk_rt(linkto)->inh_task_linkbacks[b]) | ||
990 | TRACE_TASK(t, "%s/%d already has %s/%d in slot %d\n", | ||
991 | linkto->comm, linkto->pid, | ||
992 | tsk_rt(linkto)->inh_task_linkbacks[b]->comm, | ||
993 | tsk_rt(linkto)->inh_task_linkbacks[b]->pid, | ||
994 | b); | ||
995 | |||
996 | /* TODO: allow dirty data to remain in [b] after code is tested */ | ||
997 | BUG_ON(tsk_rt(linkto)->inh_task_linkbacks[b] != NULL); | ||
998 | /* ...before setting slot */ | ||
999 | tsk_rt(linkto)->inh_task_linkbacks[b] = t; | ||
1000 | tsk_rt(t)->inh_task_linkback_idx = b; | ||
1001 | success = 1; | ||
1002 | } | ||
1003 | } | ||
1004 | } | ||
1005 | |||
1006 | void clear_inh_task_linkback(struct task_struct* t, | ||
1007 | struct task_struct* linkedto) | ||
1008 | { | ||
1009 | const int MAX_IDX = BITS_PER_LONG - 1; | ||
1010 | |||
1011 | int success = 0; | ||
1012 | int slot = tsk_rt(t)->inh_task_linkback_idx; | ||
1013 | |||
1014 | if (slot < 0) { | ||
1015 | TRACE_TASK(t, "assuming linkback already cleared.\n"); | ||
1016 | return; | ||
1017 | } | ||
1018 | |||
1019 | BUG_ON(slot > MAX_IDX); | ||
1020 | BUG_ON(tsk_rt(linkedto)->inh_task_linkbacks[slot] != t); | ||
1021 | |||
1022 | /* be safe - clear slot before clearing the bit */ | ||
1023 | tsk_rt(t)->inh_task_linkback_idx = -1; | ||
1024 | tsk_rt(linkedto)->inh_task_linkbacks[slot] = NULL; | ||
1025 | |||
1026 | success = test_and_clear_bit(slot, &tsk_rt(linkedto)->used_linkback_slots); | ||
1027 | |||
1028 | BUG_ON(!success); | ||
1029 | } | ||
959 | 1030 | ||
960 | #else /* CONFIG_LITMUS_LOCKING */ | 1031 | #else /* CONFIG_LITMUS_LOCKING */ |
961 | 1032 | ||
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c index 574957fdc1c0..fc2f696b26ef 100644 --- a/litmus/sched_task_trace.c +++ b/litmus/sched_task_trace.c | |||
@@ -191,10 +191,10 @@ feather_callback void do_sched_trace_task_completion(unsigned long id, | |||
191 | struct st_event_record* rec = get_record(ST_COMPLETION, t); | 191 | struct st_event_record* rec = get_record(ST_COMPLETION, t); |
192 | if (rec) { | 192 | if (rec) { |
193 | rec->data.completion.when = now(); | 193 | rec->data.completion.when = now(); |
194 | #if 0 | 194 | rec->data.completion.backlog_remaining = |
195 | rec->data.completion.backlog_remaining = tsk_rt(t)->job_params.backlog; | 195 | tsk_rt(t)->job_params.backlog; |
196 | rec->data.completion.was_backlog_job = tsk_rt(t)->job_params.is_backlogged_job; | 196 | rec->data.completion.was_backlog_job = |
197 | #endif | 197 | tsk_rt(t)->job_params.is_backlogged_job; |
198 | rec->data.completion.forced = forced; | 198 | rec->data.completion.forced = forced; |
199 | put_record(rec); | 199 | put_record(rec); |
200 | } | 200 | } |
@@ -208,8 +208,9 @@ feather_callback void do_sched_trace_task_block(unsigned long id, | |||
208 | if (rec) { | 208 | if (rec) { |
209 | rec->data.block.when = now(); | 209 | rec->data.block.when = now(); |
210 | 210 | ||
211 | // hiding is turned on by locking protocols, so if there isn't any | 211 | /* Hiding is turned on by locking protocols, so if there isn't any |
212 | // hiding, then we're blocking for some other reason. assume it's I/O. | 212 | hiding, then we're blocking for some other reason: assume |
213 | it's I/O. */ | ||
213 | rec->data.block.for_io = !tsk_rt(t)->blocked_lock || (0 | 214 | rec->data.block.for_io = !tsk_rt(t)->blocked_lock || (0 |
214 | #ifdef CONFIG_REALTIME_AUX_TASKS | 215 | #ifdef CONFIG_REALTIME_AUX_TASKS |
215 | || (tsk_rt(t)->has_aux_tasks && !tsk_rt(t)->hide_from_aux_tasks) | 216 | || (tsk_rt(t)->has_aux_tasks && !tsk_rt(t)->hide_from_aux_tasks) |
diff --git a/litmus/sync.c b/litmus/sync.c index 681a65d836c6..08a71361a995 100644 --- a/litmus/sync.c +++ b/litmus/sync.c | |||
@@ -13,9 +13,7 @@ | |||
13 | #include <litmus/jobs.h> | 13 | #include <litmus/jobs.h> |
14 | 14 | ||
15 | #include <litmus/sched_trace.h> | 15 | #include <litmus/sched_trace.h> |
16 | #if 0 /* PORT RECHECK */ | ||
17 | #include <litmus/budget.h> | 16 | #include <litmus/budget.h> |
18 | #endif | ||
19 | 17 | ||
20 | struct ts_release_wait { | 18 | struct ts_release_wait { |
21 | struct list_head list; | 19 | struct list_head list; |
@@ -58,19 +56,15 @@ static long do_wait_for_ts_release(struct timespec *wake) | |||
58 | #if defined(CONFIG_REALTIME_AUX_TASKS) || defined(CONFIG_LITMUS_NVIDIA) | 56 | #if defined(CONFIG_REALTIME_AUX_TASKS) || defined(CONFIG_LITMUS_NVIDIA) |
59 | hide_from_workers(t, &vis_flags); | 57 | hide_from_workers(t, &vis_flags); |
60 | #endif | 58 | #endif |
61 | #if 0 /* PORT RECHECK */ | ||
62 | bt_flag_set(t, BTF_WAITING_FOR_RELEASE); | 59 | bt_flag_set(t, BTF_WAITING_FOR_RELEASE); |
63 | mb(); | 60 | mb(); |
64 | budget_state_machine(t,on_exit); // do this here and not in schedule()? | 61 | budget_state_machine(t, on_exit); /* TODO: maybe call in schedule() */ |
65 | #endif | ||
66 | } | 62 | } |
67 | 63 | ||
68 | TRACE_TASK(t, "waiting for ts release.\n"); | 64 | TRACE_TASK(t, "waiting for ts release.\n"); |
69 | 65 | ||
70 | #if 0 /* PORT RECHECK */ | ||
71 | if (is_rt) | 66 | if (is_rt) |
72 | BUG_ON(!bt_flag_is_set(t, BTF_WAITING_FOR_RELEASE)); | 67 | BUG_ON(!bt_flag_is_set(t, BTF_WAITING_FOR_RELEASE)); |
73 | #endif | ||
74 | 68 | ||
75 | /* We are enqueued, now we wait for someone to wake us up. */ | 69 | /* We are enqueued, now we wait for someone to wake us up. */ |
76 | ret = wait_for_completion_interruptible(&wait.completion); | 70 | ret = wait_for_completion_interruptible(&wait.completion); |
@@ -78,9 +72,7 @@ static long do_wait_for_ts_release(struct timespec *wake) | |||
78 | TRACE_TASK(t, "released by ts release!\n"); | 72 | TRACE_TASK(t, "released by ts release!\n"); |
79 | 73 | ||
80 | if (is_rt) { | 74 | if (is_rt) { |
81 | #if 0 /* PORT RECHECK */ | ||
82 | bt_flag_clear(t, BTF_WAITING_FOR_RELEASE); | 75 | bt_flag_clear(t, BTF_WAITING_FOR_RELEASE); |
83 | #endif | ||
84 | #if defined(CONFIG_REALTIME_AUX_TASKS) || defined(CONFIG_LITMUS_NVIDIA) | 76 | #if defined(CONFIG_REALTIME_AUX_TASKS) || defined(CONFIG_LITMUS_NVIDIA) |
85 | show_to_workers(t, &vis_flags); | 77 | show_to_workers(t, &vis_flags); |
86 | #endif | 78 | #endif |
@@ -102,13 +94,11 @@ static long do_wait_for_ts_release(struct timespec *wake) | |||
102 | */ | 94 | */ |
103 | tsk_rt(current)->dont_requeue = 1; | 95 | tsk_rt(current)->dont_requeue = 1; |
104 | tsk_rt(t)->completed = 1; | 96 | tsk_rt(t)->completed = 1; |
105 | #if 0 /* PORT RECHECK */ | ||
106 | tsk_rt(t)->job_params.backlog = 0; | 97 | tsk_rt(t)->job_params.backlog = 0; |
107 | tsk_rt(t)->job_params.is_backlogged_job = 0; | 98 | tsk_rt(t)->job_params.is_backlogged_job = 0; |
108 | tsk_rt(t)->budget.suspend_timestamp = 0; | 99 | tsk_rt(t)->budget.suspend_timestamp = 0; |
109 | bt_flag_clear(t, BTF_BUDGET_EXHAUSTED); | 100 | bt_flag_clear(t, BTF_BUDGET_EXHAUSTED); |
110 | mb(); | 101 | mb(); |
111 | #endif | ||
112 | 102 | ||
113 | /* completion succeeded, set up release. subtract off | 103 | /* completion succeeded, set up release. subtract off |
114 | * period because schedule()->job_completion() will | 104 | * period because schedule()->job_completion() will |