author    | Glenn Elliott <gelliott@cs.unc.edu> | 2013-03-28 18:10:23 -0400
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2013-03-28 18:10:23 -0400
commit    | b9f67e64d84081f7dbba8dc380af8c39ac8d0a37 (patch)
tree      | 05cc5c56f469aa17ac0e81d94e3a2284b28e07ab
parent    | 4e28e863475df7c27c2e9ecba4e2cdd409bf044e (diff)
inheritance management of budget.exp while blocked
-rw-r--r-- | include/litmus/budget.h       |  42
-rw-r--r-- | include/litmus/fifo_lock.h    |  10
-rw-r--r-- | include/litmus/ikglp_lock.h   |   3
-rw-r--r-- | include/litmus/litmus.h       | 120
-rw-r--r-- | include/litmus/locking.h      |  15
-rw-r--r-- | include/litmus/prioq_lock.h   |   5
-rw-r--r-- | include/litmus/rt_param.h     |   6
-rw-r--r-- | include/litmus/sched_plugin.h |  10
-rw-r--r-- | litmus/aux_tasks.c            |   4
-rw-r--r-- | litmus/budget.c               |  86
-rw-r--r-- | litmus/fifo_lock.c            |  81
-rw-r--r-- | litmus/ikglp_lock.c           |  17
-rw-r--r-- | litmus/kfmlp_lock.c           |   4
-rw-r--r-- | litmus/litmus.c               |  21
-rw-r--r-- | litmus/prioq_lock.c           |  14
-rw-r--r-- | litmus/sched_cedf.c           | 218
-rw-r--r-- | litmus/sched_gsn_edf.c        |  20
-rw-r--r-- | litmus/sched_plugin.c         |  13
-rw-r--r-- | litmus/sched_task_trace.c     |   2
19 files changed, 508 insertions, 183 deletions
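In outline, the change: moves the budget helpers from budget.h into litmus.h (converting budget_exhausted() and budget_remaining() into inline functions), replaces the single inh_task_linkback pointer with a per-task array of linkback slots guarded by a used_linkback_slots bitmap, gives locking protocols a budget_exhausted callback plus capability flags, and threads a budget_triggered flag through the priority-decrease paths so C-EDF can re-evaluate inheritance when a blocked task's budget runs out. Below is a minimal, compilable sketch of the control flow this wires up for a blocked task; the stubs only trace the order of operations, the real bodies are in the hunks that follow.

```c
/* Stand-in stubs tracing cedf_sobliv_on_exhausted()'s blocked-task branch.
 * Names mirror the patch; bodies are illustrative placeholders only. */
#include <stdio.h>

static void job_completion(void)
{
	puts("1. force completion: refresh budget, postpone deadline");
}

static void lock_budget_exhausted(void)
{
	puts("3. blocked_lock->ops->budget_exhausted: re-rank waiter, propagate decrease");
}

static void cedf_check_schedule(int slot)
{
	printf("4. reschedule running inheritor in linkback slot %d\n", slot);
}

static void sobliv_revaluate_task(void)
{
	puts("2. re-inherit from hp_blocked_tasks if one now outranks us");
	lock_budget_exhausted();
}

int main(void)
{
	job_completion();
	sobliv_revaluate_task();  /* skipped if inheritance masks the change */
	cedf_check_schedule(0);   /* repeated for every used linkback slot */
	return 0;
}
```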
diff --git a/include/litmus/budget.h b/include/litmus/budget.h
index 4f1bdd101a9e..8e426a71f03d 100644
--- a/include/litmus/budget.h
+++ b/include/litmus/budget.h
@@ -4,47 +4,6 @@ | |||
4 | #include <linux/hrtimer.h> | 4 | #include <linux/hrtimer.h> |
5 | #include <linux/semaphore.h> | 5 | #include <linux/semaphore.h> |
6 | 6 | ||
7 | #define budget_exhausted(t) \ | ||
8 | (get_exec_time(t) >= get_exec_cost(t)) | ||
9 | |||
10 | #define budget_remaining(t) \ | ||
11 | ((!budget_exhausted(t)) ? (get_exec_cost(t) - get_exec_time(t)) : 0) | ||
12 | |||
13 | #define budget_enforced(t) (\ | ||
14 | tsk_rt(t)->task_params.budget_policy != NO_ENFORCEMENT) | ||
15 | |||
16 | #define budget_precisely_tracked(t) (\ | ||
17 | tsk_rt(t)->task_params.budget_policy == PRECISE_ENFORCEMENT || \ | ||
18 | tsk_rt(t)->task_params.budget_signal_policy == PRECISE_SIGNALS) | ||
19 | |||
20 | #define budget_quantum_tracked(t) (\ | ||
21 | tsk_rt(t)->task_params.budget_policy == QUANTUM_ENFORCEMENT || \ | ||
22 | tsk_rt(t)->task_params.budget_signal_policy == QUANTUM_SIGNALS) | ||
23 | |||
24 | #define budget_signalled(t) (\ | ||
25 | tsk_rt(t)->task_params.budget_signal_policy != NO_SIGNALS) | ||
26 | |||
27 | #define budget_precisely_signalled(t) (\ | ||
28 | tsk_rt(t)->task_params.budget_policy == PRECISE_SIGNALS) | ||
29 | |||
30 | #define bt_flag_is_set(t, flag_nr) (\ | ||
31 | test_bit(flag_nr, &tsk_rt(t)->budget.flags)) | ||
32 | |||
33 | #define bt_flag_test_and_set(t, flag_nr) (\ | ||
34 | test_and_set_bit(flag_nr, &tsk_rt(t)->budget.flags)) | ||
35 | |||
36 | #define bt_flag_set(t, flag_nr) (\ | ||
37 | set_bit(flag_nr, &tsk_rt(t)->budget.flags)) | ||
38 | |||
39 | #define bt_flag_clear(t, flag_nr) (\ | ||
40 | clear_bit(flag_nr, &tsk_rt(t)->budget.flags)) | ||
41 | |||
42 | #define bt_flags_reset(t) (\ | ||
43 | tsk_rt(t)->budget.flags = 0) | ||
44 | |||
45 | #define requeue_preempted_job(t) \ | ||
46 | (t && (!budget_exhausted(t) || !budget_enforced(t))) | ||
47 | |||
48 | struct enforcement_timer | 7 | struct enforcement_timer |
49 | { | 8 | { |
50 | raw_spinlock_t lock; | 9 | raw_spinlock_t lock; |
@@ -115,6 +74,7 @@ void sobliv_on_sleep(struct task_struct* t); | |||
115 | #define sobliv_on_exit simple_on_exit | 74 | #define sobliv_on_exit simple_on_exit |
116 | void sobliv_on_inherit(struct task_struct* t, struct task_struct* prio_inh); | 75 | void sobliv_on_inherit(struct task_struct* t, struct task_struct* prio_inh); |
117 | void sobliv_on_disinherit(struct task_struct* t, struct task_struct* prio_inh); | 76 | void sobliv_on_disinherit(struct task_struct* t, struct task_struct* prio_inh); |
77 | void sobliv_revaluate_task(struct task_struct* t); | ||
118 | 78 | ||
119 | 79 | ||
120 | void init_budget_tracker(struct budget_tracker* bt, | 80 | void init_budget_tracker(struct budget_tracker* bt, |
diff --git a/include/litmus/fifo_lock.h b/include/litmus/fifo_lock.h
index 0b2337b42155..fcf53c10ca92 100644
--- a/include/litmus/fifo_lock.h
+++ b/include/litmus/fifo_lock.h
@@ -35,15 +35,19 @@ int fifo_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_ | |||
35 | void fifo_mutex_enable_priority(struct litmus_lock *l, dgl_wait_state_t* dgl_wait); | 35 | void fifo_mutex_enable_priority(struct litmus_lock *l, dgl_wait_state_t* dgl_wait); |
36 | #endif | 36 | #endif |
37 | 37 | ||
38 | /* Assumes task's base-priority already updated to reflect new priority. */ | ||
39 | void fifo_mutex_budget_exhausted(struct litmus_lock *l, struct task_struct *t); | ||
40 | |||
38 | void fifo_mutex_propagate_increase_inheritance(struct litmus_lock* l, | 41 | void fifo_mutex_propagate_increase_inheritance(struct litmus_lock* l, |
39 | struct task_struct* t, | 42 | struct task_struct* t, |
40 | raw_spinlock_t* to_unlock, | 43 | raw_spinlock_t* to_unlock, |
41 | unsigned long irqflags); | 44 | unsigned long irqflags); |
42 | 45 | ||
43 | void fifo_mutex_propagate_decrease_inheritance(struct litmus_lock* l, | 46 | void fifo_mutex_propagate_decrease_inheritance(struct litmus_lock* l, |
44 | struct task_struct* t, | 47 | struct task_struct* t, |
45 | raw_spinlock_t* to_unlock, | 48 | raw_spinlock_t* to_unlock, |
46 | unsigned long irqflags); | 49 | unsigned long irqflags, |
50 | int budget_triggered); | ||
47 | 51 | ||
48 | int fifo_mutex_lock(struct litmus_lock* l); | 52 | int fifo_mutex_lock(struct litmus_lock* l); |
49 | int fifo_mutex_unlock(struct litmus_lock* l); | 53 | int fifo_mutex_unlock(struct litmus_lock* l); |
diff --git a/include/litmus/ikglp_lock.h b/include/litmus/ikglp_lock.h
index 4e69d85d1e37..357465e78d23 100644
--- a/include/litmus/ikglp_lock.h
+++ b/include/litmus/ikglp_lock.h
@@ -101,12 +101,13 @@ static inline struct ikglp_semaphore* ikglp_from_lock(struct litmus_lock* lock) | |||
101 | 101 | ||
102 | int ikglp_lock(struct litmus_lock* l); | 102 | int ikglp_lock(struct litmus_lock* l); |
103 | int ikglp_unlock(struct litmus_lock* l); | 103 | int ikglp_unlock(struct litmus_lock* l); |
104 | void ikglp_budget_exhausted(struct litmus_lock* l, struct task_struct* t); | ||
105 | void ikglp_virtual_unlock(struct litmus_lock* l, struct task_struct* t); | ||
104 | int ikglp_close(struct litmus_lock* l); | 106 | int ikglp_close(struct litmus_lock* l); |
105 | void ikglp_free(struct litmus_lock* l); | 107 | void ikglp_free(struct litmus_lock* l); |
106 | struct litmus_lock* ikglp_new(unsigned int m, struct litmus_lock_ops*, void* __user arg); | 108 | struct litmus_lock* ikglp_new(unsigned int m, struct litmus_lock_ops*, void* __user arg); |
107 | 109 | ||
108 | 110 | ||
109 | |||
110 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING | 111 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING |
111 | 112 | ||
112 | struct ikglp_queue_info | 113 | struct ikglp_queue_info |
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index ce24e62eee81..4fa705e65f0c 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -95,6 +95,126 @@ void litmus_exit_task(struct task_struct *tsk); | |||
95 | #define is_be(t) \ | 95 | #define is_be(t) \ |
96 | (tsk_rt(t)->task_params.cls == RT_CLASS_BEST_EFFORT) | 96 | (tsk_rt(t)->task_params.cls == RT_CLASS_BEST_EFFORT) |
97 | 97 | ||
98 | |||
99 | |||
100 | /* budget-related functions and macros */ | ||
101 | |||
102 | inline static int budget_exhausted(struct task_struct* t) { | ||
103 | return get_exec_time(t) >= get_exec_cost(t); | ||
104 | } | ||
105 | |||
106 | inline static lt_t budget_remaining(struct task_struct* t) { /* lt_t: avoid truncating 64-bit budgets */ | ||
107 | return (!budget_exhausted(t)) ? (get_exec_cost(t) - get_exec_time(t)) : 0; | ||
108 | } | ||
109 | |||
110 | #define budget_enforced(t) (\ | ||
111 | tsk_rt(t)->task_params.budget_policy != NO_ENFORCEMENT) | ||
112 | |||
113 | #define budget_precisely_tracked(t) (\ | ||
114 | tsk_rt(t)->task_params.budget_policy == PRECISE_ENFORCEMENT || \ | ||
115 | tsk_rt(t)->task_params.budget_signal_policy == PRECISE_SIGNALS) | ||
116 | |||
117 | #define budget_quantum_tracked(t) (\ | ||
118 | tsk_rt(t)->task_params.budget_policy == QUANTUM_ENFORCEMENT || \ | ||
119 | tsk_rt(t)->task_params.budget_signal_policy == QUANTUM_SIGNALS) | ||
120 | |||
121 | #define budget_signalled(t) (\ | ||
122 | tsk_rt(t)->task_params.budget_signal_policy != NO_SIGNALS) | ||
123 | |||
124 | #define budget_precisely_signalled(t) (\ | ||
125 | tsk_rt(t)->task_params.budget_signal_policy == PRECISE_SIGNALS) | ||
126 | |||
127 | #define bt_flag_is_set(t, flag_nr) (\ | ||
128 | test_bit(flag_nr, &tsk_rt(t)->budget.flags)) | ||
129 | |||
130 | #define bt_flag_test_and_set(t, flag_nr) (\ | ||
131 | test_and_set_bit(flag_nr, &tsk_rt(t)->budget.flags)) | ||
132 | |||
133 | #define bt_flag_set(t, flag_nr) (\ | ||
134 | set_bit(flag_nr, &tsk_rt(t)->budget.flags)) | ||
135 | |||
136 | #define bt_flag_clear(t, flag_nr) (\ | ||
137 | clear_bit(flag_nr, &tsk_rt(t)->budget.flags)) | ||
138 | |||
139 | #define bt_flags_reset(t) (\ | ||
140 | tsk_rt(t)->budget.flags = 0) | ||
141 | |||
142 | #define requeue_preempted_job(t) \ | ||
143 | (t && (!budget_exhausted(t) || !budget_enforced(t))) | ||
144 | |||
145 | |||
146 | #ifdef CONFIG_LITMUS_LOCKING | ||
147 | static inline void set_inh_task_linkback(struct task_struct* t, struct task_struct* linkto) | ||
148 | { | ||
149 | const int MAX_IDX = BITS_PER_LONG - 1; | ||
150 | |||
151 | int success = 0; | ||
152 | int old_idx = tsk_rt(t)->inh_task_linkback_idx; | ||
153 | |||
154 | /* is the linkback already set? */ | ||
155 | if (old_idx >= 0 && old_idx <= MAX_IDX) { | ||
156 | if ((BIT_MASK(old_idx) & tsk_rt(linkto)->used_linkback_slots) && | ||
157 | (tsk_rt(linkto)->inh_task_linkbacks[old_idx] == t)) { | ||
158 | TRACE_TASK(t, "linkback is current.\n"); | ||
159 | return; | ||
160 | } | ||
161 | BUG(); | ||
162 | } | ||
163 | |||
164 | /* kludge: upper limit on num linkbacks */ | ||
165 | BUG_ON(tsk_rt(linkto)->used_linkback_slots == ~0ul); | ||
166 | |||
167 | while(!success) { | ||
168 | int b = find_first_zero_bit(&tsk_rt(linkto)->used_linkback_slots, | ||
169 | BITS_PER_LONG); /* size is in bits, not bytes */ | ||
170 | |||
171 | BUG_ON(b > MAX_IDX); | ||
172 | |||
173 | /* set bit... */ | ||
174 | if (!test_and_set_bit(b, &tsk_rt(linkto)->used_linkback_slots)) { | ||
175 | TRACE_TASK(t, "linking back to %s/%d in slot %d\n", linkto->comm, linkto->pid, b); | ||
176 | if (tsk_rt(linkto)->inh_task_linkbacks[b]) | ||
177 | TRACE_TASK(t, "%s/%d already has %s/%d in slot %d\n", | ||
178 | linkto->comm, linkto->pid, | ||
179 | tsk_rt(linkto)->inh_task_linkbacks[b]->comm, | ||
180 | tsk_rt(linkto)->inh_task_linkbacks[b]->pid, | ||
181 | b); | ||
182 | |||
183 | /* TODO: allow dirty data to remain in [b] after code is tested */ | ||
184 | BUG_ON(tsk_rt(linkto)->inh_task_linkbacks[b] != NULL); | ||
185 | /* ...before setting slot */ | ||
186 | tsk_rt(linkto)->inh_task_linkbacks[b] = t; | ||
187 | tsk_rt(t)->inh_task_linkback_idx = b; | ||
188 | success = 1; | ||
189 | } | ||
190 | } | ||
191 | } | ||
192 | |||
193 | static inline void clear_inh_task_linkback(struct task_struct* t, struct task_struct* linkedto) | ||
194 | { | ||
195 | const int MAX_IDX = BITS_PER_LONG - 1; | ||
196 | |||
197 | int success = 0; | ||
198 | int slot = tsk_rt(t)->inh_task_linkback_idx; | ||
199 | |||
200 | if (slot < 0) { | ||
201 | TRACE_TASK(t, "assuming linkback already cleared.\n"); | ||
202 | return; | ||
203 | } | ||
204 | |||
205 | BUG_ON(slot > MAX_IDX); | ||
206 | BUG_ON(tsk_rt(linkedto)->inh_task_linkbacks[slot] != t); | ||
207 | |||
208 | /* be safe - clear slot before clearing the bit */ | ||
209 | tsk_rt(t)->inh_task_linkback_idx = -1; | ||
210 | tsk_rt(linkedto)->inh_task_linkbacks[slot] = NULL; | ||
211 | |||
212 | success = test_and_clear_bit(slot, &tsk_rt(linkedto)->used_linkback_slots); | ||
213 | |||
214 | BUG_ON(!success); | ||
215 | } | ||
216 | #endif | ||
217 | |||
98 | /* Our notion of time within LITMUS: kernel monotonic time. */ | 218 | /* Our notion of time within LITMUS: kernel monotonic time. */ |
99 | static inline lt_t litmus_clock(void) | 219 | static inline lt_t litmus_clock(void) |
100 | { | 220 | { |
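For reference, the slot-claim loop in set_inh_task_linkback() above is a scan-then-atomically-set pattern: find a zero bit, try to claim it with test_and_set_bit(), and rescan on a lost race. A self-contained userspace sketch of the same pattern, with GCC builtins standing in for the kernel bit helpers (which take their size argument in bits, hence the BITS_PER_LONG bound):

```c
/* Userspace sketch of the linkback slot-claim loop; __atomic_fetch_or and
 * __builtin_ctzl emulate test_and_set_bit()/find_first_zero_bit(). */
#include <stdio.h>
#include <limits.h>

#define MAX_SLOTS (sizeof(unsigned long) * CHAR_BIT)	/* BITS_PER_LONG */

static int claim_slot(unsigned long *used)
{
	for (;;) {
		unsigned long snap = *used;
		if (snap == ~0UL)
			return -1;			/* kludge limit: all slots taken */
		int b = __builtin_ctzl(~snap);		/* first zero bit */
		unsigned long mask = 1UL << b;
		/* atomically set bit b; retry if another CPU won the race */
		if (!(__atomic_fetch_or(used, mask, __ATOMIC_SEQ_CST) & mask))
			return b;
	}
}

int main(void)
{
	unsigned long used = 0;
	printf("claimed slot %d\n", claim_slot(&used));	/* 0 */
	printf("claimed slot %d\n", claim_slot(&used));	/* 1 */
	return 0;
}
```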
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
index 962ad5e6726a..08b06200b955 100644
--- a/include/litmus/locking.h
+++ b/include/litmus/locking.h
@@ -120,6 +120,12 @@ typedef lock_op_t lock_unlock_t; | |||
120 | typedef int (*lock_open_t)(struct litmus_lock *l, void* __user arg); | 120 | typedef int (*lock_open_t)(struct litmus_lock *l, void* __user arg); |
121 | typedef void (*lock_free_t)(struct litmus_lock *l); | 121 | typedef void (*lock_free_t)(struct litmus_lock *l); |
122 | 122 | ||
123 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
124 | /* Assumes task's base-priority already updated to reflect new priority. */ | ||
125 | typedef void (*lock_budget_exhausted_t)(struct litmus_lock* l, struct task_struct* t); | ||
126 | typedef void (*lock_omlp_virtual_unlock_t)(struct litmus_lock* l, struct task_struct* t); | ||
127 | #endif | ||
128 | |||
123 | struct litmus_lock_ops { | 129 | struct litmus_lock_ops { |
124 | /* Current task tries to obtain / drop a reference to a lock. | 130 | /* Current task tries to obtain / drop a reference to a lock. |
125 | * Optional methods, allowed by default. */ | 131 | * Optional methods, allowed by default. */ |
@@ -133,10 +139,12 @@ struct litmus_lock_ops { | |||
133 | /* The lock is no longer being referenced (mandatory method). */ | 139 | /* The lock is no longer being referenced (mandatory method). */ |
134 | lock_free_t deallocate; | 140 | lock_free_t deallocate; |
135 | 141 | ||
136 | |||
137 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 142 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
143 | lock_budget_exhausted_t budget_exhausted; | ||
144 | lock_omlp_virtual_unlock_t omlp_virtual_unlock; | ||
145 | |||
138 | void (*propagate_increase_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags); | 146 | void (*propagate_increase_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags); |
139 | void (*propagate_decrease_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags); | 147 | void (*propagate_decrease_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags, int budget_triggered); |
140 | #endif | 148 | #endif |
141 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | 149 | #ifdef CONFIG_LITMUS_DGL_SUPPORT |
142 | raw_spinlock_t* (*get_dgl_spin_lock)(struct litmus_lock *l); | 150 | raw_spinlock_t* (*get_dgl_spin_lock)(struct litmus_lock *l); |
@@ -151,6 +159,9 @@ struct litmus_lock_ops { | |||
151 | #endif | 159 | #endif |
152 | 160 | ||
153 | /* all flags at the end */ | 161 | /* all flags at the end */ |
162 | unsigned int supports_budget_exhaustion:1; | ||
163 | unsigned int is_omlp_family:1; | ||
164 | |||
154 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 165 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
155 | unsigned int supports_nesting:1; | 166 | unsigned int supports_nesting:1; |
156 | #endif | 167 | #endif |
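A lock type opts in to the new callbacks by filling the ops fields and flag bits added above. A pared-down, compilable sketch of that wiring; the struct is reduced to the new members and the handler body is a tracing placeholder, not the real fifo_mutex implementation:

```c
/* Reduced stand-ins for the kernel structs; only the fields added by this
 * patch are modeled. */
#include <stdio.h>

struct litmus_lock;
struct task_struct;

struct litmus_lock_ops {
	void (*budget_exhausted)(struct litmus_lock *l, struct task_struct *t);
	unsigned int supports_budget_exhaustion:1;
	unsigned int is_omlp_family:1;
};

static void fifo_mutex_budget_exhausted(struct litmus_lock *l,
					struct task_struct *t)
{
	(void)l; (void)t;
	puts("re-evaluate blocked waiter's priority");
}

static struct litmus_lock_ops fifo_mutex_ops = {
	.budget_exhausted           = fifo_mutex_budget_exhausted,
	.supports_budget_exhaustion = 1,
	.is_omlp_family             = 0,	/* FIFO mutex: not OMLP-family */
};

int main(void)
{
	/* mirrors the capability check in sobliv_revaluate_task() */
	if (fifo_mutex_ops.supports_budget_exhaustion)
		fifo_mutex_ops.budget_exhausted(NULL, NULL);
	return 0;
}
```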
diff --git a/include/litmus/prioq_lock.h b/include/litmus/prioq_lock.h
index 1b0a591ef1a6..1128e3aab077 100644
--- a/include/litmus/prioq_lock.h
+++ b/include/litmus/prioq_lock.h
@@ -41,6 +41,8 @@ void prioq_mutex_dgl_quick_lock(struct litmus_lock *l, struct litmus_lock *cur_l | |||
41 | int prioq_mutex_dgl_can_quick_lock(struct litmus_lock *l, struct task_struct *t); | 41 | int prioq_mutex_dgl_can_quick_lock(struct litmus_lock *l, struct task_struct *t); |
42 | #endif | 42 | #endif |
43 | 43 | ||
44 | void prioq_mutex_budget_exhausted(struct litmus_lock* l, struct task_struct* t); | ||
45 | |||
44 | void prioq_mutex_propagate_increase_inheritance(struct litmus_lock* l, | 46 | void prioq_mutex_propagate_increase_inheritance(struct litmus_lock* l, |
45 | struct task_struct* t, | 47 | struct task_struct* t, |
46 | raw_spinlock_t* to_unlock, | 48 | raw_spinlock_t* to_unlock, |
@@ -49,7 +51,8 @@ void prioq_mutex_propagate_increase_inheritance(struct litmus_lock* l, | |||
49 | void prioq_mutex_propagate_decrease_inheritance(struct litmus_lock* l, | 51 | void prioq_mutex_propagate_decrease_inheritance(struct litmus_lock* l, |
50 | struct task_struct* t, | 52 | struct task_struct* t, |
51 | raw_spinlock_t* to_unlock, | 53 | raw_spinlock_t* to_unlock, |
52 | unsigned long irqflags); | 54 | unsigned long irqflags, |
55 | int budget_triggered); | ||
53 | 56 | ||
54 | 57 | ||
55 | int prioq_mutex_lock(struct litmus_lock* l); | 58 | int prioq_mutex_lock(struct litmus_lock* l); |
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 40bdb26faebe..43a7e2126bf4 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -422,7 +422,11 @@ struct rt_param { | |||
422 | */ | 422 | */ |
423 | struct task_struct* inh_task; | 423 | struct task_struct* inh_task; |
424 | 424 | ||
425 | struct task_struct* inh_task_linkback; | 425 | /* kludge... */ |
426 | int inh_task_linkback_idx; | ||
427 | struct task_struct** inh_task_linkbacks; /* array. BITS_PER_LONG elements. */ | ||
428 | unsigned long used_linkback_slots; | ||
429 | |||
426 | 430 | ||
427 | #ifdef CONFIG_REALTIME_AUX_TASKS | 431 | #ifdef CONFIG_REALTIME_AUX_TASKS |
428 | unsigned int is_aux_task:1; | 432 | unsigned int is_aux_task:1; |
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index 6e7d6df2fb78..d9e3a46129f4 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -35,6 +35,9 @@ typedef struct task_struct* (*schedule_t)(struct task_struct * prev); | |||
35 | */ | 35 | */ |
36 | typedef void (*finish_switch_t)(struct task_struct *prev); | 36 | typedef void (*finish_switch_t)(struct task_struct *prev); |
37 | 37 | ||
38 | /* trigger a reschedule of 't' if 't' is running. */ | ||
39 | typedef void (*check_schedule_t)(struct task_struct *t); | ||
40 | |||
38 | /********************* task state changes ********************/ | 41 | /********************* task state changes ********************/ |
39 | 42 | ||
40 | /* Called to setup a new real-time task. | 43 | /* Called to setup a new real-time task. |
@@ -71,15 +74,15 @@ typedef long (*allocate_affinity_observer_t) ( | |||
71 | void* __user config); | 74 | void* __user config); |
72 | 75 | ||
73 | typedef void (*increase_prio_t)(struct task_struct* t, struct task_struct* prio_inh); | 76 | typedef void (*increase_prio_t)(struct task_struct* t, struct task_struct* prio_inh); |
74 | typedef void (*decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh); | 77 | typedef void (*decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh, int budget_triggered); |
75 | 78 | ||
76 | typedef int (*__increase_prio_t)(struct task_struct* t, struct task_struct* prio_inh); | 79 | typedef int (*__increase_prio_t)(struct task_struct* t, struct task_struct* prio_inh); |
77 | typedef int (*__decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh); | 80 | typedef int (*__decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh, int budget_triggered); |
78 | 81 | ||
79 | typedef void (*nested_increase_prio_t)(struct task_struct* t, struct task_struct* prio_inh, | 82 | typedef void (*nested_increase_prio_t)(struct task_struct* t, struct task_struct* prio_inh, |
80 | raw_spinlock_t *to_unlock, unsigned long irqflags); | 83 | raw_spinlock_t *to_unlock, unsigned long irqflags); |
81 | typedef void (*nested_decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh, | 84 | typedef void (*nested_decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh, |
82 | raw_spinlock_t *to_unlock, unsigned long irqflags); | 85 | raw_spinlock_t *to_unlock, unsigned long irqflags, int budget_triggered); |
83 | 86 | ||
84 | 87 | ||
85 | typedef int (*enqueue_pai_tasklet_t)(struct tasklet_struct* tasklet); | 88 | typedef int (*enqueue_pai_tasklet_t)(struct tasklet_struct* tasklet); |
@@ -129,6 +132,7 @@ struct sched_plugin { | |||
129 | scheduler_tick_t tick; | 132 | scheduler_tick_t tick; |
130 | schedule_t schedule; | 133 | schedule_t schedule; |
131 | finish_switch_t finish_switch; | 134 | finish_switch_t finish_switch; |
135 | check_schedule_t check_schedule; | ||
132 | 136 | ||
133 | 137 | ||
134 | /* syscall backend */ | 138 | /* syscall backend */ |
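The widened hook signatures let a plugin distinguish a budget-triggered priority "decrease" (which, as the C-EDF changes below note, may not be a decrease at all) from an ordinary one at lock release. A small sketch of the typedef in use; cedf_decrease_prio here is a hypothetical stand-in, not the plugin's real handler:

```c
/* Sketch: the budget_triggered flag threaded through decrease_prio_t. */
#include <stdio.h>

struct task_struct;

typedef void (*decrease_prio_t)(struct task_struct *t,
				struct task_struct *prio_inh,
				int budget_triggered);

static void cedf_decrease_prio(struct task_struct *t,
			       struct task_struct *prio_inh,
			       int budget_triggered)
{
	(void)t; (void)prio_inh;
	printf("decrease_prio: budget_triggered=%d\n", budget_triggered);
}

int main(void)
{
	decrease_prio_t hook = cedf_decrease_prio;
	hook(NULL, NULL, 1);	/* budget-exhaustion path passes 1 */
	hook(NULL, NULL, 0);	/* unlock paths pass 0 */
	return 0;
}
```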
diff --git a/litmus/aux_tasks.c b/litmus/aux_tasks.c
index db6523a3dcf7..8942288d2222 100644
--- a/litmus/aux_tasks.c
+++ b/litmus/aux_tasks.c
@@ -63,7 +63,7 @@ int exit_aux_task(struct task_struct *t) | |||
63 | #ifdef CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE | 63 | #ifdef CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE |
64 | list_del(&tsk_rt(t)->aux_task_node); | 64 | list_del(&tsk_rt(t)->aux_task_node); |
65 | if (tsk_rt(t)->inh_task) { | 65 | if (tsk_rt(t)->inh_task) { |
66 | litmus->__decrease_prio(t, NULL); | 66 | litmus->__decrease_prio(t, NULL, 0); |
67 | } | 67 | } |
68 | #endif | 68 | #endif |
69 | 69 | ||
@@ -119,7 +119,7 @@ static int aux_tasks_decrease_priority(struct task_struct *leader, struct task_s | |||
119 | } | 119 | } |
120 | else { | 120 | else { |
121 | // TRACE_CUR("decreasing %s/%d.\n", aux->comm, aux->pid); | 121 | // TRACE_CUR("decreasing %s/%d.\n", aux->comm, aux->pid); |
122 | retval = litmus->__decrease_prio(aux, hp); | 122 | retval = litmus->__decrease_prio(aux, hp, 0); |
123 | } | 123 | } |
124 | } | 124 | } |
125 | 125 | ||
diff --git a/litmus/budget.c b/litmus/budget.c
index 91755cf2787c..779506abf119 100644
--- a/litmus/budget.c
+++ b/litmus/budget.c
@@ -164,18 +164,14 @@ void sobliv_on_scheduled(struct task_struct* t) | |||
164 | BUG_ON(!t); | 164 | BUG_ON(!t); |
165 | 165 | ||
166 | if (!bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) { | 166 | if (!bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) { |
167 | if (tsk_rt(t)->budget.timer.armed) { | 167 | if (tsk_rt(t)->budget.timer.armed) |
168 | TRACE_TASK(t, "budget timer already armed.\n"); | 168 | TRACE_TASK(t, "budget timer already armed.\n"); |
169 | } | 169 | else |
170 | else { | ||
171 | arm_enforcement_timer(t); | 170 | arm_enforcement_timer(t); |
172 | } | ||
173 | } | 171 | } |
174 | 172 | ||
175 | if (tsk_rt(t)->inh_task) { | 173 | if (tsk_rt(t)->inh_task) |
176 | BUG_ON(is_running(tsk_rt(t)->inh_task)); | 174 | BUG_ON(is_running(tsk_rt(t)->inh_task)); |
177 | tsk_rt(tsk_rt(t)->inh_task)->inh_task_linkback = t; | ||
178 | } | ||
179 | } | 175 | } |
180 | 176 | ||
181 | void sobliv_on_blocked(struct task_struct* t) | 177 | void sobliv_on_blocked(struct task_struct* t) |
@@ -203,24 +199,80 @@ void sobliv_on_sleep(struct task_struct* t) | |||
203 | 199 | ||
204 | void sobliv_on_inherit(struct task_struct* t, struct task_struct* prio_inh) | 200 | void sobliv_on_inherit(struct task_struct* t, struct task_struct* prio_inh) |
205 | { | 201 | { |
206 | BUG_ON(!prio_inh); | 202 | // BUG_ON(!prio_inh); |
207 | 203 | // | |
208 | if (budget_precisely_tracked(t)) { | 204 | // if (budget_precisely_tracked(t)) { |
209 | TRACE_TASK(t, "inheriting from %s/%d. stop draining own budget.\n", | 205 | // TRACE_TASK(t, "inheriting from %s/%d. stop draining own budget.\n", |
210 | prio_inh->comm, prio_inh->pid); | 206 | // prio_inh->comm, prio_inh->pid); |
211 | cancel_enforcement_timer(t); | 207 | // cancel_enforcement_timer(t); |
212 | } | 208 | // } |
213 | } | 209 | } |
214 | 210 | ||
215 | void sobliv_on_disinherit(struct task_struct* t, struct task_struct* prio_inh) | 211 | void sobliv_on_disinherit(struct task_struct* t, struct task_struct* prio_inh) |
216 | { | 212 | { |
217 | if (!prio_inh && budget_precisely_tracked(t)) { | 213 | // if (!prio_inh && budget_precisely_tracked(t)) { |
218 | TRACE_TASK(t, "assuming base priority. start draining own budget.\n"); | 214 | // TRACE_TASK(t, "assuming base priority. start draining own budget.\n"); |
219 | arm_enforcement_timer(t); | 215 | // arm_enforcement_timer(t); |
216 | // } | ||
217 | } | ||
218 | |||
219 | void sobliv_revaluate_task(struct task_struct* t) | ||
220 | { | ||
221 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
222 | struct litmus_lock *blocked_lock = NULL; | ||
223 | |||
224 | TRACE_TASK(t, "reevaluating locks in light of budget exhaustion.\n"); | ||
225 | |||
226 | /* do we need to inherit from any tasks now that our own | ||
227 | * priority has decreased? */ | ||
228 | raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
229 | if (holds_locks(t)) { | ||
230 | struct task_struct* hp_blocked = top_priority(&tsk_rt(t)->hp_blocked_tasks); | ||
231 | if (litmus->compare(hp_blocked, t)) | ||
232 | litmus->increase_prio(t, effective_priority(hp_blocked)); | ||
233 | } | ||
234 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
235 | |||
236 | /* do we need to tell the lock we're blocked on about our | ||
237 | * changed priority? */ | ||
238 | blocked_lock = tsk_rt(t)->blocked_lock; | ||
239 | if(blocked_lock) { | ||
240 | if(blocked_lock->ops->supports_budget_exhaustion) { | ||
241 | TRACE_TASK(t, "Lock %d supports budget exhaustion.\n", | ||
242 | blocked_lock->ident); | ||
243 | blocked_lock->ops->budget_exhausted(blocked_lock, t); | ||
244 | } | ||
220 | } | 245 | } |
246 | else { | ||
247 | TRACE_TASK(t, "Budget exhausted while task not blocked on Litmus lock.\n"); | ||
248 | } | ||
249 | #else | ||
250 | /* prio-reeval currently relies upon nested locking infrastructure */ | ||
251 | TRACE_TASK(t, | ||
252 | "Unable to check if sleeping task is blocked " | ||
253 | "on Litmus lock without " | ||
254 | "CONFIG_LITMUS_NESTED_LOCKING enabled.\n"); | ||
255 | #endif | ||
256 | |||
257 | /* TODO: If we hold an OMLP-family outermost lock, then we may | ||
258 | * need to move a task into a fifo queue */ | ||
259 | |||
260 | |||
261 | |||
262 | // /* anyone who inherits from me may need to be rescheduled */ | ||
263 | // linkback = tsk_rt(t)->inh_task_linkback; | ||
264 | // if (linkback) { | ||
265 | // /* TODO: IS THIS THREAD SAFE???? */ | ||
266 | // TRACE_TASK(t, "Checking if inheritor %s/%d needs to be rescheduled.\n", | ||
267 | // linkback->comm, | ||
268 | // linkback->pid); | ||
269 | // litmus->check_schedule(linkback); | ||
270 | // } | ||
221 | } | 271 | } |
222 | 272 | ||
223 | 273 | ||
274 | |||
275 | |||
224 | static enum hrtimer_restart __on_timeout(struct hrtimer *timer) | 276 | static enum hrtimer_restart __on_timeout(struct hrtimer *timer) |
225 | { | 277 | { |
226 | enum hrtimer_restart restart; | 278 | enum hrtimer_restart restart; |
diff --git a/litmus/fifo_lock.c b/litmus/fifo_lock.c
index 0cbba2424701..ed637044c948 100644
--- a/litmus/fifo_lock.c
+++ b/litmus/fifo_lock.c
@@ -312,8 +312,6 @@ int fifo_mutex_lock(struct litmus_lock* l) | |||
312 | return 0; | 312 | return 0; |
313 | } | 313 | } |
314 | 314 | ||
315 | |||
316 | |||
317 | int fifo_mutex_unlock(struct litmus_lock* l) | 315 | int fifo_mutex_unlock(struct litmus_lock* l) |
318 | { | 316 | { |
319 | struct task_struct *t = current, *next = NULL; | 317 | struct task_struct *t = current, *next = NULL; |
@@ -365,7 +363,7 @@ int fifo_mutex_unlock(struct litmus_lock* l) | |||
365 | WARN_ON(1); | 363 | WARN_ON(1); |
366 | } | 364 | } |
367 | 365 | ||
368 | litmus->decrease_prio(t, new_max_eff_prio); | 366 | litmus->decrease_prio(t, new_max_eff_prio, 0); |
369 | } | 367 | } |
370 | } | 368 | } |
371 | 369 | ||
@@ -514,9 +512,9 @@ out: | |||
514 | 512 | ||
515 | 513 | ||
516 | void fifo_mutex_propagate_increase_inheritance(struct litmus_lock* l, | 514 | void fifo_mutex_propagate_increase_inheritance(struct litmus_lock* l, |
517 | struct task_struct* t, | 515 | struct task_struct* t, |
518 | raw_spinlock_t* to_unlock, | 516 | raw_spinlock_t* to_unlock, |
519 | unsigned long irqflags) | 517 | unsigned long irqflags) |
520 | { | 518 | { |
521 | struct fifo_mutex *mutex = fifo_mutex_from_lock(l); | 519 | struct fifo_mutex *mutex = fifo_mutex_from_lock(l); |
522 | 520 | ||
@@ -608,17 +606,14 @@ void fifo_mutex_propagate_increase_inheritance(struct litmus_lock* l, | |||
608 | } | 606 | } |
609 | 607 | ||
610 | 608 | ||
611 | void fifo_mutex_propagate_decrease_inheritance(struct litmus_lock* l, | 609 | inline static void __fifo_mutex_propagate_decrease_inheritance( |
612 | struct task_struct* t, | 610 | struct litmus_lock* l, |
613 | raw_spinlock_t* to_unlock, | 611 | struct task_struct* t, |
614 | unsigned long irqflags) | 612 | unsigned long irqflags, |
613 | int budget_triggered) | ||
615 | { | 614 | { |
615 | /* assumes mutex->lock is already held */ | ||
616 | struct fifo_mutex *mutex = fifo_mutex_from_lock(l); | 616 | struct fifo_mutex *mutex = fifo_mutex_from_lock(l); |
617 | |||
618 | // relay-style locking | ||
619 | lock_fine(&mutex->lock); | ||
620 | unlock_fine(to_unlock); | ||
621 | |||
622 | if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked | 617 | if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked |
623 | if(t == mutex->hp_waiter) { | 618 | if(t == mutex->hp_waiter) { |
624 | struct task_struct *owner = mutex->owner; | 619 | struct task_struct *owner = mutex->owner; |
@@ -633,7 +628,7 @@ void fifo_mutex_propagate_decrease_inheritance(struct litmus_lock* l, | |||
633 | binheap_delete(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); | 628 | binheap_delete(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); |
634 | mutex->hp_waiter = fifo_mutex_find_hp_waiter(mutex, NULL); | 629 | mutex->hp_waiter = fifo_mutex_find_hp_waiter(mutex, NULL); |
635 | l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ? | 630 | l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ? |
636 | effective_priority(mutex->hp_waiter) : NULL; | 631 | effective_priority(mutex->hp_waiter) : NULL; |
637 | binheap_add(&l->nest.hp_binheap_node, | 632 | binheap_add(&l->nest.hp_binheap_node, |
638 | &tsk_rt(owner)->hp_blocked_tasks, | 633 | &tsk_rt(owner)->hp_blocked_tasks, |
639 | struct nested_info, hp_binheap_node); | 634 | struct nested_info, hp_binheap_node); |
@@ -647,11 +642,11 @@ void fifo_mutex_propagate_decrease_inheritance(struct litmus_lock* l, | |||
647 | 642 | ||
648 | struct task_struct *decreased_prio; | 643 | struct task_struct *decreased_prio; |
649 | 644 | ||
650 | TRACE_CUR("Propagating decreased inheritance to holder of lock %d.\n", | 645 | TRACE_TASK(t, "Propagating decreased inheritance to holder of lock %d.\n", |
651 | l->ident); | 646 | l->ident); |
652 | 647 | ||
653 | if(litmus->__compare(new_max_eff_prio, BASE, owner, BASE)) { | 648 | if(litmus->__compare(new_max_eff_prio, BASE, owner, BASE)) { |
654 | TRACE_CUR("%s/%d has greater base priority than base priority of owner (%s/%d) of lock %d.\n", | 649 | TRACE_TASK(t, "%s/%d has greater base priority than base priority of owner (%s/%d) of lock %d.\n", |
655 | (new_max_eff_prio) ? new_max_eff_prio->comm : "null", | 650 | (new_max_eff_prio) ? new_max_eff_prio->comm : "null", |
656 | (new_max_eff_prio) ? new_max_eff_prio->pid : 0, | 651 | (new_max_eff_prio) ? new_max_eff_prio->pid : 0, |
657 | owner->comm, | 652 | owner->comm, |
@@ -661,7 +656,7 @@ void fifo_mutex_propagate_decrease_inheritance(struct litmus_lock* l, | |||
661 | decreased_prio = new_max_eff_prio; | 656 | decreased_prio = new_max_eff_prio; |
662 | } | 657 | } |
663 | else { | 658 | else { |
664 | TRACE_CUR("%s/%d has lesser base priority than base priority of owner (%s/%d) of lock %d.\n", | 659 | TRACE_TASK(t, "%s/%d has lesser base priority than base priority of owner (%s/%d) of lock %d.\n", |
665 | (new_max_eff_prio) ? new_max_eff_prio->comm : "null", | 660 | (new_max_eff_prio) ? new_max_eff_prio->comm : "null", |
666 | (new_max_eff_prio) ? new_max_eff_prio->pid : 0, | 661 | (new_max_eff_prio) ? new_max_eff_prio->pid : 0, |
667 | owner->comm, | 662 | owner->comm, |
@@ -672,7 +667,8 @@ void fifo_mutex_propagate_decrease_inheritance(struct litmus_lock* l, | |||
672 | } | 667 | } |
673 | 668 | ||
674 | // beware: recursion | 669 | // beware: recursion |
675 | litmus->nested_decrease_prio(owner, decreased_prio, &mutex->lock, irqflags); // will unlock mutex->lock | 670 | // will trigger reschedule of owner, if needed. |
671 | litmus->nested_decrease_prio(owner, decreased_prio, &mutex->lock, irqflags, budget_triggered); // will unlock mutex->lock | ||
676 | } | 672 | } |
677 | else { | 673 | else { |
678 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | 674 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); |
@@ -687,6 +683,9 @@ void fifo_mutex_propagate_decrease_inheritance(struct litmus_lock* l, | |||
687 | else { | 683 | else { |
688 | struct litmus_lock *still_blocked = tsk_rt(t)->blocked_lock; | 684 | struct litmus_lock *still_blocked = tsk_rt(t)->blocked_lock; |
689 | 685 | ||
686 | /* TODO: is this code path valid for budgets? */ | ||
687 | WARN_ON(1); | ||
688 | |||
690 | TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident); | 689 | TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident); |
691 | if(still_blocked) { | 690 | if(still_blocked) { |
692 | TRACE_TASK(t, "is still blocked on a lock though (lock %d).\n", | 691 | TRACE_TASK(t, "is still blocked on a lock though (lock %d).\n", |
@@ -698,7 +697,8 @@ void fifo_mutex_propagate_decrease_inheritance(struct litmus_lock* l, | |||
698 | still_blocked->ops->propagate_decrease_inheritance(still_blocked, | 697 | still_blocked->ops->propagate_decrease_inheritance(still_blocked, |
699 | t, | 698 | t, |
700 | &mutex->lock, | 699 | &mutex->lock, |
701 | irqflags); | 700 | irqflags, |
701 | budget_triggered); | ||
702 | } | 702 | } |
703 | else { | 703 | else { |
704 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", | 704 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", |
@@ -712,6 +712,45 @@ void fifo_mutex_propagate_decrease_inheritance(struct litmus_lock* l, | |||
712 | } | 712 | } |
713 | } | 713 | } |
714 | 714 | ||
715 | void fifo_mutex_propagate_decrease_inheritance(struct litmus_lock* l, | ||
716 | struct task_struct* t, | ||
717 | raw_spinlock_t* to_unlock, | ||
718 | unsigned long irqflags, | ||
719 | int budget_triggered) | ||
720 | { | ||
721 | struct fifo_mutex *mutex = fifo_mutex_from_lock(l); | ||
722 | |||
723 | // relay-style locking | ||
724 | lock_fine(&mutex->lock); | ||
725 | unlock_fine(to_unlock); | ||
726 | |||
727 | // unlocks mutex->lock | ||
728 | __fifo_mutex_propagate_decrease_inheritance(&mutex->litmus_lock, t, irqflags, budget_triggered); | ||
729 | } | ||
730 | |||
731 | |||
732 | /* t's base priority has (already) been decreased due to budget exhaustion */ | ||
733 | void fifo_mutex_budget_exhausted(struct litmus_lock* l, struct task_struct* t) | ||
734 | { | ||
735 | struct fifo_mutex *mutex = fifo_mutex_from_lock(l); | ||
736 | unsigned long flags = 0; | ||
737 | |||
738 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
739 | unsigned long dglirqflags; | ||
740 | raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(t); | ||
741 | lock_global_irqsave(dgl_lock, dglirqflags); | ||
742 | #endif | ||
743 | |||
744 | lock_fine_irqsave(&mutex->lock, flags); | ||
745 | |||
746 | // unlocks mutex->lock | ||
747 | __fifo_mutex_propagate_decrease_inheritance(&mutex->litmus_lock, t, flags, 1); | ||
748 | |||
749 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
750 | unlock_global_irqrestore(dgl_lock, dglirqflags); | ||
751 | #endif | ||
752 | } | ||
753 | |||
715 | 754 | ||
716 | int fifo_mutex_close(struct litmus_lock* l) | 755 | int fifo_mutex_close(struct litmus_lock* l) |
717 | { | 756 | { |
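The refactoring above splits the decrease propagation so fifo_mutex_budget_exhausted() can enter it directly: take the DGL lock (if configured), then the mutex's fine-grained lock, then call the shared helper, which releases mutex->lock on the way out. The helper itself uses the relay-style (hand-over-hand) fine-grained locking noted in the comments; a self-contained sketch of that discipline, with pthread spinlocks standing in for raw_spinlock_t:

```c
/* Relay-style locking: acquire the next lock in the chain before releasing
 * the previous one, so no gap exists where neither lock is held.
 * Compile with -pthread. */
#include <pthread.h>
#include <stdio.h>

static void relay_step(pthread_spinlock_t *next, pthread_spinlock_t *prev)
{
	pthread_spin_lock(next);	/* lock_fine(&mutex->lock) */
	pthread_spin_unlock(prev);	/* unlock_fine(to_unlock)  */
}

int main(void)
{
	pthread_spinlock_t a, b;
	pthread_spin_init(&a, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&b, PTHREAD_PROCESS_PRIVATE);

	pthread_spin_lock(&a);		/* caller arrives holding lock a */
	relay_step(&b, &a);		/* hand over from a to b */
	pthread_spin_unlock(&b);
	puts("relay complete");
	return 0;
}
```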
diff --git a/litmus/ikglp_lock.c b/litmus/ikglp_lock.c
index b0c8afe90122..160998f466ed 100644
--- a/litmus/ikglp_lock.c
+++ b/litmus/ikglp_lock.c
@@ -484,7 +484,7 @@ static void ikglp_refresh_owners_prio_decrease(struct fifo_queue *fq, | |||
484 | } | 484 | } |
485 | 485 | ||
486 | // beware: recursion | 486 | // beware: recursion |
487 | litmus->nested_decrease_prio(owner, decreased_prio, &sem->lock, flags); // will unlock mutex->lock | 487 | litmus->nested_decrease_prio(owner, decreased_prio, &sem->lock, flags, 0); // will unlock mutex->lock |
488 | } | 488 | } |
489 | else { | 489 | else { |
490 | TRACE_TASK(owner, "No need to propagate priority decrease forward.\n"); | 490 | TRACE_TASK(owner, "No need to propagate priority decrease forward.\n"); |
@@ -535,7 +535,7 @@ static void ikglp_remove_donation_from_owner(struct binheap_node *n, | |||
535 | } | 535 | } |
536 | 536 | ||
537 | // beware: recursion | 537 | // beware: recursion |
538 | litmus->nested_decrease_prio(owner, decreased_prio, &sem->lock, flags); // will unlock mutex->lock | 538 | litmus->nested_decrease_prio(owner, decreased_prio, &sem->lock, flags, 0); // will unlock mutex->lock |
539 | } | 539 | } |
540 | else { | 540 | else { |
541 | TRACE_TASK(owner, "No need to propagate priority decrease forward.\n"); | 541 | TRACE_TASK(owner, "No need to propagate priority decrease forward.\n"); |
@@ -1382,7 +1382,7 @@ int ikglp_unlock(struct litmus_lock* l) | |||
1382 | ++count; | 1382 | ++count; |
1383 | } | 1383 | } |
1384 | if (count) { | 1384 | if (count) { |
1385 | litmus->decrease_prio(t, NULL); | 1385 | litmus->decrease_prio(t, NULL, 0); |
1386 | } | 1386 | } |
1387 | WARN_ON(count > 2); // should not be greater than 2. only local fq inh and donation can be possible. | 1387 | WARN_ON(count > 2); // should not be greater than 2. only local fq inh and donation can be possible. |
1388 | } | 1388 | } |
@@ -1606,6 +1606,17 @@ out: | |||
1606 | } | 1606 | } |
1607 | 1607 | ||
1608 | 1608 | ||
1609 | void ikglp_budget_exhausted(struct litmus_lock* l, struct task_struct* t) | ||
1610 | { | ||
1611 | TRACE_TASK(t, "TODO!\n"); | ||
1612 | } | ||
1613 | |||
1614 | void ikglp_virtual_unlock(struct litmus_lock* l, struct task_struct* t) | ||
1615 | { | ||
1616 | TRACE_TASK(t, "TODO!\n"); | ||
1617 | } | ||
1618 | |||
1619 | |||
1609 | 1620 | ||
1610 | int ikglp_close(struct litmus_lock* l) | 1621 | int ikglp_close(struct litmus_lock* l) |
1611 | { | 1622 | { |
diff --git a/litmus/kfmlp_lock.c b/litmus/kfmlp_lock.c
index 7dd866185623..93c598205edd 100644
--- a/litmus/kfmlp_lock.c
+++ b/litmus/kfmlp_lock.c
@@ -150,7 +150,7 @@ static void kfmlp_steal_node(struct kfmlp_semaphore *sem, | |||
150 | (src->hp_waiter) ? src->hp_waiter->pid : -1); | 150 | (src->hp_waiter) ? src->hp_waiter->pid : -1); |
151 | 151 | ||
152 | if(src->owner && tsk_rt(src->owner)->inh_task == t) { | 152 | if(src->owner && tsk_rt(src->owner)->inh_task == t) { |
153 | litmus->decrease_prio(src->owner, src->hp_waiter); | 153 | litmus->decrease_prio(src->owner, src->hp_waiter, 0); |
154 | } | 154 | } |
155 | } | 155 | } |
156 | 156 | ||
@@ -356,7 +356,7 @@ int kfmlp_unlock(struct litmus_lock* l) | |||
356 | 356 | ||
357 | /* we lose the benefit of priority inheritance (if any) */ | 357 | /* we lose the benefit of priority inheritance (if any) */ |
358 | if (tsk_rt(t)->inh_task) | 358 | if (tsk_rt(t)->inh_task) |
359 | litmus->decrease_prio(t, NULL); | 359 | litmus->decrease_prio(t, NULL, 0); |
360 | 360 | ||
361 | 361 | ||
362 | /* check if there are jobs waiting for this resource */ | 362 | /* check if there are jobs waiting for this resource */ |
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 10d9e545a831..fcaf1fb49249 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -25,6 +25,10 @@ | |||
25 | #include <litmus/affinity.h> | 25 | #include <litmus/affinity.h> |
26 | #endif | 26 | #endif |
27 | 27 | ||
28 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
29 | #include <litmus/litmus_softirq.h> | ||
30 | #endif | ||
31 | |||
28 | #ifdef CONFIG_LITMUS_NVIDIA | 32 | #ifdef CONFIG_LITMUS_NVIDIA |
29 | #include <litmus/nvidia_info.h> | 33 | #include <litmus/nvidia_info.h> |
30 | #endif | 34 | #endif |
@@ -475,6 +479,7 @@ static void reinit_litmus_state(struct task_struct* p, int restore) | |||
475 | } | 479 | } |
476 | #endif | 480 | #endif |
477 | 481 | ||
482 | |||
478 | /* Restore preserved fields. */ | 483 | /* Restore preserved fields. */ |
479 | if (restore) { | 484 | if (restore) { |
480 | p->rt_param.task_params = user_config; | 485 | p->rt_param.task_params = user_config; |
@@ -516,6 +521,19 @@ long __litmus_admit_task(struct task_struct* tsk) | |||
516 | bheap_node_init(&tsk_rt(tsk)->heap_node, tsk); | 521 | bheap_node_init(&tsk_rt(tsk)->heap_node, tsk); |
517 | } | 522 | } |
518 | 523 | ||
524 | |||
525 | tsk_rt(tsk)->inh_task_linkback_idx = -1; /* denotes invalid idx */ | ||
526 | tsk_rt(tsk)->inh_task_linkbacks = kmalloc(BITS_PER_LONG*sizeof(void*), GFP_ATOMIC); | ||
527 | if (!tsk_rt(tsk)->inh_task_linkbacks) { | ||
528 | printk(KERN_WARNING "litmus: no memory for linkbacks.\n"); | ||
529 | retval = -ENOMEM; | ||
530 | goto out; | ||
531 | } | ||
532 | else { | ||
533 | memset(tsk_rt(tsk)->inh_task_linkbacks, 0, | ||
534 | BITS_PER_LONG*sizeof(*tsk_rt(tsk)->inh_task_linkbacks)); | ||
535 | } | ||
536 | |||
519 | #if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING) | 537 | #if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING) |
520 | init_gpu_affinity_state(tsk); | 538 | init_gpu_affinity_state(tsk); |
521 | #endif | 539 | #endif |
@@ -595,6 +613,9 @@ void litmus_exit_task(struct task_struct* tsk) | |||
595 | 613 | ||
596 | litmus->task_exit(tsk); | 614 | litmus->task_exit(tsk); |
597 | 615 | ||
616 | BUG_ON(!tsk_rt(tsk)->inh_task_linkbacks); | ||
617 | kfree(tsk_rt(tsk)->inh_task_linkbacks); | ||
618 | |||
598 | BUG_ON(bheap_node_in_heap(tsk_rt(tsk)->heap_node)); | 619 | BUG_ON(bheap_node_in_heap(tsk_rt(tsk)->heap_node)); |
599 | bheap_node_free(tsk_rt(tsk)->heap_node); | 620 | bheap_node_free(tsk_rt(tsk)->heap_node); |
600 | release_heap_free(tsk_rt(tsk)->rel_heap); | 621 | release_heap_free(tsk_rt(tsk)->rel_heap); |
diff --git a/litmus/prioq_lock.c b/litmus/prioq_lock.c
index c9ffab1564c3..cd351079c8bf 100644
--- a/litmus/prioq_lock.c
+++ b/litmus/prioq_lock.c
@@ -876,7 +876,7 @@ int prioq_mutex_unlock(struct litmus_lock* l) | |||
876 | WARN_ON(1); | 876 | WARN_ON(1); |
877 | } | 877 | } |
878 | 878 | ||
879 | litmus->decrease_prio(t, new_max_eff_prio); | 879 | litmus->decrease_prio(t, new_max_eff_prio, 0); |
880 | } | 880 | } |
881 | } | 881 | } |
882 | 882 | ||
@@ -1161,7 +1161,8 @@ void prioq_mutex_propagate_increase_inheritance(struct litmus_lock* l, | |||
1161 | void prioq_mutex_propagate_decrease_inheritance(struct litmus_lock* l, | 1161 | void prioq_mutex_propagate_decrease_inheritance(struct litmus_lock* l, |
1162 | struct task_struct* t, | 1162 | struct task_struct* t, |
1163 | raw_spinlock_t* to_unlock, | 1163 | raw_spinlock_t* to_unlock, |
1164 | unsigned long irqflags) | 1164 | unsigned long irqflags, |
1165 | int budget_triggered) | ||
1165 | { | 1166 | { |
1166 | struct prioq_mutex *mutex = prioq_mutex_from_lock(l); | 1167 | struct prioq_mutex *mutex = prioq_mutex_from_lock(l); |
1167 | 1168 | ||
@@ -1239,7 +1240,7 @@ void prioq_mutex_propagate_decrease_inheritance(struct litmus_lock* l, | |||
1239 | } | 1240 | } |
1240 | 1241 | ||
1241 | // beware: recursion | 1242 | // beware: recursion |
1242 | litmus->nested_decrease_prio(owner, decreased_prio, &mutex->lock, irqflags); // will unlock mutex->lock | 1243 | litmus->nested_decrease_prio(owner, decreased_prio, &mutex->lock, irqflags, budget_triggered); // will unlock mutex->lock |
1243 | } | 1244 | } |
1244 | else { | 1245 | else { |
1245 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | 1246 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); |
@@ -1273,7 +1274,8 @@ void prioq_mutex_propagate_decrease_inheritance(struct litmus_lock* l, | |||
1273 | still_blocked->ops->propagate_decrease_inheritance(still_blocked, | 1274 | still_blocked->ops->propagate_decrease_inheritance(still_blocked, |
1274 | t, | 1275 | t, |
1275 | &mutex->lock, | 1276 | &mutex->lock, |
1276 | irqflags); | 1277 | irqflags, |
1278 | budget_triggered); | ||
1277 | } | 1279 | } |
1278 | else { | 1280 | else { |
1279 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", | 1281 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", |
@@ -1287,6 +1289,10 @@ void prioq_mutex_propagate_decrease_inheritance(struct litmus_lock* l, | |||
1287 | } | 1289 | } |
1288 | } | 1290 | } |
1289 | 1291 | ||
1292 | void prioq_mutex_budget_exhausted(struct litmus_lock* l, struct task_struct* t) | ||
1293 | { | ||
1294 | TRACE_TASK(t, "TODO!\n"); | ||
1295 | } | ||
1290 | 1296 | ||
1291 | int prioq_mutex_close(struct litmus_lock* l) | 1297 | int prioq_mutex_close(struct litmus_lock* l) |
1292 | { | 1298 | { |
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 64e04405dd32..4551fb851dbd 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -461,7 +461,7 @@ static noinline void job_completion(struct task_struct *t, int forced) | |||
461 | /* Don't advance deadline/refresh budget. Use the remaining budget for | 461 | /* Don't advance deadline/refresh budget. Use the remaining budget for |
462 | * the backlogged job. | 462 | * the backlogged job. |
463 | * | 463 | * |
464 | * NOTE: Allowing backlogged jobs to consume remaining budget may affet | 464 | * NOTE: Allowing backlogged jobs to consume remaining budget may affect |
465 | * blocking bound analysis. | 465 | * blocking bound analysis. |
466 | */ | 466 | */ |
467 | } | 467 | } |
@@ -536,6 +536,26 @@ static enum hrtimer_restart cedf_simple_on_exhausted(struct task_struct *t) | |||
536 | return HRTIMER_NORESTART; | 536 | return HRTIMER_NORESTART; |
537 | } | 537 | } |
538 | 538 | ||
539 | static void cedf_check_schedule(struct task_struct* t) | ||
540 | { | ||
541 | int cpu; | ||
542 | |||
543 | cpu = (tsk_rt(t)->linked_on != NO_CPU) ? | ||
544 | tsk_rt(t)->linked_on : tsk_rt(t)->scheduled_on; | ||
545 | if (cpu == smp_processor_id()) { | ||
546 | TRACE_TASK(t, "is preemptable => FORCE_RESCHED\n"); | ||
547 | litmus_reschedule_local(); | ||
548 | set_will_schedule(); | ||
549 | } | ||
550 | else if (cpu != NO_CPU) { | ||
551 | TRACE_TASK(t, "is preemptable on remote cpu (%d) => FORCE_RESCHED\n", cpu); | ||
552 | litmus_reschedule(cpu); | ||
553 | } | ||
554 | else { | ||
555 | TRACE_TASK(t, "is not running, so no rescheduling necessary.\n"); | ||
556 | } | ||
557 | } | ||
558 | |||
539 | static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t) | 559 | static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t) |
540 | { | 560 | { |
541 | enum hrtimer_restart restart = HRTIMER_NORESTART; | 561 | enum hrtimer_restart restart = HRTIMER_NORESTART; |
@@ -576,10 +596,10 @@ static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t) | |||
576 | litmus_reschedule(cpu); | 596 | litmus_reschedule(cpu); |
577 | } | 597 | } |
578 | else { | 598 | else { |
599 | lt_t remaining; | ||
579 | cedf_domain_t* cluster = task_cpu_cluster(t); | 600 | cedf_domain_t* cluster = task_cpu_cluster(t); |
601 | int do_prio_reeval = 0; | ||
580 | unsigned long flags; | 602 | unsigned long flags; |
581 | lt_t remaining; | ||
582 | |||
583 | 603 | ||
584 | BUG_ON(cpu != NO_CPU); | 604 | BUG_ON(cpu != NO_CPU); |
585 | 605 | ||
@@ -587,63 +607,49 @@ static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t) | |||
587 | // 2) if holds locks, tell the locking protocol to re-eval priority | 607 | // 2) if holds locks, tell the locking protocol to re-eval priority |
588 | // 3) -- the LP must undo any inheritance relations if appropriate | 608 | // 3) -- the LP must undo any inheritance relations if appropriate |
589 | 609 | ||
590 | |||
591 | /* force job completion */ | 610 | /* force job completion */ |
592 | TRACE_TASK(t, "blocked, postponing deadline\n"); | 611 | TRACE_TASK(t, "blocked, postponing deadline\n"); |
593 | 612 | ||
594 | raw_spin_lock_irqsave(&cluster->cluster_lock, flags); | 613 | raw_spin_lock_irqsave(&cluster->cluster_lock, flags); |
595 | job_completion(t, 1); /* refreshes budget */ | 614 | job_completion(t, 1); /* refreshes budget */ |
596 | 615 | ||
616 | #ifdef CONFIG_LITMUS_LOCKING | ||
617 | /* Decrease in base-priority is masked by inheritance, so | ||
618 | * we do not need to recheck any prior scheduling decisions | ||
619 | * or established inheritance relations. */ | ||
620 | do_prio_reeval = (tsk_rt(t)->inh_task == NULL); | ||
621 | |||
622 | /* drop the lock to make prio propagation easy... may need to | ||
623 | * do this all within cluster lock if there are races... */ | ||
624 | raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); | ||
597 | 625 | ||
598 | //#ifdef CONFIG_LITMUS_LOCKING | 626 | if (do_prio_reeval) |
599 | // if (tsk_rt(t))->inh_task) { | 627 | sobliv_revaluate_task(t); |
600 | // /* change in base-priority is masked */ | 628 | else |
601 | // } | 629 | TRACE_TASK(t, "skipping reevaluation since inheritance " |
602 | // else { | 630 | "masks change in base-priority.\n"); |
603 | //#ifdef CONFIG_LITMUS_NESTED_LOCKING | 631 | |
604 | // struct litmus_lock *blocked_lock; | 632 | |
605 | // | 633 | /* push any changed state... */ |
606 | // raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); | 634 | if (do_prio_reeval && tsk_rt(t)->used_linkback_slots) { |
607 | // if (holds_locks(t)) { | 635 | int i; |
608 | // struct task_struct* hp_blocked = top_priority(&tsk_rt(t)->hp_blocked_tasks); | 636 | raw_spin_lock_irqsave(&cluster->cluster_lock, flags); |
609 | // if (litmus->compare(hp_blocked, t)) | 637 | /* any running task that inherits from t may need to be rescheduled */ |
610 | // __increase_priority_inheritance(t, effective_priority(hp_blocked)); | 638 | for (i = find_first_bit(&tsk_rt(t)->used_linkback_slots, |
611 | // } | 639 | BITS_PER_LONG); /* size in bits, not bytes */ |
612 | // | 640 | i < BITS_PER_LONG; |
613 | // blocked_lock = tsk_rt(t)->blocked_lock; | 641 | i = find_next_bit(&tsk_rt(t)->used_linkback_slots, |
614 | // if(blocked_lock) { | 642 | BITS_PER_LONG, i+1)) { /* bits, not bytes */ |
615 | // if(blocked_lock->ops->supports_nesting) { | 643 | cedf_check_schedule(tsk_rt(t)->inh_task_linkbacks[i]); |
616 | // TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n", | 644 | } |
617 | // blocked_lock->ident); | 645 | raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); |
618 | // | 646 | } |
619 | // // beware: recursion | 647 | #endif |
620 | // blocked_lock->ops->propagate_decrease_inheritance(blocked_lock, t, | ||
621 | // to_unlock, | ||
622 | // irqflags); | ||
623 | // } | ||
624 | // else { | ||
625 | // TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", | ||
626 | // blocked_lock); | ||
627 | // unlock_fine_irqrestore(to_unlock, irqflags); | ||
628 | // } | ||
629 | // } | ||
630 | // else { | ||
631 | // TRACE_TASK(t, "is not blocked. No propagation.\n"); | ||
632 | // unlock_fine_irqrestore(to_unlock, irqflags); | ||
633 | // } | ||
634 | //#endif | ||
635 | // } | ||
636 | //#endif | ||
637 | 648 | ||
638 | hrtimer_forward_now(&get_budget_timer(t).timer.timer, | 649 | hrtimer_forward_now(&get_budget_timer(t).timer.timer, |
639 | ns_to_ktime(budget_remaining(t))); | 650 | ns_to_ktime(budget_remaining(t))); |
640 | remaining = hrtimer_get_expires_ns(&get_budget_timer(t).timer.timer); | 651 | remaining = hrtimer_get_expires_ns(&get_budget_timer(t).timer.timer); |
641 | 652 | ||
642 | raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); | ||
643 | |||
644 | |||
645 | |||
646 | |||
647 | TRACE_TASK(t, "rearmed timer to %ld\n", remaining); | 653 | TRACE_TASK(t, "rearmed timer to %ld\n", remaining); |
648 | restart = HRTIMER_RESTART; | 654 | restart = HRTIMER_RESTART; |
649 | } | 655 | } |
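The linkback walk above visits every set bit in used_linkback_slots and pokes the corresponding running inheritor. A standalone sketch of the same walk; a clear-lowest-set-bit loop replaces find_first_bit/find_next_bit, whose size arguments are in bits, hence the BITS_PER_LONG fix noted above:

```c
/* Walk every set bit of a slot bitmap, as the linkback loop does. */
#include <stdio.h>

int main(void)
{
	unsigned long used = 0x16;	/* slots 1, 2, 4 in use */

	for (unsigned long rest = used; rest; rest &= rest - 1) {
		int i = __builtin_ctzl(rest);	/* lowest set bit */
		printf("cedf_check_schedule(inh_task_linkbacks[%d])\n", i);
	}
	return 0;
}
```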
@@ -1131,6 +1137,25 @@ static struct task_struct* cedf_schedule(struct task_struct * prev) | |||
1131 | out_set_state: | 1137 | out_set_state: |
1132 | #endif | 1138 | #endif |
1133 | 1139 | ||
1140 | #ifdef CONFIG_LITMUS_LOCKING | ||
1141 | /* Update priority inheritance linkbacks. | ||
1142 | * A blocked task may have multiple tasks that inherit from it, but only | ||
1143 | * one of those tasks should be runnable. Provide a link-back between the | ||
1144 | * blocked task and the one that inherits from it. */ | ||
1145 | |||
1146 | /* TODO: Support klmirqd and aux tasks */ | ||
1147 | /* TODO: MOVE THESE CALLS TO __increase AND __decrease TO CATCH ALL CASES. | ||
1148 | PAY ATTENTION TO RUN-STATE OF INHERITOR & INHERITEE */ | ||
1149 | if (next != prev) { | ||
1150 | if (prev && tsk_rt(prev)->inh_task) { | ||
1151 | clear_inh_task_linkback(prev, tsk_rt(prev)->inh_task); | ||
1152 | } | ||
1153 | if (next && tsk_rt(next)->inh_task) { | ||
1154 | set_inh_task_linkback(next, tsk_rt(next)->inh_task); | ||
1155 | } | ||
1156 | } | ||
1157 | #endif | ||
1158 | |||
1134 | sched_state_task_picked(); | 1159 | sched_state_task_picked(); |
1135 | raw_spin_unlock(&cluster->cluster_lock); | 1160 | raw_spin_unlock(&cluster->cluster_lock); |
1136 | 1161 | ||
@@ -1414,6 +1439,7 @@ static int __increase_priority_inheritance(struct task_struct* t, | |||
1414 | int linked_on; | 1439 | int linked_on; |
1415 | int check_preempt = 0; | 1440 | int check_preempt = 0; |
1416 | cedf_domain_t* cluster; | 1441 | cedf_domain_t* cluster; |
1442 | struct task_struct* old_prio_inh = tsk_rt(t)->inh_task; | ||
1417 | 1443 | ||
1418 | if (prio_inh && prio_inh == effective_priority(t)) { | 1444 | if (prio_inh && prio_inh == effective_priority(t)) { |
1419 | /* relationship already established. */ | 1445 | /* relationship already established. */ |
@@ -1464,15 +1490,26 @@ static int __increase_priority_inheritance(struct task_struct* t, | |||
1464 | #endif | 1490 | #endif |
1465 | sched_trace_eff_prio_change(t, prio_inh); | 1491 | sched_trace_eff_prio_change(t, prio_inh); |
1466 | 1492 | ||
1467 | if (NULL != get_budget_timer(t).ops->on_inherit && | 1493 | /* clear out old inheritance relation */ |
1468 | NULL != get_budget_timer(prio_inh).ops->on_inherit) { | 1494 | if (NULL != old_prio_inh && |
1469 | get_budget_timer(t).ops->on_inherit(t, prio_inh); | 1495 | NULL != get_budget_timer(t).ops->on_disinherit && |
1496 | NULL != get_budget_timer(old_prio_inh).ops->on_disinherit) { | ||
1497 | get_budget_timer(t).ops->on_disinherit(t, old_prio_inh); | ||
1470 | } | 1498 | } |
1499 | if (old_prio_inh) | ||
1500 | clear_inh_task_linkback(t, old_prio_inh); | ||
1471 | 1501 | ||
1472 | TRACE_TASK(t, "inherits priority from %s/%d\n", | 1502 | TRACE_TASK(t, "inherits priority from %s/%d\n", |
1473 | prio_inh->comm, prio_inh->pid); | 1503 | prio_inh->comm, prio_inh->pid); |
1474 | tsk_rt(t)->inh_task = prio_inh; | 1504 | tsk_rt(t)->inh_task = prio_inh; |
1475 | 1505 | ||
1506 | /* update inheritance relation */ | ||
1507 | if (prio_inh && | ||
1508 | NULL != get_budget_timer(t).ops->on_inherit && | ||
1509 | NULL != get_budget_timer(prio_inh).ops->on_inherit) { | ||
1510 | get_budget_timer(t).ops->on_inherit(t, prio_inh); | ||
1511 | } | ||
1512 | |||
1476 | linked_on = tsk_rt(t)->linked_on; | 1513 | linked_on = tsk_rt(t)->linked_on; |
1477 | 1514 | ||
1478 | /* If it is scheduled, then we need to reorder the CPU heap. */ | 1515 | /* If it is scheduled, then we need to reorder the CPU heap. */ |
@@ -1488,7 +1525,10 @@ static int __increase_priority_inheritance(struct task_struct* t, | |||
1488 | binheap_add(&per_cpu(cedf_cpu_entries, linked_on).hn, | 1525 | binheap_add(&per_cpu(cedf_cpu_entries, linked_on).hn, |
1489 | &cluster->cpu_heap, cpu_entry_t, hn); | 1526 | &cluster->cpu_heap, cpu_entry_t, hn); |
1490 | 1527 | ||
1491 | } else { | 1528 | /* tell prio_inh that we're __running__ with its priority */ |
1529 | set_inh_task_linkback(t, prio_inh); | ||
1530 | } | ||
1531 | else { | ||
1492 | /* holder may be queued: first stop queue changes */ | 1532 | /* holder may be queued: first stop queue changes */ |
1493 | raw_spin_lock(&cluster->domain.release_lock); | 1533 | raw_spin_lock(&cluster->domain.release_lock); |
1494 | if (is_queued(t)) { | 1534 | if (is_queued(t)) { |
@@ -1585,12 +1625,14 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str | |||
1585 | 1625 | ||
1586 | /* called with IRQs off */ | 1626 | /* called with IRQs off */ |
1587 | static int __decrease_priority_inheritance(struct task_struct* t, | 1627 | static int __decrease_priority_inheritance(struct task_struct* t, |
1588 | struct task_struct* prio_inh) | 1628 | struct task_struct* prio_inh, |
1629 | int budget_triggered) | ||
1589 | { | 1630 | { |
1590 | cedf_domain_t* cluster; | 1631 | cedf_domain_t* cluster; |
1591 | int success = 1; | 1632 | int success = 1; |
1633 | struct task_struct* old_prio_inh = tsk_rt(t)->inh_task; | ||
1592 | 1634 | ||
1593 | if (prio_inh == tsk_rt(t)->inh_task) { | 1635 | if (prio_inh == old_prio_inh) { |
1594 | /* relationship already established. */ | 1636 | /* relationship already established. */ |
1595 | TRACE_TASK(t, "already inherits priority from %s/%d\n", | 1637 | TRACE_TASK(t, "already inherits priority from %s/%d\n", |
1596 | (prio_inh) ? prio_inh->comm : "(null)", | 1638 | (prio_inh) ? prio_inh->comm : "(null)", |
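The new budget_triggered argument records why an effective priority is being lowered. Hypothetical call sites, following the convention visible later in this patch (gsnedf_fmlp_unlock() passes 0; new_hp_waiter below is illustrative):

    /* ordinary unlock: inheritance ends because a resource is released */
    decrease_priority_inheritance(t, new_hp_waiter, 0);

    /* the donor's budget.exp was exhausted while t still holds the lock:
     * force the drop even if EDF would not consider it a decrease */
    decrease_priority_inheritance(t, NULL, 1);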
@@ -1633,17 +1675,27 @@ static int __decrease_priority_inheritance(struct task_struct* t, | |||
1633 | } | 1675 | } |
1634 | #endif | 1676 | #endif |
1635 | 1677 | ||
1636 | if (NULL != tsk_rt(t)->inh_task && | ||
1637 | NULL != get_budget_timer(t).ops->on_disinherit && | ||
1638 | NULL != get_budget_timer(tsk_rt(t)->inh_task).ops->on_disinherit) { | ||
1639 | get_budget_timer(t).ops->on_disinherit(t, tsk_rt(t)->inh_task); | ||
1640 | } | ||
1641 | |||
1642 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 1678 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
1643 | if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) { | 1679 | if(budget_triggered || __edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) { |
1644 | #endif | 1680 | #endif |
1645 | sched_trace_eff_prio_change(t, prio_inh); | 1681 | sched_trace_eff_prio_change(t, prio_inh); |
1646 | 1682 | ||
1683 | if (budget_triggered) { | ||
1684 | BUG_ON(!old_prio_inh); | ||
1685 | TRACE_TASK(t, "budget-triggered 'decrease' in priority. " | ||
1686 | "%s/%d's budget should have just been exhuasted.\n", | ||
1687 | old_prio_inh->comm, old_prio_inh->pid); | ||
1688 | } | ||
1689 | |||
1690 | /* clear out old inheritance relation */ | ||
1691 | if (NULL != old_prio_inh && | ||
1692 | NULL != get_budget_timer(t).ops->on_disinherit && | ||
1693 | NULL != get_budget_timer(old_prio_inh).ops->on_disinherit) { | ||
1694 | get_budget_timer(t).ops->on_disinherit(t, old_prio_inh); | ||
1695 | } | ||
1696 | if (old_prio_inh) | ||
1697 | clear_inh_task_linkback(t, old_prio_inh); | ||
1698 | |||
1647 | /* A job only stops inheriting a priority when it releases a | 1699 | /* A job only stops inheriting a priority when it releases a |
1648 | * resource. Thus we can make the following assumption. */ | 1700 | * resource. Thus we can make the following assumption. */ |
1649 | if(prio_inh) | 1701 | if(prio_inh) |
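This guard change is the crux of the hunk: __edf_higher_prio(t, EFFECTIVE, prio_inh, BASE) admits only true decreases, and would wrongly veto a budget-driven revocation whenever the exhausted donor still out-prioritizes its replacement. Restated as an isolated predicate (a sketch, not code from the patch):

    static inline int should_apply_decrease(struct task_struct* t,
                                            struct task_struct* prio_inh,
                                            int budget_triggered)
    {
        /* budget exhaustion forces the change; otherwise apply it only
         * when it genuinely lowers t's effective priority */
        return budget_triggered ||
               __edf_higher_prio(t, EFFECTIVE, prio_inh, BASE);
    }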
@@ -1652,11 +1704,22 @@ static int __decrease_priority_inheritance(struct task_struct* t, | |||
1652 | else | 1704 | else |
1653 | TRACE_TASK(t, "base priority restored.\n"); | 1705 | TRACE_TASK(t, "base priority restored.\n"); |
1654 | 1706 | ||
1707 | /* set up new inheritance relation */ | ||
1655 | tsk_rt(t)->inh_task = prio_inh; | 1708 | tsk_rt(t)->inh_task = prio_inh; |
1656 | 1709 | ||
1710 | if (prio_inh && | ||
1711 | NULL != get_budget_timer(t).ops->on_inherit && | ||
1712 | NULL != get_budget_timer(prio_inh).ops->on_inherit) { | ||
1713 | get_budget_timer(t).ops->on_inherit(t, prio_inh); | ||
1714 | } | ||
1715 | |||
1657 | if(tsk_rt(t)->scheduled_on != NO_CPU) { | 1716 | if(tsk_rt(t)->scheduled_on != NO_CPU) { |
1658 | TRACE_TASK(t, "is scheduled.\n"); | 1717 | TRACE_TASK(t, "is scheduled.\n"); |
1659 | 1718 | ||
1719 | /* link back to new inheritance */ | ||
1720 | if (prio_inh) | ||
1721 | set_inh_task_linkback(t, prio_inh); | ||
1722 | |||
1660 | /* Check if rescheduling is necessary. We can't use heap_decrease() | 1723 | /* Check if rescheduling is necessary. We can't use heap_decrease() |
1661 | * since the priority was effectively lowered. */ | 1724 | * since the priority was effectively lowered. */ |
1662 | unlink(t); | 1725 | unlink(t); |
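When t is scheduled, the linkback is re-registered against the new donor and t is then unlinked rather than adjusted in place: its heap key was effectively lowered, so heap_decrease() cannot be used. The remove-and-reinsert idiom, with the follow-up call assumed from this plugin's other reschedule paths:

    /* lowered key: remove and reinsert instead of heap_decrease() */
    unlink(t);
    cedf_job_arrival(t); /* assumed follow-up, as elsewhere in C-EDF */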
@@ -1682,16 +1745,14 @@ static int __decrease_priority_inheritance(struct task_struct* t, | |||
1682 | 1745 | ||
1683 | #ifdef CONFIG_REALTIME_AUX_TASKS | 1746 | #ifdef CONFIG_REALTIME_AUX_TASKS |
1684 | /* propagate to aux tasks */ | 1747 | /* propagate to aux tasks */ |
1685 | if (tsk_rt(t)->has_aux_tasks) { | 1748 | if (tsk_rt(t)->has_aux_tasks) |
1686 | aux_task_owner_decrease_priority(t); | 1749 | aux_task_owner_decrease_priority(t); |
1687 | } | ||
1688 | #endif | 1750 | #endif |
1689 | 1751 | ||
1690 | #ifdef CONFIG_LITMUS_NVIDIA | 1752 | #ifdef CONFIG_LITMUS_NVIDIA |
1691 | /* propagate to gpu */ | 1753 | /* propagate to gpu */ |
1692 | if (tsk_rt(t)->held_gpus) { | 1754 | if (tsk_rt(t)->held_gpus) |
1693 | gpu_owner_decrease_priority(t); | 1755 | gpu_owner_decrease_priority(t); |
1694 | } | ||
1695 | #endif | 1756 | #endif |
1696 | 1757 | ||
1697 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 1758 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
@@ -1713,7 +1774,8 @@ out: | |||
1713 | } | 1774 | } |
1714 | 1775 | ||
1715 | static void decrease_priority_inheritance(struct task_struct* t, | 1776 | static void decrease_priority_inheritance(struct task_struct* t, |
1716 | struct task_struct* prio_inh) | 1777 | struct task_struct* prio_inh, |
1778 | int budget_triggered) | ||
1717 | { | 1779 | { |
1718 | cedf_domain_t* cluster = task_cpu_cluster(t); | 1780 | cedf_domain_t* cluster = task_cpu_cluster(t); |
1719 | 1781 | ||
@@ -1723,7 +1785,7 @@ static void decrease_priority_inheritance(struct task_struct* t, | |||
1723 | (prio_inh) ? prio_inh->comm : "null", | 1785 | (prio_inh) ? prio_inh->comm : "null", |
1724 | (prio_inh) ? prio_inh->pid : 0); | 1786 | (prio_inh) ? prio_inh->pid : 0); |
1725 | 1787 | ||
1726 | __decrease_priority_inheritance(t, prio_inh); | 1788 | __decrease_priority_inheritance(t, prio_inh, budget_triggered); |
1727 | 1789 | ||
1728 | raw_spin_unlock(&cluster->cluster_lock); | 1790 | raw_spin_unlock(&cluster->cluster_lock); |
1729 | 1791 | ||
@@ -1795,10 +1857,11 @@ static void nested_increase_priority_inheritance(struct task_struct* t, | |||
1795 | static void nested_decrease_priority_inheritance(struct task_struct* t, | 1857 | static void nested_decrease_priority_inheritance(struct task_struct* t, |
1796 | struct task_struct* prio_inh, | 1858 | struct task_struct* prio_inh, |
1797 | raw_spinlock_t *to_unlock, | 1859 | raw_spinlock_t *to_unlock, |
1798 | unsigned long irqflags) | 1860 | unsigned long irqflags, |
1861 | int budget_triggered) | ||
1799 | { | 1862 | { |
1800 | struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock; | 1863 | struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock; |
1801 | decrease_priority_inheritance(t, prio_inh); | 1864 | decrease_priority_inheritance(t, prio_inh, budget_triggered); |
1802 | 1865 | ||
1803 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock t's heap. | 1866 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock t's heap. |
1804 | 1867 | ||
@@ -1810,7 +1873,8 @@ static void nested_decrease_priority_inheritance(struct task_struct* t, | |||
1810 | // beware: recursion | 1873 | // beware: recursion |
1811 | blocked_lock->ops->propagate_decrease_inheritance(blocked_lock, t, | 1874 | blocked_lock->ops->propagate_decrease_inheritance(blocked_lock, t, |
1812 | to_unlock, | 1875 | to_unlock, |
1813 | irqflags); | 1876 | irqflags, |
1877 | budget_triggered); | ||
1814 | } | 1878 | } |
1815 | else { | 1879 | else { |
1816 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", | 1880 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", |
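budget_triggered rides the entire propagation chain: each hop applies the (possibly forced) decrease, drops the inheritor's heap lock, and recurses into the lock the inheritor is blocked on. Reduced to its control flow (simplified from the hunk above):

    /* simplified control flow of the nested decrease */
    decrease_priority_inheritance(t, prio_inh, budget_triggered);
    raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);
    if (blocked_lock && blocked_lock->ops->supports_nesting)
        blocked_lock->ops->propagate_decrease_inheritance(blocked_lock, t,
                to_unlock, irqflags, budget_triggered);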
@@ -1833,6 +1897,7 @@ static struct litmus_lock_ops cedf_fifo_mutex_lock_ops = { | |||
1833 | .close = fifo_mutex_close, | 1897 | .close = fifo_mutex_close, |
1834 | .deallocate = fifo_mutex_free, | 1898 | .deallocate = fifo_mutex_free, |
1835 | 1899 | ||
1900 | .budget_exhausted = fifo_mutex_budget_exhausted, | ||
1836 | .propagate_increase_inheritance = fifo_mutex_propagate_increase_inheritance, | 1901 | .propagate_increase_inheritance = fifo_mutex_propagate_increase_inheritance, |
1837 | .propagate_decrease_inheritance = fifo_mutex_propagate_decrease_inheritance, | 1902 | .propagate_decrease_inheritance = fifo_mutex_propagate_decrease_inheritance, |
1838 | 1903 | ||
@@ -1849,6 +1914,8 @@ static struct litmus_lock_ops cedf_fifo_mutex_lock_ops = { | |||
1849 | .requires_atomic_dgl = 0, | 1914 | .requires_atomic_dgl = 0, |
1850 | #endif | 1915 | #endif |
1851 | .supports_nesting = 1, | 1916 | .supports_nesting = 1, |
1917 | .supports_budget_exhaustion = 1, | ||
1918 | .is_omlp_family = 0, | ||
1852 | }; | 1919 | }; |
1853 | 1920 | ||
1854 | static struct litmus_lock* cedf_new_fifo_mutex(void) | 1921 | static struct litmus_lock* cedf_new_fifo_mutex(void) |
@@ -1864,6 +1931,7 @@ static struct litmus_lock_ops cedf_prioq_mutex_lock_ops = { | |||
1864 | .close = prioq_mutex_close, | 1931 | .close = prioq_mutex_close, |
1865 | .deallocate = prioq_mutex_free, | 1932 | .deallocate = prioq_mutex_free, |
1866 | 1933 | ||
1934 | .budget_exhausted = prioq_mutex_budget_exhausted, | ||
1867 | .propagate_increase_inheritance = prioq_mutex_propagate_increase_inheritance, | 1935 | .propagate_increase_inheritance = prioq_mutex_propagate_increase_inheritance, |
1868 | .propagate_decrease_inheritance = prioq_mutex_propagate_decrease_inheritance, | 1936 | .propagate_decrease_inheritance = prioq_mutex_propagate_decrease_inheritance, |
1869 | 1937 | ||
@@ -1880,6 +1948,8 @@ static struct litmus_lock_ops cedf_prioq_mutex_lock_ops = { | |||
1880 | .requires_atomic_dgl = 1, | 1948 | .requires_atomic_dgl = 1, |
1881 | #endif | 1949 | #endif |
1882 | .supports_nesting = 1, | 1950 | .supports_nesting = 1, |
1951 | .supports_budget_exhaustion = 1, | ||
1952 | .is_omlp_family = 0, | ||
1883 | }; | 1953 | }; |
1884 | 1954 | ||
1885 | static struct litmus_lock* cedf_new_prioq_mutex(void) | 1955 | static struct litmus_lock* cedf_new_prioq_mutex(void) |
@@ -1895,6 +1965,9 @@ static struct litmus_lock_ops cedf_ikglp_lock_ops = { | |||
1895 | .close = ikglp_close, | 1965 | .close = ikglp_close, |
1896 | .deallocate = ikglp_free, | 1966 | .deallocate = ikglp_free, |
1897 | 1967 | ||
1968 | .budget_exhausted = ikglp_budget_exhausted, | ||
1969 | .omlp_virtual_unlock = ikglp_virtual_unlock, | ||
1970 | |||
1898 | // ikglp can only be an outer-most lock. | 1971 | // ikglp can only be an outer-most lock. |
1899 | .propagate_increase_inheritance = NULL, | 1972 | .propagate_increase_inheritance = NULL, |
1900 | .propagate_decrease_inheritance = NULL, | 1973 | .propagate_decrease_inheritance = NULL, |
@@ -1904,6 +1977,8 @@ static struct litmus_lock_ops cedf_ikglp_lock_ops = { | |||
1904 | .requires_atomic_dgl = 0, | 1977 | .requires_atomic_dgl = 0, |
1905 | #endif | 1978 | #endif |
1906 | .supports_nesting = 0, | 1979 | .supports_nesting = 0, |
1980 | .supports_budget_exhaustion = 1, | ||
1981 | .is_omlp_family = 1, | ||
1907 | }; | 1982 | }; |
1908 | 1983 | ||
1909 | static struct litmus_lock* cedf_new_ikglp(void* __user arg) | 1984 | static struct litmus_lock* cedf_new_ikglp(void* __user arg) |
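The IKGLP is the only lock here with is_omlp_family = 1, and the only one wiring up omlp_virtual_unlock. The apparent intent: under an OMLP-family protocol, a holder whose budget drains can be "virtually" unlocked so the queue and donor structure are recomputed without the resource actually changing hands. A hedged dispatch sketch (the caller is hypothetical and the callback's signature is assumed; only the ops fields come from this patch):

    /* hypothetical caller on the budget-exhaustion path */
    static void budget_exhausted_under_lock(struct litmus_lock* l,
                                            struct task_struct* holder)
    {
        if (l->ops->is_omlp_family)
            l->ops->omlp_virtual_unlock(l); /* signature assumed */
    }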
@@ -1934,6 +2009,8 @@ static struct litmus_lock_ops cedf_kfmlp_lock_ops = { | |||
1934 | .requires_atomic_dgl = 0, | 2009 | .requires_atomic_dgl = 0, |
1935 | #endif | 2010 | #endif |
1936 | .supports_nesting = 0, | 2011 | .supports_nesting = 0, |
2012 | .supports_budget_exhaustion = 0, | ||
2013 | .is_omlp_family = 0, | ||
1937 | }; | 2014 | }; |
1938 | 2015 | ||
1939 | 2016 | ||
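For reference, the capability flags across the four C-EDF ops tables, read straight from the hunks above:

    /* capability matrix introduced by this patch (C-EDF locks):
     *                 supports_nesting  supports_budget_exhaustion  is_omlp_family
     *  fifo_mutex            1                     1                      0
     *  prioq_mutex           1                     1                      0
     *  ikglp                 0                     1                      1
     *  kfmlp                 0                     0                      0
     */

Generic code can thus gate on the flags rather than on the concrete lock type.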
@@ -2243,6 +2320,7 @@ static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = { | |||
2243 | .plugin_name = "C-EDF", | 2320 | .plugin_name = "C-EDF", |
2244 | .finish_switch = cedf_finish_switch, | 2321 | .finish_switch = cedf_finish_switch, |
2245 | .tick = cedf_tick, | 2322 | .tick = cedf_tick, |
2323 | .check_schedule = cedf_check_schedule, | ||
2246 | .task_new = cedf_task_new, | 2324 | .task_new = cedf_task_new, |
2247 | .complete_job = complete_job, | 2325 | .complete_job = complete_job, |
2248 | .task_exit = cedf_task_exit, | 2326 | .task_exit = cedf_task_exit, |
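C-EDF also registers the new check_schedule plugin hook. Its pairing with the budget changes suggests the budget machinery uses it to ask the plugin to re-evaluate its scheduling decision after a budget event; a hypothetical call site (only the hook itself comes from this patch):

    /* hypothetical use from a budget-timer expiry path */
    if (budget_exhausted(t))
        litmus->check_schedule(t);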
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index 821e96cd4ec9..0756aaddb390 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c | |||
@@ -1296,7 +1296,8 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str | |||
1296 | 1296 | ||
1297 | /* called with IRQs off */ | 1297 | /* called with IRQs off */ |
1298 | static int __decrease_priority_inheritance(struct task_struct* t, | 1298 | static int __decrease_priority_inheritance(struct task_struct* t, |
1299 | struct task_struct* prio_inh) | 1299 | struct task_struct* prio_inh, |
1300 | int budget_triggered) | ||
1300 | { | 1301 | { |
1301 | int success = 1; | 1302 | int success = 1; |
1302 | 1303 | ||
@@ -1309,7 +1310,7 @@ static int __decrease_priority_inheritance(struct task_struct* t, | |||
1309 | } | 1310 | } |
1310 | 1311 | ||
1311 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 1312 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
1312 | if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) { | 1313 | if(budget_triggered || __edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) { |
1313 | #endif | 1314 | #endif |
1314 | /* A job only stops inheriting a priority when it releases a | 1315 | /* A job only stops inheriting a priority when it releases a |
1315 | * resource. Thus we can make the following assumption. */ | 1316 | * resource. Thus we can make the following assumption. */ |
@@ -1379,13 +1380,14 @@ out: | |||
1379 | } | 1380 | } |
1380 | 1381 | ||
1381 | static void decrease_priority_inheritance(struct task_struct* t, | 1382 | static void decrease_priority_inheritance(struct task_struct* t, |
1382 | struct task_struct* prio_inh) | 1383 | struct task_struct* prio_inh, |
1384 | int budget_triggered) | ||
1383 | { | 1385 | { |
1384 | int success; | 1386 | int success; |
1385 | 1387 | ||
1386 | raw_spin_lock(&gsnedf_lock); | 1388 | raw_spin_lock(&gsnedf_lock); |
1387 | 1389 | ||
1388 | success = __decrease_priority_inheritance(t, prio_inh); | 1390 | success = __decrease_priority_inheritance(t, prio_inh, budget_triggered); |
1389 | 1391 | ||
1390 | raw_spin_unlock(&gsnedf_lock); | 1392 | raw_spin_unlock(&gsnedf_lock); |
1391 | 1393 | ||
@@ -1454,10 +1456,11 @@ static void nested_increase_priority_inheritance(struct task_struct* t, | |||
1454 | static void nested_decrease_priority_inheritance(struct task_struct* t, | 1456 | static void nested_decrease_priority_inheritance(struct task_struct* t, |
1455 | struct task_struct* prio_inh, | 1457 | struct task_struct* prio_inh, |
1456 | raw_spinlock_t *to_unlock, | 1458 | raw_spinlock_t *to_unlock, |
1457 | unsigned long irqflags) | 1459 | unsigned long irqflags, |
1460 | int budget_triggered) | ||
1458 | { | 1461 | { |
1459 | struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock; | 1462 | struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock; |
1460 | decrease_priority_inheritance(t, prio_inh); | 1463 | decrease_priority_inheritance(t, prio_inh, budget_triggered); |
1461 | 1464 | ||
1462 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock t's heap. | 1465 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock t's heap. |
1463 | 1466 | ||
@@ -1469,7 +1472,8 @@ static void nested_decrease_priority_inheritance(struct task_struct* t, | |||
1469 | // beware: recursion | 1472 | // beware: recursion |
1470 | blocked_lock->ops->propagate_decrease_inheritance(blocked_lock, t, | 1473 | blocked_lock->ops->propagate_decrease_inheritance(blocked_lock, t, |
1471 | to_unlock, | 1474 | to_unlock, |
1472 | irqflags); | 1475 | irqflags, |
1476 | budget_triggered); | ||
1473 | } | 1477 | } |
1474 | else { | 1478 | else { |
1475 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", | 1479 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", |
@@ -1699,7 +1703,7 @@ int gsnedf_fmlp_unlock(struct litmus_lock* l) | |||
1699 | 1703 | ||
1700 | /* we lose the benefit of priority inheritance (if any) */ | 1704 | /* we lose the benefit of priority inheritance (if any) */ |
1701 | if (tsk_rt(t)->inh_task) | 1705 | if (tsk_rt(t)->inh_task) |
1702 | decrease_priority_inheritance(t, NULL); | 1706 | decrease_priority_inheritance(t, NULL, 0); |
1703 | 1707 | ||
1704 | out: | 1708 | out: |
1705 | spin_unlock_irqrestore(&sem->wait.lock, flags); | 1709 | spin_unlock_irqrestore(&sem->wait.lock, flags); |
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c index e870fc14d4d3..74bf6b1d2ce4 100644 --- a/litmus/sched_plugin.c +++ b/litmus/sched_plugin.c | |||
@@ -92,6 +92,10 @@ static void litmus_dummy_tick(struct task_struct* tsk) | |||
92 | { | 92 | { |
93 | } | 93 | } |
94 | 94 | ||
95 | static void litmus_dummy_check_schedule(struct task_struct* tsk) | ||
96 | { | ||
97 | } | ||
98 | |||
95 | static long litmus_dummy_admit_task(struct task_struct* tsk) | 99 | static long litmus_dummy_admit_task(struct task_struct* tsk) |
96 | { | 100 | { |
97 | printk(KERN_CRIT "LITMUS^RT: Linux plugin rejects %s/%d.\n", | 101 | printk(KERN_CRIT "LITMUS^RT: Linux plugin rejects %s/%d.\n", |
@@ -150,7 +154,7 @@ static void litmus_dummy_increase_prio(struct task_struct* t, struct task_struct | |||
150 | { | 154 | { |
151 | } | 155 | } |
152 | 156 | ||
153 | static void litmus_dummy_decrease_prio(struct task_struct* t, struct task_struct* prio_inh) | 157 | static void litmus_dummy_decrease_prio(struct task_struct* t, struct task_struct* prio_inh, int budget_triggered) |
154 | { | 158 | { |
155 | } | 159 | } |
156 | 160 | ||
@@ -160,7 +164,7 @@ static int litmus_dummy___increase_prio(struct task_struct* t, struct task_struc | |||
160 | return 0; | 164 | return 0; |
161 | } | 165 | } |
162 | 166 | ||
163 | static int litmus_dummy___decrease_prio(struct task_struct* t, struct task_struct* prio_inh) | 167 | static int litmus_dummy___decrease_prio(struct task_struct* t, struct task_struct* prio_inh, int budget_triggered) |
164 | { | 168 | { |
165 | TRACE_CUR("WARNING: Dummy litmus_dummy___decrease_prio called!\n"); | 169 | TRACE_CUR("WARNING: Dummy litmus_dummy___decrease_prio called!\n"); |
166 | return 0; | 170 | return 0; |
@@ -194,7 +198,8 @@ static void litmus_dummy_nested_increase_prio(struct task_struct* t, struct task | |||
194 | } | 198 | } |
195 | 199 | ||
196 | static void litmus_dummy_nested_decrease_prio(struct task_struct* t, struct task_struct* prio_inh, | 200 | static void litmus_dummy_nested_decrease_prio(struct task_struct* t, struct task_struct* prio_inh, |
197 | raw_spinlock_t *to_unlock, unsigned long irqflags) | 201 | raw_spinlock_t *to_unlock, unsigned long irqflags, |
202 | int budget_triggered) | ||
198 | { | 203 | { |
199 | } | 204 | } |
200 | 205 | ||
@@ -242,6 +247,7 @@ struct sched_plugin linux_sched_plugin = { | |||
242 | .complete_job = litmus_dummy_complete_job, | 247 | .complete_job = litmus_dummy_complete_job, |
243 | .schedule = litmus_dummy_schedule, | 248 | .schedule = litmus_dummy_schedule, |
244 | .finish_switch = litmus_dummy_finish_switch, | 249 | .finish_switch = litmus_dummy_finish_switch, |
250 | .check_schedule = litmus_dummy_check_schedule, | ||
245 | .activate_plugin = litmus_dummy_activate_plugin, | 251 | .activate_plugin = litmus_dummy_activate_plugin, |
246 | .deactivate_plugin = litmus_dummy_deactivate_plugin, | 252 | .deactivate_plugin = litmus_dummy_deactivate_plugin, |
247 | .compare = litmus_dummy_compare, | 253 | .compare = litmus_dummy_compare, |
@@ -299,6 +305,7 @@ int register_sched_plugin(struct sched_plugin* plugin) | |||
299 | CHECK(finish_switch); | 305 | CHECK(finish_switch); |
300 | CHECK(schedule); | 306 | CHECK(schedule); |
301 | CHECK(tick); | 307 | CHECK(tick); |
308 | CHECK(check_schedule); | ||
302 | CHECK(task_wake_up); | 309 | CHECK(task_wake_up); |
303 | CHECK(task_exit); | 310 | CHECK(task_exit); |
304 | CHECK(task_block); | 311 | CHECK(task_block); |
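register_sched_plugin() guards the new hook exactly like the existing ones: CHECK() substitutes the dummy implementation when a plugin leaves a hook NULL. The idiom this file uses, sketched from the calls above:

    #define CHECK(func) {\
        if (!plugin->func) \
            plugin->func = litmus_dummy_ ## func;}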
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c index e243b8007826..af318058f379 100644 --- a/litmus/sched_task_trace.c +++ b/litmus/sched_task_trace.c | |||
@@ -193,7 +193,7 @@ feather_callback void do_sched_trace_task_completion(unsigned long id, | |||
193 | struct st_event_record* rec = get_record(ST_COMPLETION, t); | 193 | struct st_event_record* rec = get_record(ST_COMPLETION, t); |
194 | if (rec) { | 194 | if (rec) { |
195 | rec->data.completion.when = now(); | 195 | rec->data.completion.when = now(); |
196 | rec->data.completion.backlog_remaining = tsk_rt(t)->job_params.job_backlog; | 196 | rec->data.completion.backlog_remaining = tsk_rt(t)->job_params.backlog; |
197 | rec->data.completion.was_backlog_job = tsk_rt(t)->job_params.is_backlogged_job; | 197 | rec->data.completion.was_backlog_job = tsk_rt(t)->job_params.is_backlogged_job; |
198 | rec->data.completion.forced = forced; | 198 | rec->data.completion.forced = forced; |
199 | put_record(rec); | 199 | put_record(rec); |