| author | Glenn Elliott <gelliott@cs.unc.edu> | 2013-02-20 09:59:45 -0500 |
|---|---|---|
| committer | Glenn Elliott <gelliott@cs.unc.edu> | 2013-02-20 09:59:45 -0500 |
| commit | d6ddecb0d2cee3880a2785c2b4345336855dc6e5 (patch) | |
| tree | ece73990c2adac7a3cdddbe3ec6e9d0a0e3a425c | |
| parent | 8428e06c674ef22f3455709318cc2aeb63590c6e (diff) | |
Minor fixes and cleanup.
```
-rw-r--r--  include/litmus/locking.h |  6
-rw-r--r--  litmus/aux_tasks.c       | 16
-rw-r--r--  litmus/edf_common.c      | 10
-rw-r--r--  litmus/fifo_lock.c       |  7
-rw-r--r--  litmus/gpu_affinity.c    | 79
-rw-r--r--  litmus/jobs.c            |  2
-rw-r--r--  litmus/litmus.c          | 11
-rw-r--r--  litmus/locking.c         | 79
-rw-r--r--  litmus/sched_cedf.c      | 18

9 files changed, 97 insertions, 131 deletions
```
```diff
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
index b9c6a2b1d01e..02cc9cf4bb55 100644
--- a/include/litmus/locking.h
+++ b/include/litmus/locking.h
@@ -153,11 +153,11 @@ struct litmus_lock_ops {
 
 	/* all flags at the end */
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
-	int supports_nesting:1;
+	unsigned int supports_nesting:1;
 #endif
#ifdef CONFIG_LITMUS_DGL_SUPPORT
-	int supports_dgl:1;
-	int requires_atomic_dgl:1;
+	unsigned int supports_dgl:1;
+	unsigned int requires_atomic_dgl:1;
 #endif
 };
 
```
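The locking.h change is more than cosmetic: a one-bit *signed* `int` bitfield can hold only the values 0 and -1, so tests like `flag == 1` silently fail, and GCC warns when 1 is assigned to it. A small userspace illustration (not part of the patch):

```c
#include <stdio.h>

struct flags_signed   { int f:1; };           /* can hold only 0 and -1 */
struct flags_unsigned { unsigned int f:1; };  /* can hold 0 and 1 */

int main(void)
{
	struct flags_signed   s = { .f = 1 };  /* implementation-defined;  */
	struct flags_unsigned u = { .f = 1 };  /* GCC stores the low bit   */

	/* Reading s.f sign-extends the single bit, yielding -1. */
	printf("signed:   f = %d, (f == 1) -> %d\n", s.f, s.f == 1);  /* -1, 0 */
	printf("unsigned: f = %u, (f == 1) -> %d\n", u.f, u.f == 1);  /*  1, 1 */
	return 0;
}
```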
```diff
diff --git a/litmus/aux_tasks.c b/litmus/aux_tasks.c
index 5aa9f7634fbf..a9fa9df5ef8b 100644
--- a/litmus/aux_tasks.c
+++ b/litmus/aux_tasks.c
@@ -74,7 +74,7 @@ static int aux_tasks_increase_priority(struct task_struct *leader, struct task_s
 
 	struct list_head *pos;
 
-	TRACE_CUR("Increasing priority of aux tasks in group %s/%d.\n", leader->comm, leader->pid);
+	// TRACE_CUR("Increasing priority of aux tasks in group %s/%d.\n", leader->comm, leader->pid);
 
 	list_for_each(pos, &tsk_aux(leader)->aux_tasks) {
 		struct task_struct *aux =
@@ -90,7 +90,7 @@ static int aux_tasks_increase_priority(struct task_struct *leader, struct task_s
 		}
 		else {
 			// aux tasks don't touch rt locks, so no nested call needed.
-			TRACE_CUR("increasing %s/%d.\n", aux->comm, aux->pid);
+			// TRACE_CUR("increasing %s/%d.\n", aux->comm, aux->pid);
 			retval = litmus->__increase_prio(aux, hp);
 		}
 	}
@@ -104,7 +104,7 @@ static int aux_tasks_decrease_priority(struct task_struct *leader, struct task_s
 
 	struct list_head *pos;
 
-	TRACE_CUR("Decreasing priority of aux tasks in group %s/%d.\n", leader->comm, leader->pid);
+	// TRACE_CUR("Decreasing priority of aux tasks in group %s/%d.\n", leader->comm, leader->pid);
 
 	list_for_each(pos, &tsk_aux(leader)->aux_tasks) {
 		struct task_struct *aux =
@@ -115,7 +115,7 @@ static int aux_tasks_decrease_priority(struct task_struct *leader, struct task_s
 			TRACE_CUR("skipping non-real-time aux task %s/%d\n", aux->comm, aux->pid);
 		}
 		else {
-			TRACE_CUR("decreasing %s/%d.\n", aux->comm, aux->pid);
+			// TRACE_CUR("decreasing %s/%d.\n", aux->comm, aux->pid);
 			retval = litmus->__decrease_prio(aux, hp);
 		}
 	}
@@ -147,7 +147,7 @@ int aux_task_owner_increase_priority(struct task_struct *t)
 		goto out;
 	}
 
-	TRACE_CUR("task %s/%d in group %s/%d increasing priority.\n", t->comm, t->pid, leader->comm, leader->pid);
+	// TRACE_CUR("task %s/%d in group %s/%d increasing priority.\n", t->comm, t->pid, leader->comm, leader->pid);
 
 	hp = container_of(
 		binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node),
@@ -171,7 +171,7 @@ int aux_task_owner_increase_priority(struct task_struct *t)
 	/* check if the eff. prio. of hp has changed */
 	if (increase_aux || (effective_priority(hp) != hp_eff)) {
 		hp_eff = effective_priority(hp);
-		TRACE_CUR("%s/%d is new hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid);
+		// TRACE_CUR("%s/%d is new hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid);
 		retval = aux_tasks_increase_priority(leader, hp_eff);
 	}
 out:
@@ -201,7 +201,7 @@ int aux_task_owner_decrease_priority(struct task_struct *t)
 		goto out;
 	}
 
-	TRACE_CUR("task %s/%d in group %s/%d decresing priority.\n", t->comm, t->pid, leader->comm, leader->pid);
+	// TRACE_CUR("task %s/%d in group %s/%d decresing priority.\n", t->comm, t->pid, leader->comm, leader->pid);
 
 	hp = container_of(
 		binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node),
@@ -219,7 +219,7 @@ int aux_task_owner_decrease_priority(struct task_struct *t)
 	/* if the new_hp is still t, or if the effective priority has changed */
 	if ((new_hp == t) || (effective_priority(new_hp) != hp_eff)) {
 		hp_eff = effective_priority(new_hp);
-		TRACE_CUR("%s/%d is no longer hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid);
+		// TRACE_CUR("%s/%d is no longer hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid);
 		retval = aux_tasks_decrease_priority(leader, hp_eff);
 	}
 }
```
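Most of the aux_tasks.c churn silences per-invocation TRACE_CUR messages by commenting them out at each call site. A common alternative is a compile-time guard; a hypothetical sketch (the CONFIG symbol and the AUX_TRACE name are invented for illustration, not part of this patch):

```c
/* Hypothetical compile-time toggle for verbose aux-task tracing.
 * CONFIG_LITMUS_AUX_TRACE_VERBOSE and AUX_TRACE() are invented names;
 * the patch instead comments out each TRACE_CUR() call directly. */
#ifdef CONFIG_LITMUS_AUX_TRACE_VERBOSE
#define AUX_TRACE(fmt, args...) TRACE_CUR(fmt, ## args)
#else
#define AUX_TRACE(fmt, args...) do {} while (0)
#endif
```

Call sites would then read `AUX_TRACE("increasing %s/%d.\n", aux->comm, aux->pid);` and compile to nothing when the option is off.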
```diff
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index 32ee5f464ef8..52ccac998142 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -108,12 +108,11 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
 	// one of these is an aux task without inheritance.
 	if (first_lo_aux != second_lo_aux) {
 		int temp = (first_lo_aux < second_lo_aux); // non-lo-aux has higher priority.
-		TRACE_CUR("%s/%d >> %s/%d --- %d\n", first->comm, first->pid, second->comm, second->pid, temp);
 		return temp;
 	}
 	else {
 		/* both MUST be lo_aux. tie-break. */
-		TRACE_CUR("aux tie break!\n");
+		//TRACE_CUR("aux tie break!\n");
 		goto aux_tie_break;
 	}
 }
@@ -123,7 +122,7 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
 			// inh_task is !NULL for both tasks since neither was a lo_aux task.
 			// Both aux tasks inherit from the same task, so tie-break
 			// by base priority of the aux tasks.
-			TRACE_CUR("aux tie break!\n");
+			//TRACE_CUR("aux tie break!\n");
 			goto aux_tie_break;
 		}
 	}
@@ -139,12 +138,11 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
 	// one of these is an klmirqd thread without inheritance.
 	if (first_lo_klmirqd != second_lo_klmirqd) {
 		int temp = (first_lo_klmirqd < second_lo_klmirqd); // non-klmirqd has higher priority
-		TRACE_CUR("%s/%d >> %s/%d --- %d\n", first->comm, first->pid, second->comm, second->pid, temp);
 		return temp;
 	}
 	else {
 		/* both MUST be klmirqd. tie-break. */
-		TRACE_CUR("klmirqd tie break!\n");
+		//TRACE_CUR("klmirqd tie break!\n");
 		goto klmirqd_tie_break;
 	}
 }
@@ -154,7 +152,7 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
 			// inh_task is !NULL for both tasks since neither was a lo_klmirqd task.
 			// Both klmirqd tasks inherit from the same task, so tie-break
 			// by base priority of the klmirqd tasks.
-			TRACE_CUR("klmirqd tie break!\n");
+			//TRACE_CUR("klmirqd tie break!\n");
 			goto klmirqd_tie_break;
 		}
 	}
```
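The `(first_lo_aux < second_lo_aux)` idiom above is worth a second look: both operands are 0/1 flags, so the comparison evaluates to 1 exactly when `first` is *not* a low-priority aux (or klmirqd) task but `second` is. A minimal standalone sketch of the idiom:

```c
/* Sketch of the 0/1-flag comparison used in edf_higher_prio(): for
 * boolean flags a and b, (a < b) == 1 iff a == 0 && b == 1, i.e. the
 * task without the "low-priority aux" mark wins. */
#include <assert.h>

int main(void)
{
	assert((0 < 1) == 1);  /* first normal, second aux -> first wins  */
	assert((1 < 0) == 0);  /* first aux, second normal -> second wins */
	/* equal flags never reach this comparison; they tie-break instead */
	return 0;
}
```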
```diff
diff --git a/litmus/fifo_lock.c b/litmus/fifo_lock.c
index dfe56bface6d..b3e956f5a93a 100644
--- a/litmus/fifo_lock.c
+++ b/litmus/fifo_lock.c
@@ -2,6 +2,7 @@
 #include <linux/uaccess.h>
 
 #include <litmus/trace.h>
+#include <litmus/sched_trace.h>
 #include <litmus/sched_plugin.h>
 #include <litmus/fifo_lock.h>
 
@@ -416,6 +417,10 @@ int fifo_mutex_unlock(struct litmus_lock* l)
 
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 	if(dgl_wait) {
+		// we normally do this tracing in locking.c, but that code
+		// doesn't have visibility into this hand-off.
+		sched_trace_lock(dgl_wait->task, l->ident, 1);
+
 		select_next_lock_if_primary(l, dgl_wait);
 		--(dgl_wait->nr_remaining);
 		wake_up_task = (dgl_wait->nr_remaining == 0);
@@ -504,8 +509,6 @@ out:
 #endif
 	unlock_global_irqrestore(dgl_lock, flags);
 
-	TRACE_TASK(t, "-- Freed lock %d --\n", l->ident);
-
 	return err;
 }
 
```
```diff
diff --git a/litmus/gpu_affinity.c b/litmus/gpu_affinity.c
index 9e421ce9efc2..f4bfb1a67097 100644
--- a/litmus/gpu_affinity.c
+++ b/litmus/gpu_affinity.c
@@ -20,6 +20,7 @@
 #define MIN(a, b) ((a < b) ? a : b)
 
 #if 0
+/* PID feedback controller */
 static fp_t update_estimate(feedback_est_t* fb, fp_t a, fp_t b, lt_t observed)
 {
 	fp_t relative_err;
@@ -78,14 +79,22 @@ lt_t isqrt(lt_t n)
 
 void update_gpu_estimate(struct task_struct *t, lt_t observed)
 {
-	//feedback_est_t *fb = &(tsk_rt(t)->gpu_migration_est[tsk_rt(t)->gpu_migration]);
 	avg_est_t *est;
-	struct migration_info mig_info;
+
 
 	BUG_ON(tsk_rt(t)->gpu_migration > MIG_LAST);
 
 	est = &(tsk_rt(t)->gpu_migration_est[tsk_rt(t)->gpu_migration]);
 
+	{
+		/* log the migration event */
+		struct migration_info mig_info;
+		mig_info.observed = observed;
+		mig_info.estimated = est->avg;
+		mig_info.distance = tsk_rt(t)->gpu_migration;
+		sched_trace_migration(t, &mig_info);
+	}
+
 	if (unlikely(observed > OBSERVATION_CAP)) {
 		TRACE_TASK(t, "Crazy observation greater than was dropped: %llu > %llu\n",
 			observed,
@@ -93,22 +102,6 @@ void update_gpu_estimate(struct task_struct *t, lt_t observed)
 		return;
 	}
 
-#if 0
-	// filter out values that are HI_THRESHOLDx or (1/LO_THRESHOLD)x out
-	// of range of the average, but only filter if enough samples
-	// have been taken.
-	if (likely((est->count > MIN(10, AVG_EST_WINDOW_SIZE/2)))) {
-		if (unlikely(observed < est->avg/LO_THRESHOLD)) {
-			TRACE_TASK(t, "Observation is too small: %llu\n",
-				observed);
-			return;
-		}
-		else if (unlikely(observed > est->avg*HI_THRESHOLD)) {
-			TRACE_TASK(t, "Observation is too large: %llu\n",
-				observed);
-			return;
-		}
-#endif
 	// filter values outside NUM_STDEVx the standard deviation,
 	// but only filter if enough samples have been taken.
 	if (likely((est->count > MIN(10, AVG_EST_WINDOW_SIZE/2)))) {
@@ -129,8 +122,6 @@ void update_gpu_estimate(struct task_struct *t, lt_t observed)
 		}
 	}
 
-
-
 	if (unlikely(est->count < AVG_EST_WINDOW_SIZE)) {
 		++est->count;
 	}
@@ -138,60 +129,12 @@ void update_gpu_estimate(struct task_struct *t, lt_t observed)
 		est->sum -= est->history[est->idx];
 	}
 
-	mig_info.observed = observed;
-	mig_info.estimated = est->avg;
-	mig_info.distance = tsk_rt(t)->gpu_migration;
-	sched_trace_migration(t, &mig_info);
-
 	est->history[est->idx] = observed;
 	est->sum += observed;
 	est->avg = est->sum/est->count;
 	est->std = isqrt(varience(est->history, est->avg, est->count));
 	est->idx = (est->idx + 1) % AVG_EST_WINDOW_SIZE;
 
-
-#if 0
-	if(unlikely(fb->est.val == 0)) {
-		// kludge-- cap observed values to prevent whacky estimations.
-		// whacky stuff happens during the first few jobs.
-		if(unlikely(observed > OBSERVATION_CAP)) {
-			TRACE_TASK(t, "Crazy observation was capped: %llu -> %llu\n",
-				observed, OBSERVATION_CAP);
-			observed = OBSERVATION_CAP;
-		}
-
-		// take the first observation as our estimate
-		// (initial value of 0 was bogus anyhow)
-		fb->est = _integer_to_fp(observed);
-		fb->accum_err = _div(fb->est, _integer_to_fp(2)); // ...seems to work.
-	}
-	else {
-		fp_t rel_err = update_estimate(fb,
-			tsk_rt(t)->gpu_fb_param_a[tsk_rt(t)->gpu_migration],
-			tsk_rt(t)->gpu_fb_param_b[tsk_rt(t)->gpu_migration],
-			observed);
-
-		if(unlikely(_fp_to_integer(fb->est) <= 0)) {
-			TRACE_TASK(t, "Invalid estimate. Patching.\n");
-			fb->est = _integer_to_fp(observed);
-			fb->accum_err = _div(fb->est, _integer_to_fp(2)); // ...seems to work.
-		}
-		else {
-			struct migration_info mig_info;
-
-			sched_trace_prediction_err(t,
-				&(tsk_rt(t)->gpu_migration),
-				&rel_err);
-
-			mig_info.observed = observed;
-			mig_info.estimated = get_gpu_estimate(t, tsk_rt(t)->gpu_migration);
-			mig_info.distance = tsk_rt(t)->gpu_migration;
-
-			sched_trace_migration(t, &mig_info);
-		}
-	}
-#endif
-
 	TRACE_TASK(t, "GPU est update after (dist = %d, obs = %llu): %llu\n",
 		tsk_rt(t)->gpu_migration,
 		observed,
```
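After this cleanup, update_gpu_estimate() is a plain sliding-window estimator: log the observation, reject outliers beyond NUM_STDEV standard deviations (once enough samples exist), then fold the sample into a circular buffer and recompute the average and standard deviation. A self-contained userspace sketch of that logic; the window size and deviation bound are assumed, since their actual values are not shown in this diff:

```c
#include <stdint.h>
#include <stdio.h>

#define WINDOW    20  /* stand-in for AVG_EST_WINDOW_SIZE (value assumed) */
#define NUM_STDEV 2   /* stand-in for the kernel's NUM_STDEV (value assumed) */
#define MIN(a, b) (((a) < (b)) ? (a) : (b))

typedef struct {
	uint64_t history[WINDOW];
	int      count, idx;
	uint64_t sum, avg, std;
} avg_est_t;

static uint64_t isqrt64(uint64_t n)  /* integer square root by bisection */
{
	uint64_t lo = 0, hi = n;
	while (lo < hi) {
		uint64_t mid = lo + (hi - lo + 1) / 2;
		if (mid <= n / mid)
			lo = mid;
		else
			hi = mid - 1;
	}
	return lo;
}

static uint64_t variance64(const uint64_t *h, uint64_t avg, int count)
{
	uint64_t var = 0;
	for (int i = 0; i < count; ++i) {
		int64_t d = (int64_t)h[i] - (int64_t)avg;
		var += (uint64_t)(d * d);
	}
	return var / count;
}

/* One observation: reject outliers, then fold into the circular window. */
static void est_update(avg_est_t *est, uint64_t observed)
{
	/* filter values outside NUM_STDEV standard deviations, but only
	 * once enough samples have been taken */
	if (est->count > MIN(10, WINDOW / 2)) {
		uint64_t dev = NUM_STDEV * est->std;
		uint64_t lo  = (est->avg > dev) ? est->avg - dev : 0;
		if (observed < lo || observed > est->avg + dev)
			return;  /* dropped as an outlier */
	}

	if (est->count < WINDOW)
		++est->count;                        /* window still filling */
	else
		est->sum -= est->history[est->idx];  /* evict the oldest sample */

	est->history[est->idx] = observed;
	est->sum += observed;
	est->avg  = est->sum / est->count;
	est->std  = isqrt64(variance64(est->history, est->avg, est->count));
	est->idx  = (est->idx + 1) % WINDOW;
}

int main(void)
{
	avg_est_t est = { 0 };
	for (int i = 0; i < 12; ++i)
		est_update(&est, 100 + (i % 5));  /* fill past the threshold */
	est_update(&est, 10000);                  /* rejected as an outlier */
	printf("count = %d, avg = %llu, std = %llu\n", est.count,
	       (unsigned long long)est.avg, (unsigned long long)est.std);
	return 0;
}
```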
```diff
diff --git a/litmus/jobs.c b/litmus/jobs.c
index bdfc41004d38..659625433867 100644
--- a/litmus/jobs.c
+++ b/litmus/jobs.c
@@ -28,6 +28,8 @@ static inline void setup_release(struct task_struct *t, lt_t release)
 
 	/* don't confuse Linux */
 	t->rt.time_slice = 1;
+
+	TRACE_TASK(t, "preparing for next job: %d\n", t->rt_param.job_params.job_no);
 }
 
 void prepare_for_next_period(struct task_struct *t)
```
```diff
diff --git a/litmus/litmus.c b/litmus/litmus.c
index a69a3d0e9128..3e15ea432293 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -316,8 +316,8 @@ asmlinkage long sys_sched_trace_event(int event, struct st_inject_args __user *_
 
 	struct st_inject_args args;
 
-	if (is_realtime(t)) {
-		printk(KERN_WARNING "Only non-real-time tasks may inject sched_trace events.\n");
+	if ((event != ST_INJECT_ACTION) && is_realtime(t)) {
+		printk(KERN_WARNING "Only non-real-time tasks may inject sched_trace events (except for ST_INJECT_ACTION).\n");
 		retval = -EINVAL;
 		goto out;
 	}
@@ -368,6 +368,13 @@ asmlinkage long sys_sched_trace_event(int event, struct st_inject_args __user *_
 
 		sched_trace_task_release(t);
 		break;
+	case ST_INJECT_ACTION:
+		if (!__args) {
+			retval = -EINVAL;
+			goto out;
+		}
+		sched_trace_action(t, args.action);
+		break;
 
 	/**********************/
 	/* unsupported events */
```
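With this change, a real-time task may inject exactly one event type, ST_INJECT_ACTION, which lets an application drop its own markers into a schedule trace while it runs. A hypothetical userspace sketch; the syscall number, the numeric event id, and the full st_inject_args layout are not shown in this diff and would come from the LITMUS^RT headers (real code would use the corresponding liblitmus wrapper):

```c
/* Hypothetical sketch only: NR_sched_trace_event, ST_INJECT_ACTION, and
 * the full struct st_inject_args layout are placeholders here -- the real
 * definitions live in the LITMUS^RT user-space headers. */
#include <unistd.h>
#include <sys/syscall.h>

#define NR_sched_trace_event 0   /* placeholder syscall number */
#define ST_INJECT_ACTION     0   /* placeholder event id */

struct st_inject_args {
	unsigned int action;      /* the only field this patch reads */
	/* ...remaining fields elided... */
};

/* Mark a point of interest in the trace from inside an RT task. */
static long inject_action(unsigned int action)
{
	struct st_inject_args args = { .action = action };
	return syscall(NR_sched_trace_event, ST_INJECT_ACTION, &args);
}
```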
```diff
diff --git a/litmus/locking.c b/litmus/locking.c
index 8ba46f85f5c6..73ebde3e8957 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -10,6 +10,7 @@
 #include <litmus/trace.h>
 #include <litmus/litmus.h>
 #include <litmus/wait.h>
+#include <litmus/sched_trace.h>
 
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 #include <linux/uaccess.h>
@@ -124,6 +125,8 @@ asmlinkage long sys_litmus_lock(int lock_od)
 			TRACE_CUR("Attempts to lock %d\n", l->ident);
 			err = l->ops->lock(l);
 			if (!err) {
+				sched_trace_lock(current, l->ident, 1);
+
 				TRACE_CUR("Got lock %d\n", l->ident);
 			}
 		}
@@ -156,6 +159,8 @@ asmlinkage long sys_litmus_unlock(int lock_od)
 			TRACE_CUR("Attempts to unlock %d\n", l->ident);
 			err = l->ops->unlock(l);
 			if (!err) {
+				sched_trace_lock(current, l->ident, 0);
+
 				TRACE_CUR("Unlocked %d\n", l->ident);
 			}
 		}
@@ -376,6 +381,9 @@ int __attempt_atomic_dgl_acquire(struct litmus_lock *cur_lock, dgl_wait_state_t
 	for(i = 0; i < dgl_wait->size; ++i) {
 		struct litmus_lock *l = dgl_wait->locks[i];
 		l->ops->dgl_quick_lock(l, cur_lock, dgl_wait->task, &dgl_wait->wq_nodes[i]);
+
+		sched_trace_lock(dgl_wait->task, l->ident, 1);
+
 		BUG_ON(!(l->ops->is_owner(l, dgl_wait->task)));
 	}
 
@@ -390,9 +398,11 @@ static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait)
 	raw_spinlock_t *dgl_lock;
 
 #ifdef CONFIG_SCHED_DEBUG_TRACE
-	char dglstr[CONFIG_LITMUS_MAX_DGL_SIZE*5];
-	snprintf_dgl(dglstr, sizeof(dglstr), dgl_wait->locks, dgl_wait->size);
-	TRACE_CUR("Locking DGL with size %d: %s\n", dgl_wait->size, dglstr);
+	{
+		char dglstr[MAX_DGL_SIZE*5];
+		snprintf_dgl(dglstr, sizeof(dgl_wait->size*5), dgl_wait->locks, dgl_wait->size);
+		TRACE_CUR("Locking DGL with size %d: %s\n", dgl_wait->size, dglstr);
+	}
 #endif
 
 	BUG_ON(dgl_wait->task != current);
@@ -409,6 +419,7 @@ static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait)
 		// dgl_lock() must set task state to TASK_UNINTERRUPTIBLE if task blocks.
 
 		if(tmp->ops->dgl_lock(tmp, dgl_wait, &dgl_wait->wq_nodes[i])) {
+			sched_trace_lock(dgl_wait->task, tmp->ident, 1);
 			--(dgl_wait->nr_remaining);
 			TRACE_CUR("Acquired lock %d immediatly.\n", tmp->ident);
 		}
@@ -446,11 +457,11 @@ static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait)
 		TRACE_CUR("Woken up from DGL suspension.\n");
 	}
 
-	// FOR SANITY CHECK FOR TESTING
-	for(i = 0; i < dgl_wait->size; ++i) {
-		struct litmus_lock *tmp = dgl_wait->locks[i];
-		BUG_ON(!tmp->ops->is_owner(tmp, dgl_wait->task));
-	}
+	// // FOR SANITY CHECK FOR TESTING
+	// for(i = 0; i < dgl_wait->size; ++i) {
+	// 	struct litmus_lock *tmp = dgl_wait->locks[i];
+	// 	BUG_ON(!tmp->ops->is_owner(tmp, dgl_wait->task));
+	// }
 
 	TRACE_CUR("Acquired entire DGL\n");
 
@@ -467,9 +478,11 @@ static long do_litmus_dgl_atomic_lock(dgl_wait_state_t *dgl_wait)
 	struct task_struct *t = current;
 
 #ifdef CONFIG_SCHED_DEBUG_TRACE
-	char dglstr[CONFIG_LITMUS_MAX_DGL_SIZE*5];
-	snprintf_dgl(dglstr, sizeof(dglstr), dgl_wait->locks, dgl_wait->size);
-	TRACE_CUR("Atomic locking DGL with size %d: %s\n", dgl_wait->size, dglstr);
+	{
+		char dglstr[MAX_DGL_SIZE*5];
+		snprintf_dgl(dglstr, sizeof(dgl_wait->size*5), dgl_wait->locks, dgl_wait->size);
+		TRACE_CUR("Atomic locking DGL with size %d: %s\n", dgl_wait->size, dglstr);
+	}
 #endif
 
 	dgl_lock = litmus->get_dgl_spinlock(dgl_wait->task);
@@ -523,11 +536,11 @@ all_acquired:
 
 	dgl_wait->nr_remaining = 0;
 
-	// SANITY CHECK FOR TESTING
-	for(i = 0; i < dgl_wait->size; ++i) {
-		struct litmus_lock *tmp = dgl_wait->locks[i];
-		BUG_ON(!tmp->ops->is_owner(tmp, dgl_wait->task));
-	}
+	// // SANITY CHECK FOR TESTING
+	// for(i = 0; i < dgl_wait->size; ++i) {
+	// 	struct litmus_lock *tmp = dgl_wait->locks[i];
+	// 	BUG_ON(!tmp->ops->is_owner(tmp, dgl_wait->task));
+	// }
 
 	TRACE_CUR("Acquired entire DGL\n");
 
@@ -540,19 +553,14 @@ asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
 	struct task_struct *t = current;
 	long err = -EINVAL;
 	int dgl_ods[MAX_DGL_SIZE];
-	int i;
-
-	int num_need_atomic = 0;
-
-	dgl_wait_state_t dgl_wait_state;  // lives on the stack until all resources in DGL are held.
 
 	if(dgl_size > MAX_DGL_SIZE || dgl_size < 1)
 		goto out;
 
-	if(!access_ok(VERIFY_READ, usr_dgl_ods, dgl_size*(sizeof(int))))
+	if(!access_ok(VERIFY_READ, usr_dgl_ods, dgl_size*(sizeof(*dgl_ods))))
 		goto out;
 
-	if(__copy_from_user(&dgl_ods, usr_dgl_ods, dgl_size*(sizeof(int))))
+	if(__copy_from_user(&dgl_ods, usr_dgl_ods, dgl_size*(sizeof(*dgl_ods))))
 		goto out;
 
 	if (!is_realtime(t)) {
@@ -566,6 +574,10 @@ asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
 		err = sys_litmus_lock(dgl_ods[0]);
 	}
 	else {
+		int i;
+		int num_need_atomic = 0;
+		dgl_wait_state_t dgl_wait_state;  // lives on the stack until all resources in DGL are held.
+
 		init_dgl_wait_state(&dgl_wait_state);
 
 		for(i = 0; i < dgl_size; ++i) {
@@ -618,11 +630,9 @@ static long do_litmus_dgl_unlock(struct litmus_lock* dgl_locks[], int dgl_size)
 
 #ifdef CONFIG_SCHED_DEBUG_TRACE
 	{
-		char dglstr[CONFIG_LITMUS_MAX_DGL_SIZE*5];
-		snprintf_dgl(dglstr, sizeof(dglstr), dgl_locks, dgl_size);
-		TRACE_CUR("Unlocking a DGL with size %d: %s\n",
-			dgl_size,
-			dglstr);
+		char dglstr[MAX_DGL_SIZE*5];
+		snprintf_dgl(dglstr, sizeof(dgl_wait->size*5), dgl_locks, dgl_size);
+		TRACE_CUR("Unlocking a DGL with size %d: %s\n", dgl_size, dglstr);
 	}
 #endif
 
@@ -634,6 +644,7 @@ static long do_litmus_dgl_unlock(struct litmus_lock* dgl_locks[], int dgl_size)
 	TRACE_CUR("Unlocking lock %d of DGL.\n", l->ident);
 
 	tmp_err = l->ops->unlock(l);
+	sched_trace_lock(current, l->ident, 0);
 
 	if(tmp_err) {
 		TRACE_CUR("There was an error unlocking %d: %d.\n", l->ident, tmp_err);
@@ -650,18 +661,14 @@ asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size)
 {
 	long err = -EINVAL;
 	int dgl_ods[MAX_DGL_SIZE];
-	struct od_table_entry* entry;
-	int i;
-
-	struct litmus_lock* dgl_locks[MAX_DGL_SIZE];
 
 	if(dgl_size > MAX_DGL_SIZE || dgl_size < 1)
 		goto out;
 
-	if(!access_ok(VERIFY_READ, usr_dgl_ods, dgl_size*(sizeof(int))))
+	if(!access_ok(VERIFY_READ, usr_dgl_ods, dgl_size*(sizeof(*dgl_ods))))
 		goto out;
 
-	if(__copy_from_user(&dgl_ods, usr_dgl_ods, dgl_size*(sizeof(int))))
+	if(__copy_from_user(&dgl_ods, usr_dgl_ods, dgl_size*(sizeof(*dgl_ods))))
 		goto out;
 
 
@@ -671,8 +678,10 @@ asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size)
 		err = sys_litmus_unlock(dgl_ods[0]);
 	}
 	else {
+		struct litmus_lock *dgl_locks[MAX_DGL_SIZE];
+		int i;
 		for(i = 0; i < dgl_size; ++i) {
-			entry = get_entry_for_od(dgl_ods[i]);
+			struct od_table_entry *entry = get_entry_for_od(dgl_ods[i]);
 			if(entry && is_lock(entry)) {
 				dgl_locks[i] = get_lock(entry);
 				if(!dgl_locks[i]->ops->supports_dgl) {
```
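One of the quieter cleanups above swaps `sizeof(int)` for `sizeof(*dgl_ods)` in the access_ok()/__copy_from_user() size calculations, tying the byte count to the array's element type instead of repeating it. A small userspace illustration of why this idiom is safer (the MAX_DGL_SIZE value is assumed):

```c
#include <stdio.h>
#include <string.h>

#define MAX_DGL_SIZE 8  /* value assumed for this sketch */

int main(void)
{
	int dgl_ods[MAX_DGL_SIZE];
	int src[MAX_DGL_SIZE] = { 1, 2, 3 };
	int dgl_size = 3;

	/* sizeof(*dgl_ods) tracks the element type of dgl_ods: if the array
	 * ever became long dgl_ods[...], this line would still be correct,
	 * while a hard-coded sizeof(int) would silently under-copy. */
	memcpy(dgl_ods, src, dgl_size * sizeof(*dgl_ods));

	printf("%d %d %d\n", dgl_ods[0], dgl_ods[1], dgl_ods[2]);
	return 0;
}
```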
```diff
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 69f30188f3ba..fc8f277a1958 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -1161,6 +1161,13 @@ static int __increase_priority_inheritance(struct task_struct* t,
 	int check_preempt = 0;
 	cedf_domain_t* cluster;
 
+	if (prio_inh && prio_inh == effective_priority(t)) {
+		/* relationship already established. */
+		TRACE_TASK(t, "already has effective priority of %s/%d\n",
+			prio_inh->comm, prio_inh->pid);
+		goto out;
+	}
+
 	if (prio_inh && (effective_priority(prio_inh) != prio_inh)) {
 		TRACE_TASK(t, "Inheriting from %s/%d instead of the eff_prio = %s/%d!\n",
 			prio_inh->comm, prio_inh->pid,
@@ -1182,13 +1189,6 @@ static int __increase_priority_inheritance(struct task_struct* t,
 #endif
 	}
 
-	if (prio_inh && prio_inh == effective_priority(t)) {
-		/* relationship already established. */
-		TRACE_TASK(t, "already has effective priority of %s/%d\n",
-			prio_inh->comm, prio_inh->pid);
-		goto out;
-	}
-
 	cluster = task_cpu_cluster(t);
 
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
@@ -1196,6 +1196,8 @@ static int __increase_priority_inheritance(struct task_struct* t,
 		/* TODO (klmirqd): Skip this check if 't' is a proxy thread (???) */
 		if(__edf_higher_prio(prio_inh, BASE, t, EFFECTIVE)) {
 #endif
+			sched_trace_eff_prio_change(t, prio_inh);
+
 			TRACE_TASK(t, "inherits priority from %s/%d\n",
 				prio_inh->comm, prio_inh->pid);
 			tsk_rt(t)->inh_task = prio_inh;
@@ -1348,6 +1350,8 @@ static int __decrease_priority_inheritance(struct task_struct* t,
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 	if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) {
 #endif
+		sched_trace_eff_prio_change(t, prio_inh);
+
 		/* A job only stops inheriting a priority when it releases a
 		 * resource. Thus we can make the following assumption.*/
 		if(prio_inh)
```
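The sched_cedf.c reorder hoists the "relationship already established" test to the very top of __increase_priority_inheritance(), so re-asserting an inheritance that is already in place returns before the function emits the "Inheriting from ... instead of the eff_prio" warning or touches any state. A minimal sketch of the resulting idempotent-update shape, with simplified stand-in types rather than the scheduler's real ones:

```c
/* Simplified stand-in types; the real code operates on task_struct and
 * takes cluster locks before mutating anything. */
struct task { struct task *inh_task; };

static struct task *effective_priority(struct task *t)
{
	return t->inh_task ? t->inh_task : t;
}

static int increase_inheritance(struct task *t, struct task *prio_inh)
{
	/* bail out first: re-asserting an existing relationship is a no-op,
	 * so no diagnostics fire and no state is touched */
	if (prio_inh && prio_inh == effective_priority(t))
		return 0;

	/* ...only now normalize prio_inh, warn, and update bookkeeping... */
	t->inh_task = prio_inh;
	return 1;
}
```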