author    | Glenn Elliott <gelliott@cs.unc.edu> | 2012-09-16 17:44:37 -0400
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2012-09-16 17:44:37 -0400
commit    | 4e8f9b7c2e9134ca31feb91dee3609a95df6de56 (patch)
tree      | 1e7a115068299b0fce7b8369731ed74bffa1a3c7
parent    | 4ad6ba08f0dab67bbd89a26b27f1cc86e3c45c13 (diff)
Implement real-time aux threads. G-EDF only.
-rw-r--r-- | include/litmus/aux_tasks.h | 4
-rw-r--r-- | include/litmus/litmus.h | 4
-rw-r--r-- | include/litmus/rt_param.h | 4
-rw-r--r-- | include/litmus/sched_plugin.h | 2
-rw-r--r-- | kernel/sched.c | 24
-rw-r--r-- | litmus/aux_tasks.c | 243
-rw-r--r-- | litmus/edf_common.c | 83
-rw-r--r-- | litmus/litmus.c | 44
-rw-r--r-- | litmus/preempt.c | 25
-rw-r--r-- | litmus/sched_gsn_edf.c | 169
-rw-r--r-- | litmus/sched_litmus.c | 4
-rw-r--r-- | litmus/sched_plugin.c | 22
12 files changed, 357 insertions, 271 deletions
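For context, here is a minimal userspace sketch of how a process might call the syscall this commit reworks (sys_slave_non_rt_threads now takes an int enable flag). The wrapper name and syscall number below are placeholders chosen for illustration; only the (int enable) signature comes from the patch.

```c
/* Hypothetical usage sketch -- the syscall number and wrapper name are
 * placeholders, NOT part of this commit. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_slave_non_rt_threads
#define __NR_slave_non_rt_threads 350	/* placeholder number */
#endif

static long slave_non_rt_threads(int enable)
{
	return syscall(__NR_slave_non_rt_threads, enable);
}

int main(void)
{
	/* Turn all current non-real-time threads of this process into aux
	 * threads that inherit from the group's highest-priority rt owner. */
	if (slave_non_rt_threads(1) != 0)
		perror("enable aux threads");

	/* ... spawn real-time work, run ... */

	/* Tear the relationship back down (the disable path is new here). */
	if (slave_non_rt_threads(0) != 0)
		perror("disable aux threads");

	return 0;
}
```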
diff --git a/include/litmus/aux_tasks.h b/include/litmus/aux_tasks.h index 8e50ac85b082..3bb6b26fef09 100644 --- a/include/litmus/aux_tasks.h +++ b/include/litmus/aux_tasks.h | |||
@@ -3,8 +3,6 @@ | |||
3 | 3 | ||
4 | struct task_struct; | 4 | struct task_struct; |
5 | 5 | ||
6 | #define MAGIC_AUX_TASK_PERIOD ~((lt_t)0) | ||
7 | |||
8 | /* admit an aux task with default parameters */ | 6 | /* admit an aux task with default parameters */ |
9 | //int admit_aux_task(struct task_struct *t); | 7 | //int admit_aux_task(struct task_struct *t); |
10 | 8 | ||
@@ -30,4 +28,4 @@ int aux_task_owner_increase_priority(struct task_struct *t); | |||
30 | /* call when an aux_owner decreases its priority */ | 28 | /* call when an aux_owner decreases its priority */ |
31 | int aux_task_owner_decrease_priority(struct task_struct *t); | 29 | int aux_task_owner_decrease_priority(struct task_struct *t); |
32 | 30 | ||
33 | #endif \ No newline at end of file | 31 | #endif |
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h index f9829167294d..db2987a24686 100644 --- a/include/litmus/litmus.h +++ b/include/litmus/litmus.h | |||
@@ -184,8 +184,10 @@ static inline int request_exit_np_atomic(struct task_struct *t) | |||
184 | * retry loop here since tasks might exploit that to | 184 | * retry loop here since tasks might exploit that to |
185 | * keep the kernel busy indefinitely. */ | 185 | * keep the kernel busy indefinitely. */ |
186 | } | 186 | } |
187 | } else | 187 | } |
188 | else { | ||
188 | return 0; | 189 | return 0; |
190 | } | ||
189 | } | 191 | } |
190 | 192 | ||
191 | #else | 193 | #else |
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h index 2a6c70f1dd37..c45ba23d7650 100644 --- a/include/litmus/rt_param.h +++ b/include/litmus/rt_param.h | |||
@@ -284,11 +284,11 @@ struct rt_param { | |||
284 | struct litmus_lock* blocked_lock; | 284 | struct litmus_lock* blocked_lock; |
285 | #endif | 285 | #endif |
286 | 286 | ||
287 | 287 | ||
288 | #ifdef CONFIG_LITMUS_LOCKING | 288 | #ifdef CONFIG_LITMUS_LOCKING |
289 | unsigned int is_aux_task:1; | 289 | unsigned int is_aux_task:1; |
290 | unsigned int has_aux_tasks:1; | 290 | unsigned int has_aux_tasks:1; |
291 | 291 | ||
292 | struct list_head aux_task_node; | 292 | struct list_head aux_task_node; |
293 | struct binheap_node aux_task_owner_node; | 293 | struct binheap_node aux_task_owner_node; |
294 | #endif | 294 | #endif |
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h index bd75e7c09a10..65736b2a9199 100644 --- a/include/litmus/sched_plugin.h +++ b/include/litmus/sched_plugin.h | |||
@@ -150,7 +150,7 @@ struct sched_plugin { | |||
150 | allocate_lock_t allocate_lock; | 150 | allocate_lock_t allocate_lock; |
151 | increase_prio_t increase_prio; | 151 | increase_prio_t increase_prio; |
152 | decrease_prio_t decrease_prio; | 152 | decrease_prio_t decrease_prio; |
153 | 153 | ||
154 | __increase_prio_t __increase_prio; | 154 | __increase_prio_t __increase_prio; |
155 | __decrease_prio_t __decrease_prio; | 155 | __decrease_prio_t __decrease_prio; |
156 | #endif | 156 | #endif |
diff --git a/kernel/sched.c b/kernel/sched.c index 9e8d8698323b..0e4b3d40cd29 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -2703,8 +2703,10 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) | |||
2703 | unsigned long flags; | 2703 | unsigned long flags; |
2704 | int cpu, success = 0; | 2704 | int cpu, success = 0; |
2705 | 2705 | ||
2706 | if (is_realtime(p)) | 2706 | if (is_realtime(p)) { |
2707 | //WARN_ON(1); | ||
2707 | TRACE_TASK(p, "try_to_wake_up() state:%d\n", p->state); | 2708 | TRACE_TASK(p, "try_to_wake_up() state:%d\n", p->state); |
2709 | } | ||
2708 | 2710 | ||
2709 | smp_wmb(); | 2711 | smp_wmb(); |
2710 | raw_spin_lock_irqsave(&p->pi_lock, flags); | 2712 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
@@ -3169,6 +3171,12 @@ asmlinkage void schedule_tail(struct task_struct *prev) | |||
3169 | { | 3171 | { |
3170 | struct rq *rq = this_rq(); | 3172 | struct rq *rq = this_rq(); |
3171 | 3173 | ||
3174 | sched_trace_task_switch_to(current); | ||
3175 | |||
3176 | if (sched_state_validate_switch()) { | ||
3177 | WARN_ON(1); | ||
3178 | } | ||
3179 | |||
3172 | finish_task_switch(rq, prev); | 3180 | finish_task_switch(rq, prev); |
3173 | 3181 | ||
3174 | /* | 3182 | /* |
@@ -4416,8 +4424,16 @@ litmus_need_resched_nonpreemptible: | |||
4416 | 4424 | ||
4417 | post_schedule(rq); | 4425 | post_schedule(rq); |
4418 | 4426 | ||
4419 | if (sched_state_validate_switch()) | 4427 | if (sched_state_validate_switch()) { |
4428 | TRACE_CUR("cpu %d: have to redo scheduling decision!\n", cpu); | ||
4420 | goto litmus_need_resched_nonpreemptible; | 4429 | goto litmus_need_resched_nonpreemptible; |
4430 | } | ||
4431 | else if (current->policy == SCHED_LITMUS) { | ||
4432 | TRACE_CUR("cpu %d: valid switch to rt task %s/%d.\n", cpu, current->comm, current->pid); | ||
4433 | } | ||
4434 | else { | ||
4435 | // TRACE_CUR("cpu %d: switch: %s/%d\n", cpu, current->comm, current->pid); | ||
4436 | } | ||
4421 | 4437 | ||
4422 | preempt_enable_no_resched(); | 4438 | preempt_enable_no_resched(); |
4423 | 4439 | ||
@@ -4430,8 +4446,8 @@ litmus_need_resched_nonpreemptible: | |||
4430 | 4446 | ||
4431 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | 4447 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD |
4432 | litmus->run_tasklets(prev); | 4448 | litmus->run_tasklets(prev); |
4433 | #endif | 4449 | #endif |
4434 | 4450 | ||
4435 | srp_ceiling_block(); | 4451 | srp_ceiling_block(); |
4436 | } | 4452 | } |
4437 | EXPORT_SYMBOL(schedule); | 4453 | EXPORT_SYMBOL(schedule); |
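The hunks above mostly add tracing around the existing validate-and-retry pattern in schedule(): if sched_state_validate_switch() returns nonzero after the context switch, the whole scheduling decision is redone. A bare-bones sketch of that control flow, with placeholder helpers standing in for the surrounding schedule() machinery, looks like this:

```c
/* Sketch only: sched_state_validate_switch() and TRACE_CUR() are real; the
 * other helper is a placeholder for the pick/switch code in schedule(). */
static void schedule_retry_sketch(void)
{
	int redo;

	do {
		pick_and_switch_to_next_task();		/* placeholder */

		/* Nonzero means another CPU invalidated this pick while the
		 * switch was in flight, so redo the whole decision rather
		 * than run a stale choice. */
		redo = sched_state_validate_switch();
		if (redo)
			TRACE_CUR("have to redo scheduling decision!\n");
	} while (redo);
}
```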
diff --git a/litmus/aux_tasks.c b/litmus/aux_tasks.c index c197a95fc3a1..5057137bbbea 100644 --- a/litmus/aux_tasks.c +++ b/litmus/aux_tasks.c | |||
@@ -10,22 +10,37 @@ static int admit_aux_task(struct task_struct *t) | |||
10 | { | 10 | { |
11 | int retval = 0; | 11 | int retval = 0; |
12 | struct task_struct *leader = t->group_leader; | 12 | struct task_struct *leader = t->group_leader; |
13 | 13 | ||
14 | |||
15 | /* budget enforcement increments job numbers. job numbers are used in | ||
16 | * tie-breaking of aux_tasks. This method helps ensure: | ||
17 | * 1) aux threads with no inherited priority cannot starve one another | ||
18 | * (they share the CPUs equally). | ||
19 | * 2) aux threads that inherit the same priority cannot starve each other. | ||
20 | * | ||
21 | * Assuming aux threads are well-behaved (they do very little work and | ||
22 | * suspend), risk of starvation should not be an issue, but this is a | ||
23 | * fail-safe. | ||
24 | */ | ||
14 | struct rt_task tp = { | 25 | struct rt_task tp = { |
15 | .exec_cost = 0, | 26 | //.period = MAGIC_AUX_TASK_PERIOD, |
16 | .period = MAGIC_AUX_TASK_PERIOD, | 27 | //.relative_deadline = MAGIC_AUX_TASK_PERIOD, |
17 | .relative_deadline = MAGIC_AUX_TASK_PERIOD, | 28 | .period = 1000000, /* has to wait 1 ms before it can run again once it has exhausted budget */ |
29 | .relative_deadline = 1000000, | ||
30 | .exec_cost = 1000000, /* allow full utilization */ | ||
18 | .phase = 0, | 31 | .phase = 0, |
19 | .cpu = task_cpu(leader), /* take CPU of group leader */ | 32 | .cpu = task_cpu(leader), /* take CPU of group leader */ |
20 | .budget_policy = NO_ENFORCEMENT, | 33 | //.budget_policy = NO_ENFORCEMENT, |
34 | .budget_policy = QUANTUM_ENFORCEMENT, | ||
35 | .budget_signal_policy = NO_SIGNALS, | ||
21 | .cls = RT_CLASS_BEST_EFFORT | 36 | .cls = RT_CLASS_BEST_EFFORT |
22 | }; | 37 | }; |
23 | 38 | ||
24 | struct sched_param param = { .sched_priority = 0}; | 39 | struct sched_param param = { .sched_priority = 0}; |
25 | 40 | ||
26 | tsk_rt(t)->task_params = tp; | 41 | tsk_rt(t)->task_params = tp; |
27 | retval = sched_setscheduler_nocheck(t, SCHED_LITMUS, ¶m); | 42 | retval = sched_setscheduler_nocheck(t, SCHED_LITMUS, ¶m); |
28 | 43 | ||
29 | return retval; | 44 | return retval; |
30 | } | 45 | } |
31 | 46 | ||
@@ -33,19 +48,19 @@ int exit_aux_task(struct task_struct *t) | |||
33 | { | 48 | { |
34 | int retval = 0; | 49 | int retval = 0; |
35 | struct task_struct *leader = t->group_leader; | 50 | struct task_struct *leader = t->group_leader; |
36 | 51 | ||
37 | BUG_ON(!tsk_rt(t)->is_aux_task); | 52 | BUG_ON(!tsk_rt(t)->is_aux_task); |
38 | 53 | ||
39 | TRACE_CUR("Aux task %s/%d is exiting from %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); | 54 | TRACE_CUR("Aux task %s/%d is exiting from %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); |
40 | 55 | ||
41 | list_del(&tsk_rt(t)->aux_task_node); | 56 | list_del(&tsk_rt(t)->aux_task_node); |
42 | 57 | ||
43 | tsk_rt(t)->is_aux_task = 0; | 58 | tsk_rt(t)->is_aux_task = 0; |
44 | 59 | ||
45 | if (tsk_rt(t)->inh_task) { | 60 | if (tsk_rt(t)->inh_task) { |
46 | litmus->decrease_prio(t, NULL); | 61 | litmus->decrease_prio(t, NULL); |
47 | } | 62 | } |
48 | 63 | ||
49 | return retval; | 64 | return retval; |
50 | } | 65 | } |
51 | 66 | ||
@@ -53,34 +68,23 @@ static int aux_tasks_increase_priority(struct task_struct *leader, struct task_s | |||
53 | { | 68 | { |
54 | int retval = 0; | 69 | int retval = 0; |
55 | struct list_head *pos; | 70 | struct list_head *pos; |
56 | 71 | ||
57 | TRACE_CUR("Increasing priority of aux tasks in group %s/%d.\n", leader->comm, leader->pid); | 72 | TRACE_CUR("Increasing priority of aux tasks in group %s/%d.\n", leader->comm, leader->pid); |
58 | 73 | ||
59 | list_for_each(pos, &tsk_aux(leader)->aux_tasks) { | 74 | list_for_each(pos, &tsk_aux(leader)->aux_tasks) { |
60 | struct task_struct *aux = | 75 | struct task_struct *aux = |
61 | container_of(list_entry(pos, struct rt_param, aux_task_node), | 76 | container_of(list_entry(pos, struct rt_param, aux_task_node), |
62 | struct task_struct, rt_param); | 77 | struct task_struct, rt_param); |
63 | 78 | ||
64 | if (!is_realtime(aux)) { | 79 | if (!is_realtime(aux)) { |
65 | #if 0 | ||
66 | /* currently can't do this here because of scheduler deadlock on itself */ | ||
67 | TRACE_CUR("aux_tasks_increase_priorityting aux task: %s/%d\n", aux->comm, aux->pid); | ||
68 | retval = admit_aux_task(aux); | ||
69 | |||
70 | if (retval != 0) { | ||
71 | TRACE_CUR("failed to admit aux task %s/%d\n", aux->comm, aux->pid); | ||
72 | goto out; | ||
73 | } | ||
74 | #endif | ||
75 | TRACE_CUR("skipping non-real-time aux task %s/%d\n", aux->comm, aux->pid); | 80 | TRACE_CUR("skipping non-real-time aux task %s/%d\n", aux->comm, aux->pid); |
76 | } | 81 | } |
77 | 82 | ||
78 | // aux tasks don't touch rt locks, so no nested call needed. | 83 | // aux tasks don't touch rt locks, so no nested call needed. |
79 | TRACE_CUR("increasing %s/%d.\n", aux->comm, aux->pid); | 84 | TRACE_CUR("increasing %s/%d.\n", aux->comm, aux->pid); |
80 | retval = litmus->__increase_prio(aux, hp); | 85 | retval = litmus->__increase_prio(aux, hp); |
81 | } | 86 | } |
82 | 87 | ||
83 | //out: | ||
84 | return retval; | 88 | return retval; |
85 | } | 89 | } |
86 | 90 | ||
@@ -88,30 +92,15 @@ static int aux_tasks_decrease_priority(struct task_struct *leader, struct task_s | |||
88 | { | 92 | { |
89 | int retval = 0; | 93 | int retval = 0; |
90 | struct list_head *pos; | 94 | struct list_head *pos; |
91 | 95 | ||
92 | TRACE_CUR("Decreasing priority of aux tasks in group %s/%d.\n", leader->comm, leader->pid); | 96 | TRACE_CUR("Decreasing priority of aux tasks in group %s/%d.\n", leader->comm, leader->pid); |
93 | 97 | ||
94 | list_for_each(pos, &tsk_aux(leader)->aux_tasks) { | 98 | list_for_each(pos, &tsk_aux(leader)->aux_tasks) { |
95 | struct task_struct *aux = | 99 | struct task_struct *aux = |
96 | container_of(list_entry(pos, struct rt_param, aux_task_node), | 100 | container_of(list_entry(pos, struct rt_param, aux_task_node), |
97 | struct task_struct, rt_param); | 101 | struct task_struct, rt_param); |
98 | 102 | ||
99 | if (!is_realtime(aux)) { | 103 | if (!is_realtime(aux)) { |
100 | #if 0 | ||
101 | /* currently can't do this here because of scheduler deadlock on itself */ | ||
102 | TRACE_CUR("aux_tasks_increase_priorityting aux task: %s/%d\n", aux->comm, aux->pid); | ||
103 | retval = admit_aux_task(aux); | ||
104 | |||
105 | if (retval != 0) | ||
106 | goto out; | ||
107 | |||
108 | if (hp) { | ||
109 | // aux tasks don't touch rt locks, so no nested call needed. | ||
110 | TRACE_CUR("decreasing (actually increasing) %s/%d.\n", aux->comm, aux->pid); | ||
111 | retval = litmus->__increase_prio(aux, hp); | ||
112 | } | ||
113 | #endif | ||
114 | |||
115 | TRACE_CUR("skipping non-real-time aux task %s/%d\n", aux->comm, aux->pid); | 104 | TRACE_CUR("skipping non-real-time aux task %s/%d\n", aux->comm, aux->pid); |
116 | } | 105 | } |
117 | else { | 106 | else { |
@@ -119,8 +108,7 @@ static int aux_tasks_decrease_priority(struct task_struct *leader, struct task_s | |||
119 | retval = litmus->__decrease_prio(aux, hp); | 108 | retval = litmus->__decrease_prio(aux, hp); |
120 | } | 109 | } |
121 | } | 110 | } |
122 | 111 | ||
123 | //out: | ||
124 | return retval; | 112 | return retval; |
125 | } | 113 | } |
126 | 114 | ||
@@ -133,20 +121,20 @@ int aux_task_owner_increase_priority(struct task_struct *t) | |||
133 | BUG_ON(!tsk_rt(t)->has_aux_tasks); | 121 | BUG_ON(!tsk_rt(t)->has_aux_tasks); |
134 | BUG_ON(!is_realtime(t)); | 122 | BUG_ON(!is_realtime(t)); |
135 | BUG_ON(!binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)); | 123 | BUG_ON(!binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)); |
136 | 124 | ||
137 | leader = t->group_leader; | 125 | leader = t->group_leader; |
138 | 126 | ||
139 | TRACE_CUR("task %s/%d in group %s/%d increasing priority.\n", t->comm, t->pid, leader->comm, leader->pid); | 127 | TRACE_CUR("task %s/%d in group %s/%d increasing priority.\n", t->comm, t->pid, leader->comm, leader->pid); |
140 | 128 | ||
141 | hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), | 129 | hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), |
142 | struct task_struct, rt_param); | 130 | struct task_struct, rt_param); |
143 | 131 | ||
144 | if (hp == t) { | 132 | if (hp == t) { |
145 | goto out; // already hp, nothing to do. | 133 | goto out; // already hp, nothing to do. |
146 | } | 134 | } |
147 | 135 | ||
148 | binheap_decrease(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners); | 136 | binheap_decrease(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners); |
149 | 137 | ||
150 | hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), | 138 | hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), |
151 | struct task_struct, rt_param); | 139 | struct task_struct, rt_param); |
152 | 140 | ||
@@ -155,7 +143,7 @@ int aux_task_owner_increase_priority(struct task_struct *t) | |||
155 | retval = aux_tasks_increase_priority(leader, | 143 | retval = aux_tasks_increase_priority(leader, |
156 | (tsk_rt(hp)->inh_task) ? tsk_rt(hp)->inh_task : hp); | 144 | (tsk_rt(hp)->inh_task) ? tsk_rt(hp)->inh_task : hp); |
157 | } | 145 | } |
158 | 146 | ||
159 | out: | 147 | out: |
160 | return retval; | 148 | return retval; |
161 | } | 149 | } |
@@ -166,15 +154,15 @@ int aux_task_owner_decrease_priority(struct task_struct *t) | |||
166 | struct task_struct *leader; | 154 | struct task_struct *leader; |
167 | struct task_struct *hp = NULL; | 155 | struct task_struct *hp = NULL; |
168 | struct task_struct *new_hp = NULL; | 156 | struct task_struct *new_hp = NULL; |
169 | 157 | ||
170 | BUG_ON(!tsk_rt(t)->has_aux_tasks); | 158 | BUG_ON(!tsk_rt(t)->has_aux_tasks); |
171 | BUG_ON(!is_realtime(t)); | 159 | BUG_ON(!is_realtime(t)); |
172 | BUG_ON(!binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)); | 160 | BUG_ON(!binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)); |
173 | 161 | ||
174 | leader = t->group_leader; | 162 | leader = t->group_leader; |
175 | 163 | ||
176 | TRACE_CUR("task %s/%d in group %s/%d decresing priority.\n", t->comm, t->pid, leader->comm, leader->pid); | 164 | TRACE_CUR("task %s/%d in group %s/%d decresing priority.\n", t->comm, t->pid, leader->comm, leader->pid); |
177 | 165 | ||
178 | hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), | 166 | hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), |
179 | struct task_struct, rt_param); | 167 | struct task_struct, rt_param); |
180 | binheap_delete(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners); | 168 | binheap_delete(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners); |
@@ -182,7 +170,7 @@ int aux_task_owner_decrease_priority(struct task_struct *t) | |||
182 | struct rt_param, aux_task_owner_node); | 170 | struct rt_param, aux_task_owner_node); |
183 | new_hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), | 171 | new_hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), |
184 | struct task_struct, rt_param); | 172 | struct task_struct, rt_param); |
185 | 173 | ||
186 | if (hp == t && new_hp != t) { | 174 | if (hp == t && new_hp != t) { |
187 | TRACE_CUR("%s/%d is no longer hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); | 175 | TRACE_CUR("%s/%d is no longer hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); |
188 | retval = aux_tasks_decrease_priority(leader, | 176 | retval = aux_tasks_decrease_priority(leader, |
@@ -204,28 +192,28 @@ long enable_aux_task_owner(struct task_struct *t) | |||
204 | TRACE_CUR("task %s/%d is not an aux owner\n", t->comm, t->pid); | 192 | TRACE_CUR("task %s/%d is not an aux owner\n", t->comm, t->pid); |
205 | return -1; | 193 | return -1; |
206 | } | 194 | } |
207 | 195 | ||
208 | BUG_ON(!is_realtime(t)); | 196 | BUG_ON(!is_realtime(t)); |
209 | 197 | ||
210 | if (binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)) { | 198 | if (binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)) { |
211 | TRACE_CUR("task %s/%d is already active\n", t->comm, t->pid); | 199 | TRACE_CUR("task %s/%d is already active\n", t->comm, t->pid); |
212 | goto out; | 200 | goto out; |
213 | } | 201 | } |
214 | 202 | ||
215 | binheap_add(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners, | 203 | binheap_add(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners, |
216 | struct rt_param, aux_task_owner_node); | 204 | struct rt_param, aux_task_owner_node); |
217 | 205 | ||
218 | hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), | 206 | hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), |
219 | struct task_struct, rt_param); | 207 | struct task_struct, rt_param); |
220 | if (hp == t) { | 208 | if (hp == t) { |
221 | /* we're the new hp */ | 209 | /* we're the new hp */ |
222 | TRACE_CUR("%s/%d is new hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); | 210 | TRACE_CUR("%s/%d is new hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); |
223 | 211 | ||
224 | retval = aux_tasks_increase_priority(leader, | 212 | retval = aux_tasks_increase_priority(leader, |
225 | (tsk_rt(hp)->inh_task)? tsk_rt(hp)->inh_task : hp); | 213 | (tsk_rt(hp)->inh_task)? tsk_rt(hp)->inh_task : hp); |
226 | } | 214 | } |
227 | 215 | ||
228 | 216 | ||
229 | out: | 217 | out: |
230 | return retval; | 218 | return retval; |
231 | } | 219 | } |
@@ -236,42 +224,42 @@ long disable_aux_task_owner(struct task_struct *t) | |||
236 | struct task_struct *leader = t->group_leader; | 224 | struct task_struct *leader = t->group_leader; |
237 | struct task_struct *hp; | 225 | struct task_struct *hp; |
238 | struct task_struct *new_hp = NULL; | 226 | struct task_struct *new_hp = NULL; |
239 | 227 | ||
240 | if (!tsk_rt(t)->has_aux_tasks) { | 228 | if (!tsk_rt(t)->has_aux_tasks) { |
241 | TRACE_CUR("task %s/%d is not an aux owner\n", t->comm, t->pid); | 229 | TRACE_CUR("task %s/%d is not an aux owner\n", t->comm, t->pid); |
242 | return -1; | 230 | return -1; |
243 | } | 231 | } |
244 | 232 | ||
245 | BUG_ON(!is_realtime(t)); | 233 | BUG_ON(!is_realtime(t)); |
246 | 234 | ||
247 | if (!binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)) { | 235 | if (!binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)) { |
248 | TRACE_CUR("task %s/%d is already not active\n", t->comm, t->pid); | 236 | TRACE_CUR("task %s/%d is already not active\n", t->comm, t->pid); |
249 | goto out; | 237 | goto out; |
250 | } | 238 | } |
251 | 239 | ||
252 | TRACE_CUR("task %s/%d exiting from group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); | 240 | TRACE_CUR("task %s/%d exiting from group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); |
253 | 241 | ||
254 | hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), | 242 | hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), |
255 | struct task_struct, rt_param); | 243 | struct task_struct, rt_param); |
256 | binheap_delete(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners); | 244 | binheap_delete(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners); |
257 | 245 | ||
258 | if (!binheap_empty(&tsk_aux(leader)->aux_task_owners)) { | 246 | if (!binheap_empty(&tsk_aux(leader)->aux_task_owners)) { |
259 | new_hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), | 247 | new_hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), |
260 | struct task_struct, rt_param); | 248 | struct task_struct, rt_param); |
261 | } | 249 | } |
262 | 250 | ||
263 | if (hp == t && new_hp != t) { | 251 | if (hp == t && new_hp != t) { |
264 | struct task_struct *to_inh = NULL; | 252 | struct task_struct *to_inh = NULL; |
265 | 253 | ||
266 | TRACE_CUR("%s/%d is no longer hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); | 254 | TRACE_CUR("%s/%d is no longer hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); |
267 | 255 | ||
268 | if (new_hp) { | 256 | if (new_hp) { |
269 | to_inh = (tsk_rt(new_hp)->inh_task) ? tsk_rt(new_hp)->inh_task : new_hp; | 257 | to_inh = (tsk_rt(new_hp)->inh_task) ? tsk_rt(new_hp)->inh_task : new_hp; |
270 | } | 258 | } |
271 | 259 | ||
272 | retval = aux_tasks_decrease_priority(leader, to_inh); | 260 | retval = aux_tasks_decrease_priority(leader, to_inh); |
273 | } | 261 | } |
274 | 262 | ||
275 | out: | 263 | out: |
276 | return retval; | 264 | return retval; |
277 | } | 265 | } |
@@ -284,60 +272,47 @@ static int aux_task_owner_max_priority_order(struct binheap_node *a, | |||
284 | struct task_struct, rt_param); | 272 | struct task_struct, rt_param); |
285 | struct task_struct *d_b = container_of(binheap_entry(b, struct rt_param, aux_task_owner_node), | 273 | struct task_struct *d_b = container_of(binheap_entry(b, struct rt_param, aux_task_owner_node), |
286 | struct task_struct, rt_param); | 274 | struct task_struct, rt_param); |
287 | 275 | ||
288 | BUG_ON(!d_a); | 276 | BUG_ON(!d_a); |
289 | BUG_ON(!d_b); | 277 | BUG_ON(!d_b); |
290 | 278 | ||
291 | return litmus->compare(d_a, d_b); | 279 | return litmus->compare(d_a, d_b); |
292 | } | 280 | } |
293 | 281 | ||
294 | 282 | ||
295 | asmlinkage long sys_slave_non_rt_threads(void) | 283 | static long __do_enable_slave_non_rt_threads(void) |
296 | { | 284 | { |
297 | long retval = 0; | 285 | long retval = 0; |
298 | struct task_struct *leader; | 286 | struct task_struct *leader; |
299 | struct task_struct *t; | 287 | struct task_struct *t; |
300 | 288 | ||
301 | read_lock_irq(&tasklist_lock); | ||
302 | |||
303 | leader = current->group_leader; | 289 | leader = current->group_leader; |
304 | 290 | ||
305 | #if 0 | ||
306 | t = leader; | ||
307 | do { | ||
308 | if (tsk_rt(t)->has_aux_tasks || tsk_rt(t)->is_aux_task) { | ||
309 | printk("slave_non_rt_tasks may only be called once per process.\n"); | ||
310 | retval = -EINVAL; | ||
311 | goto out_unlock; | ||
312 | } | ||
313 | } while (t != leader); | ||
314 | #endif | ||
315 | |||
316 | if (!tsk_aux(leader)->initialized) { | 291 | if (!tsk_aux(leader)->initialized) { |
317 | INIT_LIST_HEAD(&tsk_aux(leader)->aux_tasks); | 292 | INIT_LIST_HEAD(&tsk_aux(leader)->aux_tasks); |
318 | INIT_BINHEAP_HANDLE(&tsk_aux(leader)->aux_task_owners, aux_task_owner_max_priority_order); | 293 | INIT_BINHEAP_HANDLE(&tsk_aux(leader)->aux_task_owners, aux_task_owner_max_priority_order); |
319 | tsk_aux(leader)->initialized = 1; | 294 | tsk_aux(leader)->initialized = 1; |
320 | } | 295 | } |
321 | 296 | ||
322 | t = leader; | 297 | t = leader; |
323 | do { | 298 | do { |
324 | /* doesn't hurt to initialize them both */ | 299 | /* doesn't hurt to initialize them both */ |
325 | INIT_LIST_HEAD(&tsk_rt(t)->aux_task_node); | 300 | INIT_LIST_HEAD(&tsk_rt(t)->aux_task_node); |
326 | INIT_BINHEAP_NODE(&tsk_rt(t)->aux_task_owner_node); | 301 | INIT_BINHEAP_NODE(&tsk_rt(t)->aux_task_owner_node); |
327 | 302 | ||
328 | TRACE_CUR("Checking task in %s/%d: %s/%d = (p = %llu):\n", | 303 | TRACE_CUR("Checking task in %s/%d: %s/%d = (p = %llu):\n", |
329 | leader->comm, leader->pid, t->comm, t->pid, | 304 | leader->comm, leader->pid, t->comm, t->pid, |
330 | tsk_rt(t)->task_params.period); | 305 | tsk_rt(t)->task_params.period); |
331 | 306 | ||
332 | /* inspect heap_node to see if it is an rt task */ | 307 | /* inspect heap_node to see if it is an rt task */ |
333 | if (tsk_rt(t)->task_params.period == 0 || | 308 | if (tsk_rt(t)->task_params.period == 0) { //|| |
334 | tsk_rt(t)->task_params.period == MAGIC_AUX_TASK_PERIOD) { | 309 | // tsk_rt(t)->task_params.period == MAGIC_AUX_TASK_PERIOD) { |
335 | if (!tsk_rt(t)->is_aux_task) { | 310 | if (!tsk_rt(t)->is_aux_task) { |
336 | TRACE_CUR("AUX task in %s/%d: %s/%d:\n", leader->comm, leader->pid, t->comm, t->pid); | 311 | TRACE_CUR("AUX task in %s/%d: %s/%d:\n", leader->comm, leader->pid, t->comm, t->pid); |
337 | /* hasn't been promoted into rt yet. make it an aux task. */ | 312 | /* hasn't been promoted into rt yet. make it an aux task. */ |
338 | tsk_rt(t)->is_aux_task = 1; | 313 | tsk_rt(t)->is_aux_task = 1; |
339 | list_add_tail(&tsk_rt(t)->aux_task_node, &tsk_aux(leader)->aux_tasks); | 314 | list_add_tail(&tsk_rt(t)->aux_task_node, &tsk_aux(leader)->aux_tasks); |
340 | 315 | ||
341 | (void)admit_aux_task(t); | 316 | (void)admit_aux_task(t); |
342 | } | 317 | } |
343 | else { | 318 | else { |
@@ -348,10 +323,6 @@ asmlinkage long sys_slave_non_rt_threads(void) | |||
348 | if (!tsk_rt(t)->has_aux_tasks) { | 323 | if (!tsk_rt(t)->has_aux_tasks) { |
349 | TRACE_CUR("task in %s/%d: %s/%d:\n", leader->comm, leader->pid, t->comm, t->pid); | 324 | TRACE_CUR("task in %s/%d: %s/%d:\n", leader->comm, leader->pid, t->comm, t->pid); |
350 | tsk_rt(t)->has_aux_tasks = 1; | 325 | tsk_rt(t)->has_aux_tasks = 1; |
351 | if (is_realtime(t)) { | ||
352 | binheap_add(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners, | ||
353 | struct rt_param, aux_task_owner_node); | ||
354 | } | ||
355 | } | 326 | } |
356 | else { | 327 | else { |
357 | TRACE_CUR("task in %s/%d is already set up: %s/%d\n", leader->comm, leader->pid, t->comm, t->pid); | 328 | TRACE_CUR("task in %s/%d is already set up: %s/%d\n", leader->comm, leader->pid, t->comm, t->pid); |
@@ -361,16 +332,72 @@ asmlinkage long sys_slave_non_rt_threads(void) | |||
361 | t = next_thread(t); | 332 | t = next_thread(t); |
362 | } while(t != leader); | 333 | } while(t != leader); |
363 | 334 | ||
364 | 335 | ||
365 | if (!binheap_empty(&tsk_aux(leader)->aux_task_owners)) { | 336 | if (!binheap_empty(&tsk_aux(leader)->aux_task_owners)) { |
366 | struct task_struct *hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), | 337 | struct task_struct *hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), |
367 | struct task_struct, rt_param); | 338 | struct task_struct, rt_param); |
368 | TRACE_CUR("found hp in group: %s/%d\n", hp->comm, hp->pid); | 339 | TRACE_CUR("found hp in group: %s/%d\n", hp->comm, hp->pid); |
369 | retval = aux_tasks_increase_priority(leader, | 340 | retval = aux_tasks_increase_priority(leader, |
370 | (tsk_rt(hp)->inh_task)? tsk_rt(hp)->inh_task : hp); | 341 | (tsk_rt(hp)->inh_task)? tsk_rt(hp)->inh_task : hp); |
342 | } | ||
343 | |||
344 | return retval; | ||
345 | } | ||
346 | |||
347 | static long __do_disable_slave_non_rt_threads(void) | ||
348 | { | ||
349 | long retval = 0; | ||
350 | struct task_struct *leader; | ||
351 | struct task_struct *t; | ||
352 | |||
353 | leader = current->group_leader; | ||
354 | |||
355 | t = leader; | ||
356 | do { | ||
357 | if (tsk_rt(t)->is_aux_task) { | ||
358 | |||
359 | TRACE_CUR("%s/%d is an aux task.\n", t->comm, t->pid); | ||
360 | |||
361 | if (is_realtime(t)) { | ||
362 | long temp_retval; | ||
363 | struct sched_param param = { .sched_priority = 0}; | ||
364 | |||
365 | TRACE_CUR("%s/%d is real-time. Changing policy to SCHED_NORMAL.\n", t->comm, t->pid); | ||
366 | |||
367 | temp_retval = sched_setscheduler_nocheck(t, SCHED_NORMAL, ¶m); | ||
368 | |||
369 | if (temp_retval != 0) { | ||
370 | TRACE_CUR("error changing policy of %s/%d to SCHED_NORMAL\n", t->comm, t->pid); | ||
371 | if (retval == 0) { | ||
372 | retval = temp_retval; | ||
373 | } | ||
374 | else { | ||
375 | TRACE_CUR("prior error (%d) masks new error (%d)\n", retval, temp_retval); | ||
376 | } | ||
377 | } | ||
378 | } | ||
379 | |||
380 | tsk_rt(t)->is_aux_task = 0; | ||
381 | } | ||
382 | t = next_thread(t); | ||
383 | } while(t != leader); | ||
384 | |||
385 | return retval; | ||
386 | } | ||
387 | |||
388 | asmlinkage long sys_slave_non_rt_threads(int enable) | ||
389 | { | ||
390 | long retval; | ||
391 | |||
392 | read_lock_irq(&tasklist_lock); | ||
393 | |||
394 | if (enable) { | ||
395 | retval = __do_enable_slave_non_rt_threads(); | ||
396 | } | ||
397 | else { | ||
398 | retval = __do_disable_slave_non_rt_threads(); | ||
371 | } | 399 | } |
372 | 400 | ||
373 | //out_unlock: | ||
374 | read_unlock_irq(&tasklist_lock); | 401 | read_unlock_irq(&tasklist_lock); |
375 | 402 | ||
376 | return retval; | 403 | return retval; |
@@ -378,7 +405,7 @@ asmlinkage long sys_slave_non_rt_threads(void) | |||
378 | 405 | ||
379 | #else | 406 | #else |
380 | 407 | ||
381 | asmlinkage long sys_slave_non_rt_tasks(void) | 408 | asmlinkage long sys_slave_non_rt_tasks(int enable) |
382 | { | 409 | { |
383 | printk("Unsupported. Recompile with CONFIG_LITMUS_LOCKING.\n"); | 410 | printk("Unsupported. Recompile with CONFIG_LITMUS_LOCKING.\n"); |
384 | return -EINVAL; | 411 | return -EINVAL; |
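The enable path above always fans the effective priority of the group's highest-priority aux-task owner out to every aux thread in the group. A small illustrative helper (not part of the patch) that captures how that source priority is chosen from the aux_task_owners heap, using the same struct and field names as the hunks above:

```c
/* Illustrative helper, not in the patch: which task do the group's aux
 * threads end up inheriting from?  The inh_task fallback mirrors the
 * (tsk_rt(hp)->inh_task) ? tsk_rt(hp)->inh_task : hp pattern above. */
static struct task_struct *aux_inheritance_source(struct task_struct *leader)
{
	struct task_struct *hp;

	if (binheap_empty(&tsk_aux(leader)->aux_task_owners))
		return NULL;	/* no rt owners: aux threads stay best-effort */

	hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners,
					    struct rt_param, aux_task_owner_node),
			  struct task_struct, rt_param);

	/* aux threads track the owner's *effective* priority, i.e. whatever
	 * hp itself is currently inheriting, if anything. */
	return tsk_rt(hp)->inh_task ? tsk_rt(hp)->inh_task : hp;
}
```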
diff --git a/litmus/edf_common.c b/litmus/edf_common.c index 9b439299e5fc..ca06f6ec103e 100644 --- a/litmus/edf_common.c +++ b/litmus/edf_common.c | |||
@@ -22,7 +22,7 @@ | |||
22 | #include <litmus/fpmath.h> | 22 | #include <litmus/fpmath.h> |
23 | #endif | 23 | #endif |
24 | 24 | ||
25 | #ifdef CONFIG_EDF_TIE_BREAK_HASH | 25 | //#ifdef CONFIG_EDF_TIE_BREAK_HASH |
26 | #include <linux/hash.h> | 26 | #include <linux/hash.h> |
27 | static inline long edf_hash(struct task_struct *t) | 27 | static inline long edf_hash(struct task_struct *t) |
28 | { | 28 | { |
@@ -41,7 +41,22 @@ static inline long edf_hash(struct task_struct *t) | |||
41 | */ | 41 | */ |
42 | return hash_32(hash_32((u32)tsk_rt(t)->job_params.job_no, 32) ^ t->pid, 32); | 42 | return hash_32(hash_32((u32)tsk_rt(t)->job_params.job_no, 32) ^ t->pid, 32); |
43 | } | 43 | } |
44 | #endif | 44 | //#endif |
45 | |||
46 | int aux_tie_break(struct task_struct *first, struct task_struct *second) | ||
47 | { | ||
48 | long fhash = edf_hash(first); | ||
49 | long shash = edf_hash(second); | ||
50 | if (fhash < shash) { | ||
51 | TRACE_CUR("%s/%d >> %s/%d --- %d\n", first->comm, first->pid, second->comm, second->pid, 1); | ||
52 | return 1; | ||
53 | } | ||
54 | else if(fhash == shash) { | ||
55 | TRACE_CUR("%s/%d >> %s/%d --- %d\n", first->comm, first->pid, second->comm, second->pid, (first->pid < second->pid)); | ||
56 | return first->pid < second->pid; | ||
57 | } | ||
58 | return 0; | ||
59 | } | ||
45 | 60 | ||
46 | 61 | ||
47 | /* edf_higher_prio - returns true if first has a higher EDF priority | 62 | /* edf_higher_prio - returns true if first has a higher EDF priority |
@@ -60,6 +75,11 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) | |||
60 | struct task_struct *first_task = first; | 75 | struct task_struct *first_task = first; |
61 | struct task_struct *second_task = second; | 76 | struct task_struct *second_task = second; |
62 | 77 | ||
78 | int first_lo_aux; | ||
79 | int second_lo_aux; | ||
80 | int first_hi_aux; | ||
81 | int second_hi_aux; | ||
82 | |||
63 | /* There is no point in comparing a task to itself. */ | 83 | /* There is no point in comparing a task to itself. */ |
64 | if (first && first == second) { | 84 | if (first && first == second) { |
65 | TRACE_CUR("WARNING: pointless edf priority comparison: %s/%d\n", first->comm, first->pid); | 85 | TRACE_CUR("WARNING: pointless edf priority comparison: %s/%d\n", first->comm, first->pid); |
@@ -74,23 +94,34 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) | |||
74 | } | 94 | } |
75 | 95 | ||
76 | #ifdef CONFIG_LITMUS_LOCKING | 96 | #ifdef CONFIG_LITMUS_LOCKING |
77 | /* aux threads with no inheritance have lowest priority; however, do a PID | 97 | |
78 | * tie break if both threads are aux threads with no inheritance. | 98 | first_lo_aux = first->rt_param.is_aux_task && !first->rt_param.inh_task; |
79 | */ | 99 | second_lo_aux = second->rt_param.is_aux_task && !second->rt_param.inh_task; |
80 | if (unlikely(first->rt_param.is_aux_task && !first->rt_param.inh_task)) { | 100 | |
81 | if (second->rt_param.is_aux_task && !second->rt_param.inh_task) { | 101 | if (first_lo_aux && !second_lo_aux) { |
82 | /* pid break */ | 102 | TRACE_CUR("%s/%d >> %s/%d --- 0\n", first->comm, first->pid, second->comm, second->pid); |
83 | if (first->pid < second->pid) { | ||
84 | return 1; | ||
85 | } | ||
86 | } | ||
87 | return 0; | 103 | return 0; |
88 | } | 104 | } |
89 | if (unlikely(second->rt_param.is_aux_task && !second->rt_param.inh_task)) { | 105 | else if (second_lo_aux && !first_lo_aux) { |
90 | /* no need for pid break -- case already tested */ | 106 | TRACE_CUR("%s/%d >> %s/%d --- 1\n", first->comm, first->pid, second->comm, second->pid); |
91 | return 1; | 107 | return 1; |
92 | } | 108 | } |
93 | 109 | else if (first_lo_aux && second_lo_aux) { | |
110 | int aux_lo_tie_break = aux_tie_break(first, second); | ||
111 | TRACE_CUR("low aux tie break: %s/%d >> %s/%d --- %d\n", first->comm, first->pid, second->comm, second->pid, aux_lo_tie_break); | ||
112 | return aux_lo_tie_break; | ||
113 | } | ||
114 | |||
115 | first_hi_aux = first->rt_param.is_aux_task && first->rt_param.inh_task; | ||
116 | second_hi_aux = second->rt_param.is_aux_task && second->rt_param.inh_task; | ||
117 | |||
118 | if (first_hi_aux && second_hi_aux && first->rt_param.inh_task == second->rt_param.inh_task) { | ||
119 | int aux_hi_tie_break = aux_tie_break(first, second); | ||
120 | TRACE_CUR("hi aux tie break: %s/%d >> %s/%d --- %d\n", first->comm, first->pid, second->comm, second->pid, aux_hi_tie_break); | ||
121 | return aux_hi_tie_break; | ||
122 | } | ||
123 | |||
124 | |||
94 | /* Check for EFFECTIVE priorities. Change task | 125 | /* Check for EFFECTIVE priorities. Change task |
95 | * used for comparison in such a case. | 126 | * used for comparison in such a case. |
96 | */ | 127 | */ |
@@ -149,7 +180,7 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) | |||
149 | */ | 180 | */ |
150 | if (get_lateness(first_task) > get_lateness(second_task)) { | 181 | if (get_lateness(first_task) > get_lateness(second_task)) { |
151 | return 1; | 182 | return 1; |
152 | } | 183 | } |
153 | pid_break = (get_lateness(first_task) == get_lateness(second_task)); | 184 | pid_break = (get_lateness(first_task) == get_lateness(second_task)); |
154 | 185 | ||
155 | 186 | ||
@@ -171,8 +202,8 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) | |||
171 | return 1; | 202 | return 1; |
172 | } | 203 | } |
173 | pid_break = _eq(fnorm, snorm); | 204 | pid_break = _eq(fnorm, snorm); |
174 | 205 | ||
175 | 206 | ||
176 | #elif defined(CONFIG_EDF_TIE_BREAK_HASH) | 207 | #elif defined(CONFIG_EDF_TIE_BREAK_HASH) |
177 | /* Tie break by comparing hashes of (pid, job#) tuple. There should be | 208 | /* Tie break by comparing hashes of (pid, job#) tuple. There should be |
178 | * a 50% chance that first_task has a higher priority than second_task. | 209 | * a 50% chance that first_task has a higher priority than second_task. |
@@ -184,8 +215,8 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) | |||
184 | } | 215 | } |
185 | pid_break = (fhash == shash); | 216 | pid_break = (fhash == shash); |
186 | #else | 217 | #else |
187 | 218 | ||
188 | 219 | ||
189 | /* CONFIG_EDF_PID_TIE_BREAK */ | 220 | /* CONFIG_EDF_PID_TIE_BREAK */ |
190 | pid_break = 1; // fall through to tie-break by pid; | 221 | pid_break = 1; // fall through to tie-break by pid; |
191 | #endif | 222 | #endif |
@@ -197,11 +228,17 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) | |||
197 | } | 228 | } |
198 | else if (first_task->pid == second_task->pid) { | 229 | else if (first_task->pid == second_task->pid) { |
199 | #ifdef CONFIG_LITMUS_SOFTIRQD | 230 | #ifdef CONFIG_LITMUS_SOFTIRQD |
200 | if (first_task->rt_param.is_proxy_thread < | 231 | if (first_task->rt_param.is_proxy_thread < |
201 | second_task->rt_param.is_proxy_thread) { | 232 | second_task->rt_param.is_proxy_thread) { |
202 | return 1; | 233 | return 1; |
203 | } | 234 | } |
204 | #endif | 235 | #endif |
236 | if (tsk_rt(first)->is_aux_task < tsk_rt(second)->is_aux_task) { | ||
237 | TRACE_CUR("AUX BREAK!\n"); | ||
238 | return 1; | ||
239 | } | ||
240 | |||
241 | |||
205 | /* Something could be wrong if you get this far. */ | 242 | /* Something could be wrong if you get this far. */ |
206 | if (unlikely(first->rt_param.inh_task == | 243 | if (unlikely(first->rt_param.inh_task == |
207 | second->rt_param.inh_task)) { | 244 | second->rt_param.inh_task)) { |
@@ -220,8 +257,8 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) | |||
220 | BUG_ON(!first->rt_param.inh_task && | 257 | BUG_ON(!first->rt_param.inh_task && |
221 | !second->rt_param.inh_task); | 258 | !second->rt_param.inh_task); |
222 | 259 | ||
223 | /* The task with the inherited priority wins. */ | 260 | /* The task withOUT the inherited priority wins. */ |
224 | if (!second->rt_param.inh_task) { | 261 | if (second->rt_param.inh_task) { |
225 | /* | 262 | /* |
226 | * common with aux tasks. | 263 | * common with aux tasks. |
227 | TRACE_CUR("unusual comparison: " | 264 | TRACE_CUR("unusual comparison: " |
diff --git a/litmus/litmus.c b/litmus/litmus.c index 1b4182ac3337..e2bf2a7ad01b 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c | |||
@@ -338,7 +338,7 @@ void init_gpu_affinity_state(struct task_struct* p) | |||
338 | //p->rt_param.gpu_fb_param_a = _frac(14008, 10000); | 338 | //p->rt_param.gpu_fb_param_a = _frac(14008, 10000); |
339 | //p->rt_param.gpu_fb_param_b = _frac(16024, 10000); | 339 | //p->rt_param.gpu_fb_param_b = _frac(16024, 10000); |
340 | 340 | ||
341 | #if 0 | 341 | #if 0 |
342 | // empirical; | 342 | // empirical; |
343 | p->rt_param.gpu_fb_param_a[0] = _frac(7550, 10000); | 343 | p->rt_param.gpu_fb_param_a[0] = _frac(7550, 10000); |
344 | p->rt_param.gpu_fb_param_b[0] = _frac(45800, 10000); | 344 | p->rt_param.gpu_fb_param_b[0] = _frac(45800, 10000); |
@@ -362,13 +362,13 @@ static void reinit_litmus_state(struct task_struct* p, int restore) | |||
362 | { | 362 | { |
363 | struct rt_task user_config = {}; | 363 | struct rt_task user_config = {}; |
364 | void* ctrl_page = NULL; | 364 | void* ctrl_page = NULL; |
365 | 365 | ||
366 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 366 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
367 | binheap_order_t prio_order = NULL; | 367 | binheap_order_t prio_order = NULL; |
368 | #endif | 368 | #endif |
369 | 369 | ||
370 | TRACE_TASK(p, "reinit_litmus_state: restore = %d\n", restore); | 370 | TRACE_TASK(p, "reinit_litmus_state: restore = %d\n", restore); |
371 | 371 | ||
372 | if (restore) { | 372 | if (restore) { |
373 | /* Save user-space provided configuration data | 373 | /* Save user-space provided configuration data |
374 | * and allocated page. */ | 374 | * and allocated page. */ |
@@ -419,7 +419,7 @@ static void reinit_litmus_state(struct task_struct* p, int restore) | |||
419 | if (!restore) | 419 | if (!restore) |
420 | memset(&p->aux_data, 0, sizeof(p->aux_data)); | 420 | memset(&p->aux_data, 0, sizeof(p->aux_data)); |
421 | #endif | 421 | #endif |
422 | 422 | ||
423 | /* Restore preserved fields. */ | 423 | /* Restore preserved fields. */ |
424 | if (restore) { | 424 | if (restore) { |
425 | p->rt_param.task_params = user_config; | 425 | p->rt_param.task_params = user_config; |
@@ -437,11 +437,8 @@ static void reinit_litmus_state(struct task_struct* p, int restore) | |||
437 | } | 437 | } |
438 | 438 | ||
439 | 439 | ||
440 | #ifdef CONFIG_LITMUS_LOCKING | 440 | |
441 | long __litmus_admit_task(struct task_struct* tsk, int clear_aux) | ||
442 | #else | ||
443 | long __litmus_admit_task(struct task_struct* tsk) | 441 | long __litmus_admit_task(struct task_struct* tsk) |
444 | #endif | ||
445 | { | 442 | { |
446 | long retval = 0; | 443 | long retval = 0; |
447 | unsigned long flags; | 444 | unsigned long flags; |
@@ -486,14 +483,6 @@ long __litmus_admit_task(struct task_struct* tsk) | |||
486 | atomic_set(&tsk_rt(tsk)->klitirqd_sem_stat, NOT_HELD); | 483 | atomic_set(&tsk_rt(tsk)->klitirqd_sem_stat, NOT_HELD); |
487 | #endif | 484 | #endif |
488 | 485 | ||
489 | #ifdef CONFIG_LITMUS_LOCKING | ||
490 | /* turns out our aux thread isn't really an aux thread. */ | ||
491 | if (clear_aux && tsk_rt(tsk)->is_aux_task) { | ||
492 | exit_aux_task(tsk); | ||
493 | tsk_rt(tsk)->has_aux_tasks = 1; | ||
494 | } | ||
495 | #endif | ||
496 | |||
497 | retval = litmus->admit_task(tsk); | 486 | retval = litmus->admit_task(tsk); |
498 | 487 | ||
499 | if (!retval) { | 488 | if (!retval) { |
@@ -511,7 +500,7 @@ out_unlock: | |||
511 | long litmus_admit_task(struct task_struct* tsk) | 500 | long litmus_admit_task(struct task_struct* tsk) |
512 | { | 501 | { |
513 | long retval = 0; | 502 | long retval = 0; |
514 | 503 | ||
515 | BUG_ON(is_realtime(tsk)); | 504 | BUG_ON(is_realtime(tsk)); |
516 | 505 | ||
517 | if (get_rt_relative_deadline(tsk) == 0 || | 506 | if (get_rt_relative_deadline(tsk) == 0 || |
@@ -533,12 +522,8 @@ long litmus_admit_task(struct task_struct* tsk) | |||
533 | goto out; | 522 | goto out; |
534 | } | 523 | } |
535 | 524 | ||
536 | #ifdef CONFIG_LITMUS_LOCKING | ||
537 | retval = __litmus_admit_task(tsk, (tsk_rt(tsk)->task_params.period != MAGIC_AUX_TASK_PERIOD)); | ||
538 | #else | ||
539 | retval = __litmus_admit_task(tsk); | 525 | retval = __litmus_admit_task(tsk); |
540 | #endif | 526 | |
541 | |||
542 | out: | 527 | out: |
543 | return retval; | 528 | return retval; |
544 | } | 529 | } |
@@ -624,18 +609,21 @@ out: | |||
624 | */ | 609 | */ |
625 | void litmus_fork(struct task_struct* p) | 610 | void litmus_fork(struct task_struct* p) |
626 | { | 611 | { |
627 | reinit_litmus_state(p, 0); | ||
628 | |||
629 | if (is_realtime(p)) { | 612 | if (is_realtime(p)) { |
630 | TRACE_TASK(p, "fork, is real-time\n"); | 613 | TRACE_TASK(p, "fork, is real-time\n"); |
614 | |||
631 | /* clean out any litmus related state, don't preserve anything */ | 615 | /* clean out any litmus related state, don't preserve anything */ |
632 | //reinit_litmus_state(p, 0); | 616 | reinit_litmus_state(p, 0); |
617 | |||
633 | /* Don't let the child be a real-time task. */ | 618 | /* Don't let the child be a real-time task. */ |
634 | p->sched_reset_on_fork = 1; | 619 | p->sched_reset_on_fork = 1; |
620 | |||
635 | } else { | 621 | } else { |
636 | /* non-rt tasks might have ctrl_page set */ | 622 | /* non-rt tasks might have ctrl_page set */ |
637 | tsk_rt(p)->ctrl_page = NULL; | 623 | tsk_rt(p)->ctrl_page = NULL; |
638 | 624 | ||
625 | reinit_litmus_state(p, 0); | ||
626 | |||
639 | /* still don't inherit any parental parameters */ | 627 | /* still don't inherit any parental parameters */ |
640 | //memset(&p->rt_param, 0, sizeof(p->rt_param)); | 628 | //memset(&p->rt_param, 0, sizeof(p->rt_param)); |
641 | //memset(&p->aux_data, 0, sizeof(p->aux_data)); | 629 | //memset(&p->aux_data, 0, sizeof(p->aux_data)); |
@@ -736,10 +724,6 @@ static int __init _init_litmus(void) | |||
736 | init_topology(); | 724 | init_topology(); |
737 | #endif | 725 | #endif |
738 | 726 | ||
739 | #ifdef CONFIG_LITMUS_NVIDIA | ||
740 | //init_nvidia_info(); | ||
741 | #endif | ||
742 | |||
743 | return 0; | 727 | return 0; |
744 | } | 728 | } |
745 | 729 | ||
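Condensing the interleaved litmus_fork() hunks above, the resulting fork path is roughly the following; the wrapper name is illustrative, while the identifiers inside it are the ones that appear in the diff:

```c
/* Condensed result of the fork-path change above (illustrative wrapper). */
void litmus_fork_sketch(struct task_struct *p)
{
	if (is_realtime(p)) {
		/* the child copied rt state from its parent: wipe it and let
		 * the core scheduler reset the child to SCHED_NORMAL on fork */
		reinit_litmus_state(p, 0);
		p->sched_reset_on_fork = 1;
	} else {
		/* non-rt parents may still carry a ctrl page pointer */
		tsk_rt(p)->ctrl_page = NULL;
		reinit_litmus_state(p, 0);
	}
}
```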
diff --git a/litmus/preempt.c b/litmus/preempt.c index a2cae3648e15..c9ccc80c1df9 100644 --- a/litmus/preempt.c +++ b/litmus/preempt.c | |||
@@ -74,25 +74,37 @@ void litmus_reschedule(int cpu) | |||
74 | * is not aware of the need to reschedule at this point. */ | 74 | * is not aware of the need to reschedule at this point. */ |
75 | 75 | ||
76 | /* is a context switch in progress? */ | 76 | /* is a context switch in progress? */ |
77 | if (cpu_is_in_sched_state(cpu, TASK_PICKED)) | 77 | if (cpu_is_in_sched_state(cpu, TASK_PICKED)) { |
78 | picked_transition_ok = sched_state_transition_on( | 78 | picked_transition_ok = sched_state_transition_on( |
79 | cpu, TASK_PICKED, PICKED_WRONG_TASK); | 79 | cpu, TASK_PICKED, PICKED_WRONG_TASK); |
80 | 80 | ||
81 | TRACE_CUR("cpu %d: picked_transition_ok = %d\n", cpu, picked_transition_ok); | ||
82 | } | ||
83 | else { | ||
84 | TRACE_CUR("cpu %d: picked_transition_ok = 0 (static)\n", cpu); | ||
85 | } | ||
86 | |||
81 | if (!picked_transition_ok && | 87 | if (!picked_transition_ok && |
82 | cpu_is_in_sched_state(cpu, TASK_SCHEDULED)) { | 88 | cpu_is_in_sched_state(cpu, TASK_SCHEDULED)) { |
83 | /* We either raced with the end of the context switch, or the | 89 | /* We either raced with the end of the context switch, or the |
84 | * CPU was in TASK_SCHEDULED anyway. */ | 90 | * CPU was in TASK_SCHEDULED anyway. */ |
85 | scheduled_transition_ok = sched_state_transition_on( | 91 | scheduled_transition_ok = sched_state_transition_on( |
86 | cpu, TASK_SCHEDULED, SHOULD_SCHEDULE); | 92 | cpu, TASK_SCHEDULED, SHOULD_SCHEDULE); |
93 | TRACE_CUR("cpu %d: scheduled_transition_ok = %d\n", cpu, scheduled_transition_ok); | ||
94 | } | ||
95 | else { | ||
96 | TRACE_CUR("cpu %d: scheduled_transition_ok = 0 (static)\n", cpu); | ||
87 | } | 97 | } |
88 | 98 | ||
89 | /* If the CPU was in state TASK_SCHEDULED, then we need to cause the | 99 | /* If the CPU was in state TASK_SCHEDULED, then we need to cause the |
90 | * scheduler to be invoked. */ | 100 | * scheduler to be invoked. */ |
91 | if (scheduled_transition_ok) { | 101 | if (scheduled_transition_ok) { |
92 | if (smp_processor_id() == cpu) | 102 | if (smp_processor_id() == cpu) { |
93 | set_tsk_need_resched(current); | 103 | set_tsk_need_resched(current); |
94 | else | 104 | } |
105 | else { | ||
95 | smp_send_reschedule(cpu); | 106 | smp_send_reschedule(cpu); |
107 | } | ||
96 | } | 108 | } |
97 | 109 | ||
98 | TRACE_STATE("%s picked-ok:%d sched-ok:%d\n", | 110 | TRACE_STATE("%s picked-ok:%d sched-ok:%d\n", |
@@ -103,11 +115,16 @@ void litmus_reschedule(int cpu) | |||
103 | 115 | ||
104 | void litmus_reschedule_local(void) | 116 | void litmus_reschedule_local(void) |
105 | { | 117 | { |
106 | if (is_in_sched_state(TASK_PICKED)) | 118 | if (is_in_sched_state(TASK_PICKED)) { |
107 | set_sched_state(PICKED_WRONG_TASK); | 119 | set_sched_state(PICKED_WRONG_TASK); |
120 | |||
121 | TRACE_CUR("cpu %d: transitioned to PICKED_WRONG_TASK\n", smp_processor_id()); | ||
122 | } | ||
108 | else if (is_in_sched_state(TASK_SCHEDULED | SHOULD_SCHEDULE)) { | 123 | else if (is_in_sched_state(TASK_SCHEDULED | SHOULD_SCHEDULE)) { |
109 | set_sched_state(WILL_SCHEDULE); | 124 | set_sched_state(WILL_SCHEDULE); |
110 | set_tsk_need_resched(current); | 125 | set_tsk_need_resched(current); |
126 | |||
127 | TRACE_CUR("cpu %d: transitioned to WILL_SCHEDULE\n", smp_processor_id()); | ||
111 | } | 128 | } |
112 | } | 129 | } |
113 | 130 | ||
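Stripped of the new tracing, litmus_reschedule() above attempts two state transitions in order and only kicks the target CPU when the second one succeeds. A condensed sketch (illustrative wrapper name; the state names and helpers are the ones in litmus/preempt.c):

```c
static void reschedule_remote_cpu_sketch(int cpu)
{
	int picked_ok = 0, scheduled_ok = 0;

	/* Case 1: the CPU picked a task but has not finished switching to it.
	 * Flag the pick as wrong; the tail of its context switch will notice
	 * and reschedule on its own, so no IPI is needed. */
	if (cpu_is_in_sched_state(cpu, TASK_PICKED))
		picked_ok = sched_state_transition_on(cpu, TASK_PICKED,
						      PICKED_WRONG_TASK);

	/* Case 2: the CPU is past (or was never in) a switch.  Ask it to
	 * schedule and kick it explicitly. */
	if (!picked_ok && cpu_is_in_sched_state(cpu, TASK_SCHEDULED))
		scheduled_ok = sched_state_transition_on(cpu, TASK_SCHEDULED,
							 SHOULD_SCHEDULE);

	if (scheduled_ok) {
		if (cpu == smp_processor_id())
			set_tsk_need_resched(current);
		else
			smp_send_reschedule(cpu);
	}
}
```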
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index 5b8ca6698423..270e06c20bbf 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c | |||
@@ -167,6 +167,7 @@ struct tasklet_head gsnedf_pending_tasklets; | |||
167 | * TRACE() log. | 167 | * TRACE() log. |
168 | #define WANT_ALL_SCHED_EVENTS | 168 | #define WANT_ALL_SCHED_EVENTS |
169 | */ | 169 | */ |
170 | //#define WANT_ALL_SCHED_EVENTS | ||
170 | 171 | ||
171 | static int cpu_lower_prio(struct binheap_node *_a, struct binheap_node *_b) | 172 | static int cpu_lower_prio(struct binheap_node *_a, struct binheap_node *_b) |
172 | { | 173 | { |
@@ -209,8 +210,17 @@ static noinline void link_task_to_cpu(struct task_struct* linked, | |||
209 | struct task_struct* tmp; | 210 | struct task_struct* tmp; |
210 | int on_cpu; | 211 | int on_cpu; |
211 | 212 | ||
213 | //int print = (linked != NULL || entry->linked != NULL); | ||
214 | |||
212 | BUG_ON(linked && !is_realtime(linked)); | 215 | BUG_ON(linked && !is_realtime(linked)); |
213 | 216 | ||
217 | /* | ||
218 | if (print) { | ||
219 | TRACE_CUR("linked = %s/%d\n", (linked) ? linked->comm : "(nil)", (linked)? linked->pid : 0); | ||
220 | TRACE_CUR("entry->linked = %s/%d\n", (entry->linked) ? entry->linked->comm : "(nil)", (entry->linked)? entry->linked->pid : 0); | ||
221 | } | ||
222 | */ | ||
223 | |||
214 | /* Currently linked task is set to be unlinked. */ | 224 | /* Currently linked task is set to be unlinked. */ |
215 | if (entry->linked) { | 225 | if (entry->linked) { |
216 | entry->linked->rt_param.linked_on = NO_CPU; | 226 | entry->linked->rt_param.linked_on = NO_CPU; |
@@ -246,12 +256,18 @@ static noinline void link_task_to_cpu(struct task_struct* linked, | |||
246 | linked->rt_param.linked_on = entry->cpu; | 256 | linked->rt_param.linked_on = entry->cpu; |
247 | } | 257 | } |
248 | entry->linked = linked; | 258 | entry->linked = linked; |
249 | #ifdef WANT_ALL_SCHED_EVENTS | 259 | |
250 | if (linked) | 260 | /* |
251 | TRACE_TASK(linked, "linked to %d.\n", entry->cpu); | 261 | if (print) { |
252 | else | 262 | //#ifdef WANT_ALL_SCHED_EVENTS |
253 | TRACE("NULL linked to %d.\n", entry->cpu); | 263 | if (linked) |
254 | #endif | 264 | TRACE_TASK(linked, "linked to %d.\n", entry->cpu); |
265 | else | ||
266 | TRACE("NULL linked to %d.\n", entry->cpu); | ||
267 | //#endif | ||
268 | } | ||
269 | */ | ||
270 | |||
255 | update_cpu_position(entry); | 271 | update_cpu_position(entry); |
256 | } | 272 | } |
257 | 273 | ||
@@ -297,36 +313,19 @@ static noinline void requeue(struct task_struct* task) | |||
297 | BUG_ON(is_queued(task)); | 313 | BUG_ON(is_queued(task)); |
298 | 314 | ||
299 | if (is_released(task, litmus_clock())) { | 315 | if (is_released(task, litmus_clock())) { |
300 | 316 | ||
301 | if (unlikely(tsk_rt(task)->is_aux_task && !is_running(task))) { | 317 | if (unlikely(tsk_rt(task)->is_aux_task && !is_running(task))) { |
302 | /* aux_task probably transitioned to real-time while it was blocked */ | 318 | /* aux_task probably transitioned to real-time while it was blocked */ |
303 | TRACE_CUR("aux task %s/%d is not ready!\n", task->comm, task->pid); | 319 | TRACE_CUR("aux task %s/%d is not ready!\n", task->comm, task->pid); |
320 | unlink(task); /* really needed? */ | ||
304 | } | 321 | } |
305 | else { | 322 | else { |
306 | __add_ready(&gsnedf, task); | 323 | __add_ready(&gsnedf, task); |
307 | |||
308 | #if 0 | ||
309 | if (tsk_rt(task)->has_aux_tasks) { | ||
310 | |||
311 | TRACE_CUR("%s/%d is ready and has aux tasks.\n", task->comm, task->pid); | ||
312 | /* allow it's prio inheritance to act on aux threads */ | ||
313 | enable_aux_task_owner(task); | ||
314 | } | ||
315 | #endif | ||
316 | } | 324 | } |
317 | } | 325 | } |
318 | else { | 326 | else { |
319 | /* it has got to wait */ | 327 | /* it has got to wait */ |
320 | add_release(&gsnedf, task); | 328 | add_release(&gsnedf, task); |
321 | |||
322 | #if 0 | ||
323 | if (tsk_rt(task)->has_aux_tasks) { | ||
324 | |||
325 | TRACE_CUR("%s/%d is waiting for release and has aux tasks.\n", task->comm, task->pid); | ||
326 | /* prevent prio inheritance from acting while it's not ready */ | ||
327 | disable_aux_task_owner(task); | ||
328 | } | ||
329 | #endif | ||
330 | } | 329 | } |
331 | } | 330 | } |
332 | 331 | ||
@@ -368,7 +367,8 @@ static void check_for_preemptions(void) | |||
368 | &per_cpu(gsnedf_cpu_entries, task_cpu(task))); | 367 | &per_cpu(gsnedf_cpu_entries, task_cpu(task))); |
369 | if (affinity) | 368 | if (affinity) |
370 | last = affinity; | 369 | last = affinity; |
371 | else if (requeue_preempted_job(last->linked)) | 370 | |
371 | if (requeue_preempted_job(last->linked)) | ||
372 | requeue(last->linked); | 372 | requeue(last->linked); |
373 | } | 373 | } |
374 | #else | 374 | #else |
@@ -393,45 +393,11 @@ static noinline void gsnedf_job_arrival(struct task_struct* task) | |||
393 | static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks) | 393 | static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks) |
394 | { | 394 | { |
395 | unsigned long flags; | 395 | unsigned long flags; |
396 | //struct bheap_node* node; | ||
397 | 396 | ||
398 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | 397 | raw_spin_lock_irqsave(&gsnedf_lock, flags); |
399 | 398 | ||
400 | #if 0 | ||
401 | node = tasks->head; | ||
402 | while(node) { | ||
403 | struct task_struct *task = bheap2task(node); | ||
404 | |||
405 | if (tsk_rt(task)->has_aux_tasks) { | ||
406 | |||
407 | TRACE_CUR("%s/%d is ready and has aux tasks.\n", task->comm, task->pid); | ||
408 | |||
409 | /* allow it's prio inheritance to act on aux threads */ | ||
410 | enable_aux_task_owner(task); | ||
411 | } | ||
412 | |||
413 | /* pre-order sub-tree traversal */ | ||
414 | if (node->child) { | ||
415 | /* go down */ | ||
416 | node = node->child; | ||
417 | } | ||
418 | else if(node->parent && node->parent->next) { | ||
419 | /* go up a level and across */ | ||
420 | node = node->parent->next; | ||
421 | } | ||
422 | else if(!node->parent && node->next) { | ||
423 | /* go to the next binomial tree */ | ||
424 | node = node->next; | ||
425 | } | ||
426 | else { | ||
427 | /* the end! */ | ||
428 | node = NULL; | ||
429 | } | ||
430 | } | ||
431 | #endif | ||
432 | |||
433 | __merge_ready(rt, tasks); | 399 | __merge_ready(rt, tasks); |
434 | 400 | ||
435 | check_for_preemptions(); | 401 | check_for_preemptions(); |
436 | 402 | ||
437 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | 403 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); |
@@ -449,12 +415,12 @@ static noinline void job_completion(struct task_struct *t, int forced) | |||
449 | #endif | 415 | #endif |
450 | 416 | ||
451 | TRACE_TASK(t, "job_completion().\n"); | 417 | TRACE_TASK(t, "job_completion().\n"); |
452 | 418 | ||
453 | /* set flags */ | 419 | /* set flags */ |
454 | set_rt_flags(t, RT_F_SLEEP); | 420 | set_rt_flags(t, RT_F_SLEEP); |
455 | /* prepare for next period */ | 421 | /* prepare for next period */ |
456 | prepare_for_next_period(t); | 422 | prepare_for_next_period(t); |
457 | 423 | ||
458 | if (is_released(t, litmus_clock())) | 424 | if (is_released(t, litmus_clock())) |
459 | sched_trace_task_release(t); | 425 | sched_trace_task_release(t); |
460 | /* unlink */ | 426 | /* unlink */ |
@@ -497,6 +463,10 @@ static void gsnedf_tick(struct task_struct* t) | |||
497 | } | 463 | } |
498 | } | 464 | } |
499 | } | 465 | } |
466 | |||
467 | if(is_realtime(t)) { | ||
468 | TRACE_TASK(t, "tick %llu\n", litmus_clock()); | ||
469 | } | ||
500 | } | 470 | } |
501 | 471 | ||
502 | 472 | ||
@@ -838,6 +808,8 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
838 | int out_of_time, signal_budget, sleep, preempt, np, exists, blocks; | 808 | int out_of_time, signal_budget, sleep, preempt, np, exists, blocks; |
839 | struct task_struct* next = NULL; | 809 | struct task_struct* next = NULL; |
840 | 810 | ||
811 | //int completion = 0; | ||
812 | |||
841 | #ifdef CONFIG_RELEASE_MASTER | 813 | #ifdef CONFIG_RELEASE_MASTER |
842 | /* Bail out early if we are the release master. | 814 | /* Bail out early if we are the release master. |
843 | * The release master never schedules any real-time tasks. | 815 | * The release master never schedules any real-time tasks. |
@@ -873,22 +845,22 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
873 | TRACE_TASK(prev, "invoked gsnedf_schedule.\n"); | 845 | TRACE_TASK(prev, "invoked gsnedf_schedule.\n"); |
874 | #endif | 846 | #endif |
875 | 847 | ||
876 | /* | 848 | if (exists) { |
877 | if (exists) | ||
878 | TRACE_TASK(prev, | 849 | TRACE_TASK(prev, |
879 | "blocks:%d out_of_time:%d signal_budget: %d np:%d sleep:%d preempt:%d " | 850 | "blocks:%d out_of_time:%d signal_budget: %d np:%d sleep:%d preempt:%d " |
880 | "state:%d sig:%d\n", | 851 | "state:%d sig:%d\n", |
881 | blocks, out_of_time, signal_budget, np, sleep, preempt, | 852 | blocks, out_of_time, signal_budget, np, sleep, preempt, |
882 | prev->state, signal_pending(prev)); | 853 | prev->state, signal_pending(prev)); |
883 | */ | 854 | } |
884 | 855 | ||
885 | if (entry->linked && preempt) | 856 | if (entry->linked && preempt) |
886 | TRACE_TASK(prev, "will be preempted by %s/%d\n", | 857 | TRACE_TASK(prev, "will be preempted by %s/%d\n", |
887 | entry->linked->comm, entry->linked->pid); | 858 | entry->linked->comm, entry->linked->pid); |
888 | 859 | ||
889 | /* Send the signal that the budget has been exhausted */ | 860 | /* Send the signal that the budget has been exhausted */ |
890 | if (signal_budget) | 861 | if (signal_budget) { |
891 | send_sigbudget(entry->scheduled); | 862 | send_sigbudget(entry->scheduled); |
863 | } | ||
892 | 864 | ||
893 | /* If a task blocks we have no choice but to reschedule. | 865 | /* If a task blocks we have no choice but to reschedule. |
894 | */ | 866 | */ |
@@ -919,8 +891,10 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
919 | * this. Don't do a job completion if we block (can't have timers running | 891 | * this. Don't do a job completion if we block (can't have timers running |
920 | * for blocked jobs). | 892 | * for blocked jobs). |
921 | */ | 893 | */ |
922 | if (!np && (out_of_time || sleep) && !blocks) | 894 | if (!np && (out_of_time || sleep) && !blocks) { |
923 | job_completion(entry->scheduled, !sleep); | 895 | job_completion(entry->scheduled, !sleep); |
896 | //completion = 1; | ||
897 | } | ||
924 | 898 | ||
925 | /* Link pending task if we became unlinked. | 899 | /* Link pending task if we became unlinked. |
926 | */ | 900 | */ |
@@ -953,8 +927,21 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
953 | next = prev; | 927 | next = prev; |
954 | } | 928 | } |
955 | 929 | ||
930 | #if 0 | ||
931 | if (completion) { | ||
932 | TRACE_CUR("switching away from a completion\n"); | ||
933 | } | ||
934 | #endif | ||
935 | |||
956 | sched_state_task_picked(); | 936 | sched_state_task_picked(); |
957 | 937 | ||
938 | #if 0 | ||
939 | if (next && is_realtime(next) && tsk_rt(next)->is_aux_task && !tsk_rt(next)->inh_task) { | ||
940 | TRACE_TASK(next, "is aux with no inheritance. preventing it from actually running.\n"); | ||
941 | next = NULL; | ||
942 | } | ||
943 | #endif | ||
944 | |||
958 | raw_spin_unlock(&gsnedf_lock); | 945 | raw_spin_unlock(&gsnedf_lock); |
959 | 946 | ||
960 | #ifdef WANT_ALL_SCHED_EVENTS | 947 | #ifdef WANT_ALL_SCHED_EVENTS |
@@ -965,7 +952,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
965 | else if (exists && !next) | 952 | else if (exists && !next) |
966 | TRACE("becomes idle at %llu.\n", litmus_clock()); | 953 | TRACE("becomes idle at %llu.\n", litmus_clock()); |
967 | #endif | 954 | #endif |
968 | 955 | ||
969 | return next; | 956 | return next; |
970 | } | 957 | } |
971 | 958 | ||
@@ -991,7 +978,7 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running) | |||
991 | unsigned long flags; | 978 | unsigned long flags; |
992 | cpu_entry_t* entry; | 979 | cpu_entry_t* entry; |
993 | 980 | ||
994 | TRACE("gsn edf: task new %d\n", t->pid); | 981 | TRACE("gsn edf: task new = %d on_rq = %d running = %d\n", t->pid, on_rq, running); |
995 | 982 | ||
996 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | 983 | raw_spin_lock_irqsave(&gsnedf_lock, flags); |
997 | 984 | ||
@@ -1060,11 +1047,11 @@ static void gsnedf_task_wake_up(struct task_struct *task) | |||
1060 | #endif | 1047 | #endif |
1061 | 1048 | ||
1062 | if (tsk_rt(task)->has_aux_tasks) { | 1049 | if (tsk_rt(task)->has_aux_tasks) { |
1063 | 1050 | ||
1064 | TRACE_CUR("%s/%d is ready so aux tasks may not inherit.\n", task->comm, task->pid); | 1051 | TRACE_CUR("%s/%d is ready so aux tasks may not inherit.\n", task->comm, task->pid); |
1065 | disable_aux_task_owner(task); | 1052 | disable_aux_task_owner(task); |
1066 | } | 1053 | } |
1067 | 1054 | ||
1068 | gsnedf_job_arrival(task); | 1055 | gsnedf_job_arrival(task); |
1069 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | 1056 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); |
1070 | } | 1057 | } |
@@ -1081,11 +1068,11 @@ static void gsnedf_task_block(struct task_struct *t) | |||
1081 | unlink(t); | 1068 | unlink(t); |
1082 | 1069 | ||
1083 | if (tsk_rt(t)->has_aux_tasks) { | 1070 | if (tsk_rt(t)->has_aux_tasks) { |
1084 | 1071 | ||
1085 | TRACE_CUR("%s/%d is blocked so aux tasks may inherit.\n", t->comm, t->pid); | 1072 | TRACE_CUR("%s/%d is blocked so aux tasks may inherit.\n", t->comm, t->pid); |
1086 | enable_aux_task_owner(t); | 1073 | enable_aux_task_owner(t); |
1087 | } | 1074 | } |
1088 | 1075 | ||
1089 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | 1076 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); |
1090 | 1077 | ||
1091 | BUG_ON(!is_realtime(t)); | 1078 | BUG_ON(!is_realtime(t)); |
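The wake-up and block hunks above establish the inheritance discipline for aux-task owners: inheritance toward aux threads is withdrawn while the owner is runnable and granted while it is blocked, so aux threads consume the owner's priority only when the owner itself cannot run. A minimal sketch of that discipline, for illustration only; the helpers come from aux_tasks.h, the wrapper itself is hypothetical:

    /* hedged sketch: aux inheritance follows the owner's runnable state */
    static void aux_owner_runnable_change(struct task_struct *t, int now_runnable)
    {
            if (!tsk_rt(t)->has_aux_tasks)
                    return;

            if (now_runnable)
                    disable_aux_task_owner(t); /* owner can run itself: no inheritance */
            else
                    enable_aux_task_owner(t);  /* owner is blocked: aux threads may inherit */
    }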
@@ -1105,17 +1092,17 @@ static void gsnedf_task_exit(struct task_struct * t) | |||
1105 | exit_aux_task(t); /* cannot be called with gsnedf_lock held */ | 1092 | exit_aux_task(t); /* cannot be called with gsnedf_lock held */ |
1106 | } | 1093 | } |
1107 | #endif | 1094 | #endif |
1108 | 1095 | ||
1109 | /* unlink if necessary */ | 1096 | /* unlink if necessary */ |
1110 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | 1097 | raw_spin_lock_irqsave(&gsnedf_lock, flags); |
1111 | 1098 | ||
1112 | #ifdef CONFIG_LITMUS_LOCKING | 1099 | #ifdef CONFIG_LITMUS_LOCKING |
1113 | /* make sure we clean up on our way out */ | 1100 | /* make sure we clean up on our way out */ |
1114 | if(tsk_rt(t)->has_aux_tasks) { | 1101 | if(tsk_rt(t)->has_aux_tasks) { |
1115 | disable_aux_task_owner(t); /* must be called with gsnedf_lock held */ | 1102 | disable_aux_task_owner(t); /* must be called with gsnedf_lock held */ |
1116 | } | 1103 | } |
1117 | #endif | 1104 | #endif |
1118 | 1105 | ||
1119 | unlink(t); | 1106 | unlink(t); |
1120 | if (tsk_rt(t)->scheduled_on != NO_CPU) { | 1107 | if (tsk_rt(t)->scheduled_on != NO_CPU) { |
1121 | gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL; | 1108 | gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL; |
@@ -1161,7 +1148,7 @@ static int __increase_priority_inheritance(struct task_struct* t, | |||
1161 | prio_inh->comm, prio_inh->pid); | 1148 | prio_inh->comm, prio_inh->pid); |
1162 | goto out; | 1149 | goto out; |
1163 | } | 1150 | } |
1164 | 1151 | ||
1165 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 1152 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
1166 | /* this sanity check allows for weaker locking in protocols */ | 1153 | /* this sanity check allows for weaker locking in protocols */ |
1167 | /* TODO (klitirqd): Skip this check if 't' is a proxy thread (???) */ | 1154 | /* TODO (klitirqd): Skip this check if 't' is a proxy thread (???) */ |
@@ -1221,8 +1208,8 @@ static int __increase_priority_inheritance(struct task_struct* t, | |||
1221 | &gsnedf.ready_queue); | 1208 | &gsnedf.ready_queue); |
1222 | check_for_preemptions(); | 1209 | check_for_preemptions(); |
1223 | } | 1210 | } |
1224 | 1211 | ||
1225 | 1212 | ||
1226 | /* propagate to aux tasks */ | 1213 | /* propagate to aux tasks */ |
1227 | if (tsk_rt(t)->has_aux_tasks) { | 1214 | if (tsk_rt(t)->has_aux_tasks) { |
1228 | aux_task_owner_increase_priority(t); | 1215 | aux_task_owner_increase_priority(t); |
@@ -1242,7 +1229,7 @@ static int __increase_priority_inheritance(struct task_struct* t, | |||
1242 | success = 0; | 1229 | success = 0; |
1243 | } | 1230 | } |
1244 | #endif | 1231 | #endif |
1245 | 1232 | ||
1246 | out: | 1233 | out: |
1247 | return success; | 1234 | return success; |
1248 | } | 1235 | } |
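__increase_priority_inheritance() ends by pushing the task's new effective priority down to any aux threads it owns; __decrease_priority_inheritance() in the later hunks does the same in the other direction. A hedged sketch of that propagation step, using the aux_tasks.h helpers shown in this commit (the wrapper is hypothetical):

    /* hedged sketch: propagate an owner's priority change to its aux threads */
    static void propagate_prio_to_aux_tasks(struct task_struct *t, int increased)
    {
            if (!tsk_rt(t)->has_aux_tasks)
                    return;

            if (increased)
                    aux_task_owner_increase_priority(t); /* aux threads pick up the higher prio */
            else
                    aux_task_owner_decrease_priority(t); /* aux threads fall back with the owner */
    }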
@@ -1251,7 +1238,7 @@ out: | |||
1251 | static void increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | 1238 | static void increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) |
1252 | { | 1239 | { |
1253 | int success; | 1240 | int success; |
1254 | 1241 | ||
1255 | raw_spin_lock(&gsnedf_lock); | 1242 | raw_spin_lock(&gsnedf_lock); |
1256 | 1243 | ||
1257 | success = __increase_priority_inheritance(t, prio_inh); | 1244 | success = __increase_priority_inheritance(t, prio_inh); |
@@ -1267,7 +1254,7 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str | |||
1267 | #endif | 1254 | #endif |
1268 | 1255 | ||
1269 | raw_spin_unlock(&gsnedf_lock); | 1256 | raw_spin_unlock(&gsnedf_lock); |
1270 | 1257 | ||
1271 | #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) | 1258 | #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) |
1272 | if(tsk_rt(t)->held_gpus) { | 1259 | if(tsk_rt(t)->held_gpus) { |
1273 | int i; | 1260 | int i; |
@@ -1286,7 +1273,7 @@ static int __decrease_priority_inheritance(struct task_struct* t, | |||
1286 | struct task_struct* prio_inh) | 1273 | struct task_struct* prio_inh) |
1287 | { | 1274 | { |
1288 | int success = 1; | 1275 | int success = 1; |
1289 | 1276 | ||
1290 | if (prio_inh == tsk_rt(t)->inh_task) { | 1277 | if (prio_inh == tsk_rt(t)->inh_task) { |
1291 | /* relationship already established. */ | 1278 | /* relationship already established. */ |
1292 | TRACE_TASK(t, "already inherits priority from %s/%d\n", | 1279 | TRACE_TASK(t, "already inherits priority from %s/%d\n", |
@@ -1294,7 +1281,7 @@ static int __decrease_priority_inheritance(struct task_struct* t, | |||
1294 | (prio_inh) ? prio_inh->pid : 0); | 1281 | (prio_inh) ? prio_inh->pid : 0); |
1295 | goto out; | 1282 | goto out; |
1296 | } | 1283 | } |
1297 | 1284 | ||
1298 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 1285 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
1299 | if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) { | 1286 | if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) { |
1300 | #endif | 1287 | #endif |
@@ -1331,7 +1318,7 @@ static int __decrease_priority_inheritance(struct task_struct* t, | |||
1331 | } | 1318 | } |
1332 | raw_spin_unlock(&gsnedf.release_lock); | 1319 | raw_spin_unlock(&gsnedf.release_lock); |
1333 | } | 1320 | } |
1334 | 1321 | ||
1335 | /* propagate to aux tasks */ | 1322 | /* propagate to aux tasks */ |
1336 | if (tsk_rt(t)->has_aux_tasks) { | 1323 | if (tsk_rt(t)->has_aux_tasks) { |
1337 | aux_task_owner_decrease_priority(t); | 1324 | aux_task_owner_decrease_priority(t); |
@@ -1349,7 +1336,7 @@ static int __decrease_priority_inheritance(struct task_struct* t, | |||
1349 | success = 0; | 1336 | success = 0; |
1350 | } | 1337 | } |
1351 | #endif | 1338 | #endif |
1352 | 1339 | ||
1353 | out: | 1340 | out: |
1354 | return success; | 1341 | return success; |
1355 | } | 1342 | } |
@@ -1358,11 +1345,11 @@ static void decrease_priority_inheritance(struct task_struct* t, | |||
1358 | struct task_struct* prio_inh) | 1345 | struct task_struct* prio_inh) |
1359 | { | 1346 | { |
1360 | int success; | 1347 | int success; |
1361 | 1348 | ||
1362 | raw_spin_lock(&gsnedf_lock); | 1349 | raw_spin_lock(&gsnedf_lock); |
1363 | 1350 | ||
1364 | success = __decrease_priority_inheritance(t, prio_inh); | 1351 | success = __decrease_priority_inheritance(t, prio_inh); |
1365 | 1352 | ||
1366 | #ifdef CONFIG_LITMUS_SOFTIRQD | 1353 | #ifdef CONFIG_LITMUS_SOFTIRQD |
1367 | if(tsk_rt(t)->cur_klitirqd != NULL) | 1354 | if(tsk_rt(t)->cur_klitirqd != NULL) |
1368 | { | 1355 | { |
@@ -1374,7 +1361,7 @@ static void decrease_priority_inheritance(struct task_struct* t, | |||
1374 | #endif | 1361 | #endif |
1375 | 1362 | ||
1376 | raw_spin_unlock(&gsnedf_lock); | 1363 | raw_spin_unlock(&gsnedf_lock); |
1377 | 1364 | ||
1378 | #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) | 1365 | #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) |
1379 | if(tsk_rt(t)->held_gpus) { | 1366 | if(tsk_rt(t)->held_gpus) { |
1380 | int i; | 1367 | int i; |
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c index 9a6fe487718e..62854b576796 100644 --- a/litmus/sched_litmus.c +++ b/litmus/sched_litmus.c | |||
@@ -177,8 +177,10 @@ static void enqueue_task_litmus(struct rq *rq, struct task_struct *p, | |||
177 | litmus->task_wake_up(p); | 177 | litmus->task_wake_up(p); |
178 | 178 | ||
179 | rq->litmus.nr_running++; | 179 | rq->litmus.nr_running++; |
180 | } else | 180 | } else { |
181 | TRACE_TASK(p, "ignoring an enqueue, not a wake up.\n"); | 181 | TRACE_TASK(p, "ignoring an enqueue, not a wake up.\n"); |
182 | //WARN_ON(1); | ||
183 | } | ||
182 | } | 184 | } |
183 | 185 | ||
184 | static void dequeue_task_litmus(struct rq *rq, struct task_struct *p, | 186 | static void dequeue_task_litmus(struct rq *rq, struct task_struct *p, |
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c index d24c9167cff8..f9423861eb1f 100644 --- a/litmus/sched_plugin.c +++ b/litmus/sched_plugin.c | |||
@@ -31,11 +31,19 @@ void preempt_if_preemptable(struct task_struct* t, int cpu) | |||
31 | 31 | ||
32 | int reschedule = 0; | 32 | int reschedule = 0; |
33 | 33 | ||
34 | if (!t) | 34 | TRACE_CUR("preempt_if_preemptable: %s/%d\n", |
35 | (t) ? t->comm : "(nil)", | ||
36 | (t) ? t->pid : 0); | ||
37 | |||
38 | if (!t) { | ||
39 | TRACE_CUR("unconditionally reshcedule\n"); | ||
35 | /* move non-real-time task out of the way */ | 40 | /* move non-real-time task out of the way */ |
36 | reschedule = 1; | 41 | reschedule = 1; |
42 | } | ||
37 | else { | 43 | else { |
38 | if (smp_processor_id() == cpu) { | 44 | if (smp_processor_id() == cpu) { |
45 | TRACE_CUR("preempt local cpu.\n"); | ||
46 | |||
39 | /* local CPU case */ | 47 | /* local CPU case */ |
40 | /* check if we need to poke userspace */ | 48 | /* check if we need to poke userspace */ |
41 | if (is_user_np(t)) | 49 | if (is_user_np(t)) |
@@ -47,14 +55,22 @@ void preempt_if_preemptable(struct task_struct* t, int cpu) | |||
47 | * currently-executing task */ | 55 | * currently-executing task */ |
48 | reschedule = 1; | 56 | reschedule = 1; |
49 | } else { | 57 | } else { |
58 | int is_knp = is_kernel_np(t); | ||
59 | int reqexit = request_exit_np_atomic(t); | ||
60 | TRACE_CUR("preempt remote cpu: isknp = %d reqexit = %d\n", is_knp, reqexit); | ||
61 | |||
50 | /* Remote CPU case. Only notify if it's not a kernel | 62 | /* Remote CPU case. Only notify if it's not a kernel |
51 | * NP section and if we didn't set the userspace | 63 | * NP section and if we didn't set the userspace |
52 | * flag. */ | 64 | * flag. */ |
53 | reschedule = !(is_kernel_np(t) || request_exit_np_atomic(t)); | 65 | //reschedule = !(is_kernel_np(t) || request_exit_np_atomic(t)); |
66 | reschedule = !(is_knp || reqexit); | ||
54 | } | 67 | } |
55 | } | 68 | } |
56 | if (likely(reschedule)) | 69 | |
70 | if (likely(reschedule)) { | ||
71 | TRACE_CUR("calling litmus_reschedule()\n"); | ||
57 | litmus_reschedule(cpu); | 72 | litmus_reschedule(cpu); |
73 | } | ||
58 | } | 74 | } |
59 | 75 | ||
60 | 76 | ||
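The reworked preempt_if_preemptable() now traces each branch, but the remote-CPU decision is unchanged: an IPI is only worth sending when the remote task is not in a kernel non-preemptive section and request_exit_np_atomic() did not flag its userspace NP section. Restated as a hedged sketch (the helper is hypothetical; request_exit_np_atomic() has the side effect of setting the exit-request flag, so it must still be called exactly once):

    /* hedged sketch: should we reschedule a remote CPU? */
    static int notify_remote_cpu(struct task_struct *t)
    {
            int is_knp  = is_kernel_np(t);            /* already in a kernel NP section? */
            int reqexit = request_exit_np_atomic(t);  /* flagged userspace to leave its NP section? */

            /* reschedule the remote CPU only if neither condition holds */
            return !(is_knp || reqexit);
    }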