Diffstat (limited to 'litmus/sched_mc.c')
-rw-r--r-- | litmus/sched_mc.c | 402 |
1 file changed, 246 insertions, 156 deletions
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index afba6e44716f..7b74958d1f4f 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -22,67 +22,64 @@ | |||
22 | #include <litmus/sched_trace.h> | 22 | #include <litmus/sched_trace.h> |
23 | #include <litmus/domain.h> | 23 | #include <litmus/domain.h> |
24 | #include <litmus/bheap.h> | 24 | #include <litmus/bheap.h> |
25 | #include <litmus/event_group.h> | ||
25 | 26 | ||
26 | #include <litmus/sched_mc.h> | 27 | #include <litmus/sched_mc.h> |
27 | #include <litmus/ce_domain.h> | 28 | #include <litmus/ce_domain.h> |
28 | 29 | ||
29 | /** | 30 | /** |
30 | * cpu_entry_t - State of a CPU for the entire MC system | 31 | * struct cpu_entry - State of a CPU for the entire MC system |
31 | * @cpu CPU id | 32 | * @cpu CPU id |
32 | * @scheduled Task that is physically running | 33 | * @scheduled Task that is physically running |
33 | * @linked Task that should be running / is logically running | 34 | * @linked Task that should be running / is logically running |
34 | * @lock For serialization | 35 | * @lock For serialization |
35 | * @crit_entries Array of CPU state per criticality level | 36 | * @crit_entries Array of CPU state per criticality level |
36 | */ | 37 | */ |
37 | typedef struct { | 38 | struct cpu_entry { |
38 | int cpu; | 39 | int cpu; |
39 | struct task_struct* scheduled; | 40 | struct task_struct* scheduled; |
40 | struct task_struct* linked; | 41 | struct task_struct* linked; |
41 | raw_spinlock_t lock; | 42 | raw_spinlock_t lock; |
42 | crit_entry_t crit_entries[NUM_CRIT_LEVELS]; | 43 | struct crit_entry crit_entries[NUM_CRIT_LEVELS]; |
43 | #ifdef CONFIG_PLUGIN_MC_REDIRECT | 44 | #ifdef CONFIG_PLUGIN_MC_REDIRECT |
44 | struct list_head redir; | 45 | struct list_head redir; |
45 | raw_spinlock_t redir_lock; | 46 | raw_spinlock_t redir_lock; |
46 | #endif | 47 | #endif |
47 | } cpu_entry_t; | 48 | #ifdef CONFIG_MERGE_TIMERS |
49 | struct event_group* event_group; | ||
50 | #endif | ||
51 | }; | ||
48 | 52 | ||
49 | static cpu_entry_t* cpus[NR_CPUS]; | 53 | DEFINE_PER_CPU(struct cpu_entry, cpus); |
50 | #ifdef CONFIG_RELEASE_MASTER | 54 | #ifdef CONFIG_RELEASE_MASTER |
51 | static int interrupt_cpu; | 55 | static int interrupt_cpu; |
52 | #endif | 56 | #endif |
57 | #ifdef CONFIG_MERGE_TIMERS | ||
58 | static struct event_group* global_group; | ||
59 | #endif | ||
53 | 60 | ||
54 | #define domain_data(dom) (container_of(dom, domain_data_t, domain)) | 61 | #define domain_data(dom) (container_of(dom, struct domain_data, domain)) |
55 | #define is_global(dom) (domain_data(dom)->heap) | 62 | #define is_global(dom) (domain_data(dom)->heap) |
56 | #define is_global_task(t) (is_global(get_task_domain(t))) | 63 | #define is_global_task(t) (is_global(get_task_domain(t))) |
57 | #define is_in_list(t) (tsk_rt(t)->list.next != tsk_rt(t)->list) | ||
58 | #define can_requeue(t) \ | 64 | #define can_requeue(t) \ |
59 | (!is_global_task(t) || (t)->rt_param.scheduled_on == NO_CPU) | 65 | (!is_global_task(t) || (t)->rt_param.scheduled_on == NO_CPU) |
60 | #define entry_level(e) \ | 66 | #define entry_level(e) \ |
61 | (((e)->linked) ? tsk_mc_crit((e)->linked) : NUM_CRIT_LEVELS - 1) | 67 | (((e)->linked) ? tsk_mc_crit((e)->linked) : NUM_CRIT_LEVELS - 1) |
62 | #define crit_cpu(ce) \ | 68 | #define crit_cpu(ce) \ |
63 | (container_of((void*)((ce) - (ce)->level), cpu_entry_t, crit_entries)) | 69 | (container_of((void*)((ce) - (ce)->level), struct cpu_entry, crit_entries)) |
64 | /* Useful debug macros */ | ||
65 | #define TS "(%s/%d:%d:%s)" | ||
66 | #define TA(t) (t) ? (is_ghost(t)) ? "ghost" : t->comm : "NULL", (t) ? t->pid : 1, \ | ||
67 | (t) ? t->rt_param.job_params.job_no : 1, \ | ||
68 | (t && get_task_domain(t)) ? get_task_domain(t)->name : "" | ||
69 | #define TRACE_ENTRY(e, fmt, args...) \ | 70 | #define TRACE_ENTRY(e, fmt, args...) \ |
70 | TRACE("P%d, linked=" TS " " fmt "\n", \ | 71 | STRACE("P%d, linked=" TS " " fmt "\n", e->cpu, TA(e->linked), ##args) |
71 | e->cpu, TA(e->linked), ##args) | ||
72 | #define TRACE_CRIT_ENTRY(ce, fmt, args...) \ | 72 | #define TRACE_CRIT_ENTRY(ce, fmt, args...) \ |
73 | TRACE("%s P%d, linked=" TS " " fmt "\n", \ | 73 | STRACE("%s P%d, linked=" TS " " fmt "\n", \ |
74 | (ce)->domain->name, crit_cpu(ce)->cpu, TA((ce)->linked), ##args) | 74 | (ce)->domain->name, crit_cpu(ce)->cpu, TA((ce)->linked), ##args) |
75 | #undef TRACE_TASK | ||
76 | #define TRACE_TASK(t, fmt, args...) \ | ||
77 | TRACE(TS " " fmt "\n", TA(t), ##args) | ||
78 | 75 | ||
79 | /* | 76 | /* |
80 | * Sort CPUs within a global domain by the domain's priority function. | 77 | * Sort CPUs within a global domain by the domain's priority function. |
81 | */ | 78 | */ |
82 | static int cpu_lower_prio(struct bheap_node *a, struct bheap_node *b) | 79 | static int cpu_lower_prio(struct bheap_node *a, struct bheap_node *b) |
83 | { | 80 | { |
84 | domain_t *domain; | 81 | struct domain *domain; |
85 | crit_entry_t *first, *second; | 82 | struct crit_entry *first, *second; |
86 | struct task_struct *first_link, *second_link; | 83 | struct task_struct *first_link, *second_link; |
87 | 84 | ||
88 | first = a->value; | 85 | first = a->value; |
@@ -104,7 +101,7 @@ static int cpu_lower_prio(struct bheap_node *a, struct bheap_node *b) | |||
104 | * Return true if the domain has a higher priority ready task. The curr | 101 | * Return true if the domain has a higher priority ready task. The curr |
105 | * task must belong to the domain. | 102 | * task must belong to the domain. |
106 | */ | 103 | */ |
107 | noinline int mc_preempt_needed(domain_t *dom, struct task_struct* curr) | 104 | static int mc_preempt_needed(struct domain *dom, struct task_struct* curr) |
108 | { | 105 | { |
109 | struct task_struct *next = dom->peek_ready(dom); | 106 | struct task_struct *next = dom->peek_ready(dom); |
110 | if (!next || !curr) { | 107 | if (!next || !curr) { |
@@ -119,15 +116,43 @@ noinline int mc_preempt_needed(domain_t *dom, struct task_struct* curr) | |||
119 | * Return next CPU which should be preempted or NULL if the domain has no | 116 | * Return next CPU which should be preempted or NULL if the domain has no |
120 | * preemptable CPUs. | 117 | * preemptable CPUs. |
121 | */ | 118 | */ |
122 | static inline crit_entry_t* lowest_prio_cpu(domain_t *dom) | 119 | static inline struct crit_entry* lowest_prio_cpu(struct domain *dom) |
123 | { | 120 | { |
124 | struct bheap *heap = domain_data(dom)->heap; | 121 | struct bheap *heap = domain_data(dom)->heap; |
125 | struct bheap_node* hn = bheap_peek(cpu_lower_prio, heap); | 122 | struct bheap_node* hn = bheap_peek(cpu_lower_prio, heap); |
126 | return (hn) ? hn->value : NULL; | 123 | return (hn) ? hn->value : NULL; |
127 | } | 124 | } |
128 | 125 | ||
129 | /** | 126 | /* |
130 | * update_ghost_time() - Time accounting for ghost tasks. | 127 | * Cancel ghost timer. |
128 | */ | ||
129 | static inline void cancel_ghost(struct crit_entry *ce) | ||
130 | { | ||
131 | #ifdef CONFIG_MERGE_TIMERS | ||
132 | cancel_event(crit_cpu(ce)->event_group, &ce->event); | ||
133 | #else | ||
134 | hrtimer_try_to_cancel(&ce->timer); | ||
135 | #endif | ||
136 | } | ||
137 | |||
138 | /* | ||
139 | * Arm ghost timer. Will merge timers if the option is specified. | ||
140 | */ | ||
141 | static inline void arm_ghost(struct crit_entry *ce, lt_t fire) | ||
142 | { | ||
143 | #ifdef CONFIG_MERGE_TIMERS | ||
144 | add_event(crit_cpu(ce)->event_group, &ce->event, fire); | ||
145 | #else | ||
146 | __hrtimer_start_range_ns(&ce->timer, | ||
147 | ns_to_ktime(fire), | ||
148 | 0 /* delta */, | ||
149 | HRTIMER_MODE_ABS_PINNED, | ||
150 | 0 /* no wakeup */); | ||
151 | #endif | ||
152 | } | ||
153 | |||
154 | /* | ||
155 | * Time accounting for ghost tasks. | ||
131 | * Must be called before a decision is made involving the task's budget. | 156 | * Must be called before a decision is made involving the task's budget. |
132 | */ | 157 | */ |
133 | static void update_ghost_time(struct task_struct *p) | 158 | static void update_ghost_time(struct task_struct *p) |
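For readers unfamiliar with the plain-hrtimer path that cancel_ghost()/arm_ghost() wrap when CONFIG_MERGE_TIMERS is off (the merge-timers path instead funnels into an event_group, as shown above), here is a self-contained sketch of the full timer lifecycle. The names ghost_timer, ghost_fired() and ghost_timer_setup()/teardown() are placeholders; the five-argument __hrtimer_start_range_ns() form matches the call used in this patch's kernel generation:

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>

	static struct hrtimer ghost_timer;

	static enum hrtimer_restart ghost_fired(struct hrtimer *timer)
	{
		/* budget exhausted: complete the ghost job here */
		return HRTIMER_NORESTART;
	}

	static void ghost_timer_setup(u64 now_ns, u64 budget_ns)
	{
		hrtimer_init(&ghost_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
		ghost_timer.function = ghost_fired;

		/* Arm at an absolute time, pinned to this CPU, no wakeup thread. */
		__hrtimer_start_range_ns(&ghost_timer,
					 ns_to_ktime(now_ns + budget_ns),
					 0 /* delta */,
					 HRTIMER_MODE_ABS_PINNED,
					 0 /* no wakeup */);
	}

	static void ghost_timer_teardown(void)
	{
		/* Returns 0/1 on success, -1 if the callback is currently running. */
		hrtimer_try_to_cancel(&ghost_timer);
	}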
@@ -137,14 +162,14 @@ static void update_ghost_time(struct task_struct *p) | |||
137 | BUG_ON(!is_ghost(p)); | 162 | BUG_ON(!is_ghost(p)); |
138 | if (unlikely ((s64)delta < 0)) { | 163 | if (unlikely ((s64)delta < 0)) { |
139 | delta = 0; | 164 | delta = 0; |
140 | TRACE_TASK(p, "WARNING: negative time delta"); | 165 | TRACE_MC_TASK(p, "WARNING: negative time delta"); |
141 | } | 166 | } |
142 | if (tsk_mc_data(p)->mc_job.ghost_budget <= delta) { | 167 | if (tsk_mc_data(p)->mc_job.ghost_budget <= delta) { |
143 | TRACE_TASK(p, "Ghost job could have ended"); | 168 | TRACE_MC_TASK(p, "Ghost job could have ended"); |
144 | tsk_mc_data(p)->mc_job.ghost_budget = 0; | 169 | tsk_mc_data(p)->mc_job.ghost_budget = 0; |
145 | p->se.exec_start = clock; | 170 | p->se.exec_start = clock; |
146 | } else { | 171 | } else { |
147 | TRACE_TASK(p, "Ghost job updated, but didn't finish"); | 172 | TRACE_MC_TASK(p, "Ghost job updated, but didn't finish"); |
148 | tsk_mc_data(p)->mc_job.ghost_budget -= delta; | 173 | tsk_mc_data(p)->mc_job.ghost_budget -= delta; |
149 | p->se.exec_start = clock; | 174 | p->se.exec_start = clock; |
150 | } | 175 | } |
@@ -154,7 +179,7 @@ static void update_ghost_time(struct task_struct *p) | |||
154 | * link_task_to_crit() - Logically run a task at a criticality level. | 179 | * link_task_to_crit() - Logically run a task at a criticality level. |
155 | * Caller must hold @ce's domain's lock. | 180 | * Caller must hold @ce's domain's lock. |
156 | */ | 181 | */ |
157 | static void link_task_to_crit(crit_entry_t *ce, | 182 | static void link_task_to_crit(struct crit_entry *ce, |
158 | struct task_struct *task) | 183 | struct task_struct *task) |
159 | { | 184 | { |
160 | lt_t when_to_fire; | 185 | lt_t when_to_fire; |
@@ -168,10 +193,10 @@ static void link_task_to_crit(crit_entry_t *ce, | |||
168 | 193 | ||
169 | /* Unlink last task */ | 194 | /* Unlink last task */ |
170 | if (ce->linked) { | 195 | if (ce->linked) { |
171 | TRACE_TASK(ce->linked, "Unlinking"); | 196 | TRACE_MC_TASK(ce->linked, "Unlinking"); |
172 | ce->linked->rt_param.linked_on = NO_CPU; | 197 | ce->linked->rt_param.linked_on = NO_CPU; |
173 | if (is_ghost(ce->linked)) { | 198 | if (is_ghost(ce->linked)) { |
174 | hrtimer_try_to_cancel(&ce->timer); | 199 | cancel_ghost(ce); |
175 | if (tsk_mc_data(ce->linked)->mc_job.ghost_budget > 0) { | 200 | if (tsk_mc_data(ce->linked)->mc_job.ghost_budget > 0) { |
176 | /* Job isn't finished, so do accounting */ | 201 | /* Job isn't finished, so do accounting */ |
177 | update_ghost_time(ce->linked); | 202 | update_ghost_time(ce->linked); |
@@ -192,11 +217,7 @@ static void link_task_to_crit(crit_entry_t *ce, | |||
192 | task->se.exec_start = litmus_clock(); | 217 | task->se.exec_start = litmus_clock(); |
193 | when_to_fire = task->se.exec_start + | 218 | when_to_fire = task->se.exec_start + |
194 | tsk_mc_data(task)->mc_job.ghost_budget; | 219 | tsk_mc_data(task)->mc_job.ghost_budget; |
195 | __hrtimer_start_range_ns(&ce->timer, | 220 | arm_ghost(ce, when_to_fire); |
196 | ns_to_ktime(when_to_fire), | ||
197 | 0 /* delta */, | ||
198 | HRTIMER_MODE_ABS_PINNED, | ||
199 | 0 /* no wakeup */); | ||
200 | } | 221 | } |
201 | } | 222 | } |
202 | 223 | ||
@@ -208,21 +229,23 @@ static void link_task_to_crit(crit_entry_t *ce, | |||
208 | } | 229 | } |
209 | } | 230 | } |
210 | 231 | ||
211 | void mc_check_for_preempt(domain_t*); | 232 | static void check_for_preempt(struct domain*); |
233 | |||
212 | /** | 234 | /** |
213 | * job_arrival() - Called when a task re-enters the system. | 235 | * job_arrival() - Called when a task re-enters the system. |
214 | * Caller must hold no locks. | 236 | * Caller must hold no locks. |
215 | */ | 237 | */ |
216 | static void job_arrival(struct task_struct *task) | 238 | static void job_arrival(struct task_struct *task) |
217 | { | 239 | { |
218 | domain_t *dom = get_task_domain(task); | 240 | struct domain *dom = get_task_domain(task); |
219 | 241 | ||
220 | TRACE_TASK(task, "Job arriving"); | 242 | TRACE_MC_TASK(task, "Job arriving"); |
221 | BUG_ON(!task); | 243 | BUG_ON(!task); |
244 | |||
222 | if (can_requeue(task)) { | 245 | if (can_requeue(task)) { |
223 | raw_spin_lock(dom->lock); | 246 | raw_spin_lock(dom->lock); |
224 | dom->requeue(dom, task); | 247 | dom->requeue(dom, task); |
225 | mc_check_for_preempt(dom); | 248 | check_for_preempt(dom); |
226 | raw_spin_unlock(dom->lock); | 249 | raw_spin_unlock(dom->lock); |
227 | } else { | 250 | } else { |
228 | /* If a global task is scheduled on one cpu, it CANNOT | 251 | /* If a global task is scheduled on one cpu, it CANNOT |
@@ -231,7 +254,7 @@ static void job_arrival(struct task_struct *task) | |||
231 | * causing the system to crash when the task is scheduled | 254 | * causing the system to crash when the task is scheduled |
232 | * in two places simultaneously. | 255 | * in two places simultaneously. |
233 | */ | 256 | */ |
234 | TRACE_TASK(task, "Delayed arrival of scheduled task"); | 257 | TRACE_MC_TASK(task, "Delayed arrival of scheduled task"); |
235 | } | 258 | } |
236 | } | 259 | } |
237 | 260 | ||
@@ -241,7 +264,7 @@ static void job_arrival(struct task_struct *task) | |||
241 | */ | 264 | */ |
242 | static void low_prio_arrival(struct task_struct *task) | 265 | static void low_prio_arrival(struct task_struct *task) |
243 | { | 266 | { |
244 | cpu_entry_t *entry; | 267 | struct cpu_entry *entry; |
245 | 268 | ||
246 | /* Race conditions! */ | 269 | /* Race conditions! */ |
247 | if (!can_requeue(task)) return; | 270 | if (!can_requeue(task)) return; |
@@ -252,9 +275,9 @@ static void low_prio_arrival(struct task_struct *task) | |||
252 | goto arrive; | 275 | goto arrive; |
253 | #endif | 276 | #endif |
254 | if (smp_processor_id() != interrupt_cpu) { | 277 | if (smp_processor_id() != interrupt_cpu) { |
255 | entry = cpus[smp_processor_id()]; | 278 | entry = &__get_cpu_var(cpus); |
256 | raw_spin_lock(&entry->redir_lock); | 279 | raw_spin_lock(&entry->redir_lock); |
257 | TRACE_TASK(task, "Adding to redirect queue"); | 280 | TRACE_MC_TASK(task, "Adding to redirect queue"); |
258 | list_add(&tsk_rt(task)->list, &entry->redir); | 281 | list_add(&tsk_rt(task)->list, &entry->redir); |
259 | raw_spin_unlock(&entry->redir_lock); | 282 | raw_spin_unlock(&entry->redir_lock); |
260 | litmus_reschedule(interrupt_cpu); | 283 | litmus_reschedule(interrupt_cpu); |
@@ -273,18 +296,18 @@ static void low_prio_arrival(struct task_struct *task) | |||
273 | static void fix_global_levels(void) | 296 | static void fix_global_levels(void) |
274 | { | 297 | { |
275 | int c; | 298 | int c; |
276 | cpu_entry_t *e; | 299 | struct cpu_entry *e; |
277 | struct list_head *pos, *safe; | 300 | struct list_head *pos, *safe; |
278 | struct task_struct *t; | 301 | struct task_struct *t; |
279 | 302 | ||
280 | TRACE("Fixing global levels\n"); | 303 | STRACE("Fixing global levels"); |
281 | for_each_online_cpu(c) { | 304 | for_each_online_cpu(c) { |
282 | e = cpus[c]; | 305 | e = &per_cpu(cpus, c); |
283 | raw_spin_lock(&e->redir_lock); | 306 | raw_spin_lock(&e->redir_lock); |
284 | list_for_each_safe(pos, safe, &e->redir) { | 307 | list_for_each_safe(pos, safe, &e->redir) { |
285 | t = list_entry(pos, struct task_struct, rt_param.list); | 308 | t = list_entry(pos, struct task_struct, rt_param.list); |
286 | TRACE_TASK(t, "Dequeued redirected job"); | 309 | BUG_ON(!t); |
287 | BUG_ON(is_queued(t)); | 310 | TRACE_MC_TASK(t, "Dequeued redirected job"); |
288 | list_del_init(pos); | 311 | list_del_init(pos); |
289 | job_arrival(t); | 312 | job_arrival(t); |
290 | } | 313 | } |
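Taken together, low_prio_arrival() and fix_global_levels() form a simple producer/consumer redirect queue: a CPU that may not make global scheduling decisions appends the task to its own per-CPU list and pokes interrupt_cpu, which later drains every CPU's list. A stripped-down sketch of that pattern follows; redir_queue, redir_item, redir_push() and redir_drain() are illustrative names only, while the real code chains tasks through tsk_rt(t)->list and wakes the remote CPU with litmus_reschedule():

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct redir_queue {
		struct list_head items;
		raw_spinlock_t lock;
	};

	struct redir_item {
		struct list_head node;
		/* payload, e.g. a task pointer */
	};

	static void redir_init(struct redir_queue *q)
	{
		INIT_LIST_HEAD(&q->items);
		raw_spin_lock_init(&q->lock);
	}

	/* Producer side: runs on a CPU that cannot handle the work itself. */
	static void redir_push(struct redir_queue *q, struct redir_item *it)
	{
		raw_spin_lock(&q->lock);
		list_add(&it->node, &q->items);
		raw_spin_unlock(&q->lock);
		/* then signal the designated CPU, e.g. litmus_reschedule(interrupt_cpu) */
	}

	/* Consumer side: runs on the designated CPU and drains the whole list.
	 * Like the original, it dispatches each item while still holding the lock. */
	static void redir_drain(struct redir_queue *q, void (*handle)(struct redir_item *))
	{
		struct list_head *pos, *safe;

		raw_spin_lock(&q->lock);
		list_for_each_safe(pos, safe, &q->items) {
			struct redir_item *it = list_entry(pos, struct redir_item, node);
			list_del_init(pos);
			handle(it);
		}
		raw_spin_unlock(&q->lock);
	}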
@@ -298,10 +321,10 @@ static void fix_global_levels(void) | |||
298 | * The task must first have been linked to one of the CPU's crit_entries. | 321 | * The task must first have been linked to one of the CPU's crit_entries. |
299 | * Caller must hold the entry lock. | 322 | * Caller must hold the entry lock. |
300 | */ | 323 | */ |
301 | static void link_task_to_cpu(cpu_entry_t *entry, struct task_struct *task) | 324 | static void link_task_to_cpu(struct cpu_entry *entry, struct task_struct *task) |
302 | { | 325 | { |
303 | int i = entry_level(entry); | 326 | int i = entry_level(entry); |
304 | TRACE_TASK(task, "Linking to P%d", entry->cpu); | 327 | TRACE_MC_TASK(task, "Linking to P%d", entry->cpu); |
305 | BUG_ON(task && tsk_rt(task)->linked_on != entry->cpu); | 328 | BUG_ON(task && tsk_rt(task)->linked_on != entry->cpu); |
306 | BUG_ON(task && is_ghost(task)); | 329 | BUG_ON(task && is_ghost(task)); |
307 | 330 | ||
@@ -322,10 +345,10 @@ static void link_task_to_cpu(cpu_entry_t *entry, struct task_struct *task) | |||
322 | * | 345 | * |
323 | * Caller must hold the lock for @dom and @ce's CPU lock. | 346 | * Caller must hold the lock for @dom and @ce's CPU lock. |
324 | */ | 347 | */ |
325 | static void preempt(domain_t *dom, crit_entry_t *ce) | 348 | static void preempt(struct domain *dom, struct crit_entry *ce) |
326 | { | 349 | { |
327 | struct task_struct *task = dom->take_ready(dom); | 350 | struct task_struct *task = dom->take_ready(dom); |
328 | cpu_entry_t *entry = crit_cpu(ce); | 351 | struct cpu_entry *entry = crit_cpu(ce); |
329 | 352 | ||
330 | BUG_ON(!task); | 353 | BUG_ON(!task); |
331 | TRACE_CRIT_ENTRY(ce, "Preempted by " TS, TA(task)); | 354 | TRACE_CRIT_ENTRY(ce, "Preempted by " TS, TA(task)); |
@@ -347,17 +370,25 @@ static void preempt(domain_t *dom, crit_entry_t *ce) | |||
347 | * This should be called after a new task has been linked to @entry. | 370 | * This should be called after a new task has been linked to @entry. |
348 | * The caller must hold the @entry->lock, but this method will release it. | 371 | * The caller must hold the @entry->lock, but this method will release it. |
349 | */ | 372 | */ |
350 | static void update_crit_levels(cpu_entry_t *entry) | 373 | static void update_crit_levels(struct cpu_entry *entry) |
351 | { | 374 | { |
352 | int i; | 375 | int i, global_preempted; |
353 | crit_entry_t *ce; | 376 | struct crit_entry *ce; |
354 | struct task_struct *tasks[NUM_CRIT_LEVELS]; | 377 | struct task_struct *readmit[NUM_CRIT_LEVELS]; |
355 | enum crit_level level = entry_level(entry); | 378 | enum crit_level level = entry_level(entry); |
356 | 379 | ||
357 | /* Remove lower priority tasks from the entry */ | 380 | /* Remove lower priority tasks from the entry */ |
358 | for (i = level + 1; i < NUM_CRIT_LEVELS; i++) { | 381 | for (i = level + 1; i < NUM_CRIT_LEVELS; i++) { |
359 | ce = &entry->crit_entries[i]; | 382 | ce = &entry->crit_entries[i]; |
360 | tasks[i] = ce->linked; | 383 | |
384 | global_preempted = entry->scheduled == ce->linked && | ||
385 | ce->linked && entry->linked && | ||
386 | !is_ghost(ce->linked) && is_global(ce->domain); | ||
387 | /* Do not readmit global tasks which are preempted! These can't | ||
388 | * ever be re-admitted until they are descheduled for reasons | ||
389 | * explained in job_arrival. | ||
390 | */ | ||
391 | readmit[i] = (!global_preempted) ? ce->linked : NULL; | ||
361 | ce->usable = 0; | 392 | ce->usable = 0; |
362 | if (ce->linked) | 393 | if (ce->linked) |
363 | link_task_to_crit(ce, NULL); | 394 | link_task_to_crit(ce, NULL); |
@@ -368,25 +399,24 @@ static void update_crit_levels(cpu_entry_t *entry) | |||
368 | /* Re-admit tasks to the system */ | 399 | /* Re-admit tasks to the system */ |
369 | for (i = level + 1; i < NUM_CRIT_LEVELS; i++) { | 400 | for (i = level + 1; i < NUM_CRIT_LEVELS; i++) { |
370 | ce = &entry->crit_entries[i]; | 401 | ce = &entry->crit_entries[i]; |
371 | if (tasks[i]) | 402 | if (readmit[i]) |
372 | low_prio_arrival(tasks[i]); | 403 | low_prio_arrival(readmit[i]); |
373 | } | 404 | } |
374 | } | 405 | } |
375 | 406 | ||
376 | /** | 407 | /** |
377 | * mc_check_for_preempt() - Causes a preemption if higher-priority tasks are ready. | 408 | * check_for_preempt() - Causes a preemption if higher-priority tasks are ready. |
378 | * Caller must hold domain lock. | 409 | * Caller must hold domain lock. |
379 | * Makes gigantic nasty assumption that there is 1 global criticality level, | 410 | * Makes gigantic nasty assumption that there is 1 global criticality level, |
380 | * and it is the last one in each list, so it doesn't call update_crit.. | 411 | * and it is the last one in each list, so it doesn't call update_crit.. |
381 | */ | 412 | */ |
382 | void mc_check_for_preempt(domain_t *dom) | 413 | static void check_for_preempt(struct domain *dom) |
383 | { | 414 | { |
384 | int preempted = 1; | 415 | int preempted = 1; |
385 | cpu_entry_t *entry; | 416 | struct cpu_entry *entry; |
386 | crit_entry_t *ce; | 417 | struct crit_entry *ce; |
387 | 418 | ||
388 | if (is_global(dom)) { | 419 | if (is_global(dom)) { |
389 | TRACE("domain: %s is global\n", dom->name); | ||
390 | /* Loop until we find a non-preemptable CPU */ | 420 | /* Loop until we find a non-preemptable CPU */ |
391 | while ((ce = lowest_prio_cpu(dom)) && preempted) { | 421 | while ((ce = lowest_prio_cpu(dom)) && preempted) { |
392 | entry = crit_cpu(ce); | 422 | entry = crit_cpu(ce); |
@@ -399,7 +429,6 @@ void mc_check_for_preempt(domain_t *dom) | |||
399 | raw_spin_unlock(&entry->lock); | 429 | raw_spin_unlock(&entry->lock); |
400 | } | 430 | } |
401 | } else /* Partitioned */ { | 431 | } else /* Partitioned */ { |
402 | TRACE("domain: %s is partitioned\n", dom->name); | ||
403 | ce = domain_data(dom)->crit_entry; | 432 | ce = domain_data(dom)->crit_entry; |
404 | entry = crit_cpu(ce); | 433 | entry = crit_cpu(ce); |
405 | raw_spin_lock(&entry->lock); | 434 | raw_spin_lock(&entry->lock); |
@@ -407,7 +436,6 @@ void mc_check_for_preempt(domain_t *dom) | |||
407 | preempt(dom, ce); | 436 | preempt(dom, ce); |
408 | update_crit_levels(entry); | 437 | update_crit_levels(entry); |
409 | } else { | 438 | } else { |
410 | TRACE("domain: %s NOT preempting\n", dom->name); | ||
411 | raw_spin_unlock(&entry->lock); | 439 | raw_spin_unlock(&entry->lock); |
412 | } | 440 | } |
413 | } | 441 | } |
@@ -420,17 +448,17 @@ void mc_check_for_preempt(domain_t *dom) | |||
420 | static void remove_from_all(struct task_struct* task) | 448 | static void remove_from_all(struct task_struct* task) |
421 | { | 449 | { |
422 | int update = 0; | 450 | int update = 0; |
423 | cpu_entry_t *entry; | 451 | struct cpu_entry *entry; |
424 | crit_entry_t *ce; | 452 | struct crit_entry *ce; |
425 | domain_t *dom = get_task_domain(task); | 453 | struct domain *dom = get_task_domain(task); |
426 | 454 | ||
427 | TRACE_TASK(task, "Removing from everything"); | 455 | TRACE_MC_TASK(task, "Removing from everything"); |
428 | BUG_ON(!task); | 456 | BUG_ON(!task); |
429 | 457 | ||
430 | raw_spin_lock(dom->lock); | 458 | raw_spin_lock(dom->lock); |
431 | 459 | ||
432 | if (task->rt_param.linked_on != NO_CPU) { | 460 | if (task->rt_param.linked_on != NO_CPU) { |
433 | entry = cpus[task->rt_param.linked_on]; | 461 | entry = &per_cpu(cpus, task->rt_param.linked_on); |
434 | raw_spin_lock(&entry->lock); | 462 | raw_spin_lock(&entry->lock); |
435 | 463 | ||
436 | /* Unlink only if task is still linked post lock */ | 464 | /* Unlink only if task is still linked post lock */ |
@@ -469,7 +497,8 @@ static void remove_from_all(struct task_struct* task) | |||
469 | */ | 497 | */ |
470 | static void job_completion(struct task_struct *task, int forced) | 498 | static void job_completion(struct task_struct *task, int forced) |
471 | { | 499 | { |
472 | TRACE_TASK(task, "Completed"); | 500 | lt_t now; |
501 | TRACE_MC_TASK(task, "Completed"); | ||
473 | sched_trace_task_completion(task, forced); | 502 | sched_trace_task_completion(task, forced); |
474 | BUG_ON(!task); | 503 | BUG_ON(!task); |
475 | 504 | ||
@@ -477,9 +506,11 @@ static void job_completion(struct task_struct *task, int forced) | |||
477 | set_rt_flags(task, RT_F_SLEEP); | 506 | set_rt_flags(task, RT_F_SLEEP); |
478 | remove_from_all(task); | 507 | remove_from_all(task); |
479 | 508 | ||
509 | now = litmus_clock(); | ||
510 | |||
480 | /* If it's not a ghost job, do ghost job conversion */ | 511 | /* If it's not a ghost job, do ghost job conversion */ |
481 | if (!is_ghost(task)) { | 512 | if (!is_ghost(task)) { |
482 | TRACE_TASK(task, "is not a ghost task\n"); | 513 | TRACE_MC_TASK(task, "is not a ghost task"); |
483 | tsk_mc_data(task)->mc_job.ghost_budget = budget_remaining(task); | 514 | tsk_mc_data(task)->mc_job.ghost_budget = budget_remaining(task); |
484 | tsk_mc_data(task)->mc_job.is_ghost = 1; | 515 | tsk_mc_data(task)->mc_job.is_ghost = 1; |
485 | } | 516 | } |
@@ -489,7 +520,7 @@ static void job_completion(struct task_struct *task, int forced) | |||
489 | * conversion. Revert back to a normal task and complete the period. | 520 | * conversion. Revert back to a normal task and complete the period. |
490 | */ | 521 | */ |
491 | if (tsk_mc_data(task)->mc_job.ghost_budget == 0) { | 522 | if (tsk_mc_data(task)->mc_job.ghost_budget == 0) { |
492 | TRACE_TASK(task, "has zero ghost budget\n"); | 523 | TRACE_MC_TASK(task, "has zero ghost budget"); |
493 | tsk_mc_data(task)->mc_job.is_ghost = 0; | 524 | tsk_mc_data(task)->mc_job.is_ghost = 0; |
494 | prepare_for_next_period(task); | 525 | prepare_for_next_period(task); |
495 | if (is_released(task, litmus_clock())) | 526 | if (is_released(task, litmus_clock())) |
@@ -504,14 +535,22 @@ static void job_completion(struct task_struct *task, int forced) | |||
504 | /** | 535 | /** |
505 | * mc_ghost_exhausted() - Complete logically running ghost task. | 536 | * mc_ghost_exhausted() - Complete logically running ghost task. |
506 | */ | 537 | */ |
538 | #ifdef CONFIG_MERGE_TIMERS | ||
539 | static void mc_ghost_exhausted(struct rt_event *e) | ||
540 | { | ||
541 | struct crit_entry *ce = container_of(e, struct crit_entry, event); | ||
542 | #else | ||
507 | static enum hrtimer_restart mc_ghost_exhausted(struct hrtimer *timer) | 543 | static enum hrtimer_restart mc_ghost_exhausted(struct hrtimer *timer) |
508 | { | 544 | { |
545 | struct crit_entry *ce = container_of(timer, struct crit_entry, timer); | ||
546 | #endif | ||
547 | |||
509 | unsigned long flags; | 548 | unsigned long flags; |
510 | struct task_struct *tmp = NULL; | 549 | struct task_struct *tmp = NULL; |
511 | crit_entry_t *ce = container_of(timer, crit_entry_t, timer); | ||
512 | 550 | ||
513 | local_irq_save(flags); | 551 | local_irq_save(flags); |
514 | TRACE_CRIT_ENTRY(ce, "Ghost exhausted firing"); | 552 | TRACE("Ghost exhausted\n"); |
553 | TRACE_CRIT_ENTRY(ce, "Firing here"); | ||
515 | 554 | ||
516 | /* Due to race conditions, we cannot just set the linked | 555 | /* Due to race conditions, we cannot just set the linked |
517 | * task's budget to 0 as it may no longer be the task | 556 | * task's budget to 0 as it may no longer be the task |
@@ -532,15 +571,17 @@ static enum hrtimer_restart mc_ghost_exhausted(struct hrtimer *timer) | |||
532 | job_completion(tmp, 0); | 571 | job_completion(tmp, 0); |
533 | 572 | ||
534 | local_irq_restore(flags); | 573 | local_irq_restore(flags); |
574 | #ifndef CONFIG_MERGE_TIMERS | ||
535 | return HRTIMER_NORESTART; | 575 | return HRTIMER_NORESTART; |
576 | #endif | ||
536 | } | 577 | } |
537 | 578 | ||
538 | static enum hrtimer_restart ce_timer_function(struct hrtimer *timer) | 579 | static enum hrtimer_restart ce_timer_function(struct hrtimer *timer) |
539 | { | 580 | { |
540 | struct ce_dom_data *ce_data = | 581 | struct ce_dom_data *ce_data = |
541 | container_of(timer, struct ce_dom_data, timer); | 582 | container_of(timer, struct ce_dom_data, timer); |
542 | crit_entry_t *ce = &cpus[ce_data->cpu]->crit_entries[CRIT_LEVEL_A]; | 583 | struct crit_entry *ce = &per_cpu(cpus, ce_data->cpu).crit_entries[CRIT_LEVEL_A]; |
543 | domain_t *dom = ce->domain; | 584 | struct domain *dom = ce->domain; |
544 | struct task_struct *old_link = NULL; | 585 | struct task_struct *old_link = NULL; |
545 | unsigned long flags; | 586 | unsigned long flags; |
546 | 587 | ||
@@ -566,13 +607,13 @@ static enum hrtimer_restart ce_timer_function(struct hrtimer *timer) | |||
566 | /* job completion will check for preemptions by means of calling job | 607 | /* job completion will check for preemptions by means of calling job |
567 | * arrival if the task is not blocked */ | 608 | * arrival if the task is not blocked */ |
568 | if (NULL != old_link) { | 609 | if (NULL != old_link) { |
569 | TRACE(" old_link " TS " so will call job completion\n", TA(old_link)); | 610 | STRACE("old_link " TS " so will call job completion\n", TA(old_link)); |
570 | raw_spin_unlock(dom->lock); | 611 | raw_spin_unlock(dom->lock); |
571 | job_completion(old_link, 0); | 612 | job_completion(old_link, 0); |
572 | } else { | 613 | } else { |
573 | TRACE(" old_link was null, so will call check for preempt\n"); | 614 | STRACE("old_link was null, so will call check for preempt\n"); |
574 | raw_spin_unlock(dom->lock); | 615 | raw_spin_unlock(dom->lock); |
575 | mc_check_for_preempt(dom); | 616 | check_for_preempt(dom); |
576 | } | 617 | } |
577 | 618 | ||
578 | local_irq_restore(flags); | 619 | local_irq_restore(flags); |
@@ -588,12 +629,12 @@ static void mc_release_jobs(rt_domain_t* rt, struct bheap* tasks) | |||
588 | { | 629 | { |
589 | unsigned long flags; | 630 | unsigned long flags; |
590 | struct task_struct *first = bheap_peek(rt->order, tasks)->value; | 631 | struct task_struct *first = bheap_peek(rt->order, tasks)->value; |
591 | domain_t *dom = get_task_domain(first); | 632 | struct domain *dom = get_task_domain(first); |
592 | 633 | ||
593 | raw_spin_lock_irqsave(dom->lock, flags); | 634 | raw_spin_lock_irqsave(dom->lock, flags); |
594 | TRACE_TASK(first, "Jobs released"); | 635 | TRACE(TS " Jobs released\n", TA(first)); |
595 | __merge_ready(rt, tasks); | 636 | __merge_ready(rt, tasks); |
596 | mc_check_for_preempt(dom); | 637 | check_for_preempt(dom); |
597 | raw_spin_unlock_irqrestore(dom->lock, flags); | 638 | raw_spin_unlock_irqrestore(dom->lock, flags); |
598 | } | 639 | } |
599 | 640 | ||
@@ -604,7 +645,7 @@ static void mc_release_jobs(rt_domain_t* rt, struct bheap* tasks) | |||
604 | static void mc_task_new(struct task_struct *t, int on_rq, int running) | 645 | static void mc_task_new(struct task_struct *t, int on_rq, int running) |
605 | { | 646 | { |
606 | unsigned long flags; | 647 | unsigned long flags; |
607 | cpu_entry_t* entry; | 648 | struct cpu_entry* entry; |
608 | enum crit_level level = tsk_mc_crit(t); | 649 | enum crit_level level = tsk_mc_crit(t); |
609 | 650 | ||
610 | local_irq_save(flags); | 651 | local_irq_save(flags); |
@@ -612,9 +653,9 @@ static void mc_task_new(struct task_struct *t, int on_rq, int running) | |||
612 | 653 | ||
613 | /* Assign domain */ | 654 | /* Assign domain */ |
614 | if (level < CRIT_LEVEL_C) | 655 | if (level < CRIT_LEVEL_C) |
615 | entry = cpus[get_partition(t)]; | 656 | entry = &per_cpu(cpus, get_partition(t)); |
616 | else | 657 | else |
617 | entry = cpus[task_cpu(t)]; | 658 | entry = &per_cpu(cpus, task_cpu(t)); |
618 | t->rt_param._domain = entry->crit_entries[level].domain; | 659 | t->rt_param._domain = entry->crit_entries[level].domain; |
619 | 660 | ||
620 | /* Setup job params */ | 661 | /* Setup job params */ |
@@ -644,7 +685,7 @@ static void mc_task_wake_up(struct task_struct *task) | |||
644 | lt_t now = litmus_clock(); | 685 | lt_t now = litmus_clock(); |
645 | local_irq_save(flags); | 686 | local_irq_save(flags); |
646 | 687 | ||
647 | TRACE_TASK(task, "Wakes up"); | 688 | TRACE(TS " wakes up\n", TA(task)); |
648 | if (is_tardy(task, now)) { | 689 | if (is_tardy(task, now)) { |
649 | /* Task missed its last release */ | 690 | /* Task missed its last release */ |
650 | release_at(task, now); | 691 | release_at(task, now); |
@@ -663,7 +704,7 @@ static void mc_task_block(struct task_struct *task) | |||
663 | { | 704 | { |
664 | unsigned long flags; | 705 | unsigned long flags; |
665 | local_irq_save(flags); | 706 | local_irq_save(flags); |
666 | TRACE_TASK(task, "Block at %llu", litmus_clock()); | 707 | TRACE(TS " blocks\n", TA(task)); |
667 | remove_from_all(task); | 708 | remove_from_all(task); |
668 | local_irq_restore(flags); | 709 | local_irq_restore(flags); |
669 | } | 710 | } |
@@ -676,11 +717,11 @@ static void mc_task_exit(struct task_struct *task) | |||
676 | unsigned long flags; | 717 | unsigned long flags; |
677 | local_irq_save(flags); | 718 | local_irq_save(flags); |
678 | BUG_ON(!is_realtime(task)); | 719 | BUG_ON(!is_realtime(task)); |
679 | TRACE_TASK(task, "RIP"); | 720 | TRACE(TS " RIP\n", TA(task)); |
680 | 721 | ||
681 | remove_from_all(task); | 722 | remove_from_all(task); |
682 | if (tsk_rt(task)->scheduled_on != NO_CPU) { | 723 | if (tsk_rt(task)->scheduled_on != NO_CPU) { |
683 | cpus[tsk_rt(task)->scheduled_on]->scheduled = NULL; | 724 | per_cpu(cpus, tsk_rt(task)->scheduled_on).scheduled = NULL; |
684 | tsk_rt(task)->scheduled_on = NO_CPU; | 725 | tsk_rt(task)->scheduled_on = NO_CPU; |
685 | } | 726 | } |
686 | 727 | ||
@@ -728,9 +769,9 @@ out: | |||
728 | static struct task_struct* mc_schedule(struct task_struct * prev) | 769 | static struct task_struct* mc_schedule(struct task_struct * prev) |
729 | { | 770 | { |
730 | unsigned long flags; | 771 | unsigned long flags; |
731 | domain_t *dom; | 772 | struct domain *dom; |
732 | crit_entry_t *ce; | 773 | struct crit_entry *ce; |
733 | cpu_entry_t* entry = cpus[smp_processor_id()]; | 774 | struct cpu_entry* entry = &__get_cpu_var(cpus); |
734 | int i, out_of_time, sleep, preempt, exists, blocks, global, lower; | 775 | int i, out_of_time, sleep, preempt, exists, blocks, global, lower; |
735 | struct task_struct *dtask = NULL, *ready_task = NULL, *next = NULL; | 776 | struct task_struct *dtask = NULL, *ready_task = NULL, *next = NULL; |
736 | 777 | ||
@@ -742,6 +783,7 @@ static struct task_struct* mc_schedule(struct task_struct * prev) | |||
742 | BUG_ON(is_realtime(prev) && !entry->scheduled); | 783 | BUG_ON(is_realtime(prev) && !entry->scheduled); |
743 | 784 | ||
744 | /* Determine state */ | 785 | /* Determine state */ |
786 | raw_spin_lock(&entry->lock); | ||
745 | exists = entry->scheduled != NULL; | 787 | exists = entry->scheduled != NULL; |
746 | blocks = exists && !is_running(entry->scheduled); | 788 | blocks = exists && !is_running(entry->scheduled); |
747 | out_of_time = exists && budget_enforced(entry->scheduled) && | 789 | out_of_time = exists && budget_enforced(entry->scheduled) && |
@@ -754,24 +796,27 @@ static struct task_struct* mc_schedule(struct task_struct * prev) | |||
754 | 796 | ||
755 | if (exists) { | 797 | if (exists) { |
756 | entry->scheduled->rt_param.scheduled_on = NO_CPU; | 798 | entry->scheduled->rt_param.scheduled_on = NO_CPU; |
757 | TRACE_TASK(prev, | 799 | TRACE(TS |
758 | "blocks:%d out_of_time:%d sleep:%d preempt:%d " | 800 | " blocks:%d out_of_time:%d sleep:%d preempt:%d " |
759 | "state:%d sig:%d global:%d", | 801 | "state:%d sig:%d global:%d\n", TA(prev), |
760 | blocks, out_of_time, sleep, preempt, | 802 | blocks, out_of_time, sleep, preempt, |
761 | prev->state, signal_pending(prev), global); | 803 | prev->state, signal_pending(prev), global); |
762 | } | 804 | } |
805 | raw_spin_unlock(&entry->lock); | ||
806 | |||
763 | 807 | ||
764 | #ifdef CONFIG_PLUGIN_MC_REDIRECT | 808 | #ifdef CONFIG_PLUGIN_MC_REDIRECT |
765 | if (smp_processor_id() == interrupt_cpu) | 809 | if (smp_processor_id() == interrupt_cpu) |
766 | fix_global_levels(); | 810 | fix_global_levels(); |
767 | #endif | 811 | #endif |
812 | |||
768 | /* If a task blocks we have no choice but to reschedule */ | 813 | /* If a task blocks we have no choice but to reschedule */ |
769 | if (blocks) | 814 | if (blocks) |
770 | remove_from_all(entry->scheduled); | 815 | remove_from_all(entry->scheduled); |
771 | /* Any task which exhausts its budget or sleeps waiting for its next | 816 | /* Any task which exhausts its budget or sleeps waiting for its next |
772 | * period completes unless its execution has been forcibly stopped. | 817 | * period completes unless its execution has been forcibly stopped. |
773 | */ | 818 | */ |
774 | if ((out_of_time || sleep) && !blocks && !preempt) | 819 | if ((out_of_time || sleep) && !blocks)/* && !preempt)*/ |
775 | job_completion(entry->scheduled, !sleep); | 820 | job_completion(entry->scheduled, !sleep); |
776 | /* Global scheduled tasks must wait for a deschedule before they | 821 | /* Global scheduled tasks must wait for a deschedule before they |
777 | * can rejoin the global state. Rejoin them here. | 822 | * can rejoin the global state. Rejoin them here. |
@@ -824,40 +869,12 @@ static struct task_struct* mc_schedule(struct task_struct * prev) | |||
824 | raw_spin_unlock(&entry->lock); | 869 | raw_spin_unlock(&entry->lock); |
825 | local_irq_restore(flags); | 870 | local_irq_restore(flags); |
826 | if (next) | 871 | if (next) |
827 | TRACE_TASK(next, "Scheduled at %llu", litmus_clock()); | 872 | TRACE_MC_TASK(next, "Scheduled at %llu", litmus_clock()); |
828 | else if (exists && !next) | 873 | else if (exists && !next) |
829 | TRACE("Becomes idle at %llu\n", litmus_clock()); | 874 | TRACE_ENTRY(entry, "Becomes idle at %llu", litmus_clock()); |
830 | return next; | 875 | return next; |
831 | } | 876 | } |
832 | 877 | ||
833 | static long mc_activate_plugin(void) | ||
834 | { | ||
835 | domain_data_t *dom_data; | ||
836 | domain_t *dom; | ||
837 | domain_data_t *our_domains[NR_CPUS]; | ||
838 | int cpu, n = 0; | ||
839 | long ret; | ||
840 | |||
841 | #ifdef CONFIG_RELEASE_MASTER | ||
842 | interrupt_cpu = atomic_read(&release_master_cpu); | ||
843 | if (interrupt_cpu == NO_CPU) | ||
844 | interrupt_cpu = 0; | ||
845 | #endif | ||
846 | for_each_online_cpu(cpu) { | ||
847 | BUG_ON(NR_CPUS <= n); | ||
848 | dom = cpus[cpu]->crit_entries[CRIT_LEVEL_A].domain; | ||
849 | dom_data = domain_data(dom); | ||
850 | our_domains[cpu] = dom_data; | ||
851 | n++; | ||
852 | } | ||
853 | ret = mc_ce_set_domains(n, our_domains); | ||
854 | if (ret) | ||
855 | goto out; | ||
856 | ret = mc_ce_activate_plugin_common(); | ||
857 | out: | ||
858 | return ret; | ||
859 | } | ||
860 | |||
861 | /* | 878 | /* |
862 | * This is the plugin's release at function, called by the release task-set | 879 | * This is the plugin's release at function, called by the release task-set |
863 | * system call. Other places in the file use the generic LITMUS release_at(), | 880 | * system call. Other places in the file use the generic LITMUS release_at(), |
@@ -884,20 +901,57 @@ long mc_deactivate_plugin(void) | |||
884 | /* Initialize values here so that they are allocated with the module | 901 | /* Initialize values here so that they are allocated with the module |
885 | * and destroyed when the module is unloaded. | 902 | * and destroyed when the module is unloaded. |
886 | */ | 903 | */ |
887 | DEFINE_PER_CPU(cpu_entry_t, _mc_cpus); | 904 | |
888 | /* LVL-A */ | 905 | /* LVL-A */ |
889 | DEFINE_PER_CPU(domain_data_t, _mc_crit_a); | 906 | DEFINE_PER_CPU(struct domain_data, _mc_crit_a); |
890 | DEFINE_PER_CPU(raw_spinlock_t, _mc_crit_a_lock); | 907 | DEFINE_PER_CPU(raw_spinlock_t, _mc_crit_a_lock); |
891 | DEFINE_PER_CPU(struct ce_dom_data, _mc_crit_a_ce_data); | 908 | DEFINE_PER_CPU(struct ce_dom_data, _mc_crit_a_ce_data); |
892 | /* LVL-B */ | 909 | /* LVL-B */ |
893 | DEFINE_PER_CPU(domain_data_t, _mc_crit_b); | 910 | DEFINE_PER_CPU(struct domain_data, _mc_crit_b); |
894 | DEFINE_PER_CPU(rt_domain_t, _mc_crit_b_rt); | 911 | DEFINE_PER_CPU(rt_domain_t, _mc_crit_b_rt); |
895 | /* LVL-C */ | 912 | /* LVL-C */ |
896 | static domain_data_t _mc_crit_c; | 913 | static struct domain_data _mc_crit_c; |
897 | static rt_domain_t _mc_crit_c_rt; | 914 | static rt_domain_t _mc_crit_c_rt; |
898 | struct bheap _mc_heap_c; | 915 | struct bheap _mc_heap_c; |
899 | struct bheap_node _mc_nodes_c[NR_CPUS]; | 916 | struct bheap_node _mc_nodes_c[NR_CPUS]; |
900 | 917 | ||
918 | #ifdef CONFIG_MERGE_TIMERS | ||
919 | #ifdef CONFIG_PLUGIN_MC_RELEASE_MASTER | ||
920 | struct event_group _mc_group; | ||
921 | #else | ||
922 | DEFINE_PER_CPU(struct event_group, _mc_groups); | ||
923 | #endif | ||
924 | #endif | ||
925 | |||
926 | static long mc_activate_plugin(void) | ||
927 | { | ||
928 | struct domain_data *dom_data; | ||
929 | struct domain *dom; | ||
930 | struct domain_data *our_domains[NR_CPUS]; | ||
931 | int cpu, n = 0; | ||
932 | long ret; | ||
933 | |||
934 | #ifdef CONFIG_RELEASE_MASTER | ||
935 | interrupt_cpu = atomic_read(&release_master_cpu); | ||
936 | if (interrupt_cpu == NO_CPU) | ||
937 | interrupt_cpu = 0; | ||
938 | #endif | ||
939 | |||
940 | for_each_online_cpu(cpu) { | ||
941 | BUG_ON(NR_CPUS <= n); | ||
942 | dom = per_cpu(cpus, cpu).crit_entries[CRIT_LEVEL_A].domain; | ||
943 | dom_data = domain_data(dom); | ||
944 | our_domains[cpu] = dom_data; | ||
945 | n++; | ||
946 | } | ||
947 | ret = mc_ce_set_domains(n, our_domains); | ||
948 | if (ret) | ||
949 | goto out; | ||
950 | ret = mc_ce_activate_plugin_common(); | ||
951 | out: | ||
952 | return ret; | ||
953 | } | ||
954 | |||
901 | static struct sched_plugin mc_plugin __cacheline_aligned_in_smp = { | 955 | static struct sched_plugin mc_plugin __cacheline_aligned_in_smp = { |
902 | .plugin_name = "MC", | 956 | .plugin_name = "MC", |
903 | .task_new = mc_task_new, | 957 | .task_new = mc_task_new, |
@@ -912,8 +966,8 @@ static struct sched_plugin mc_plugin __cacheline_aligned_in_smp = { | |||
912 | .deactivate_plugin = mc_deactivate_plugin, | 966 | .deactivate_plugin = mc_deactivate_plugin, |
913 | }; | 967 | }; |
914 | 968 | ||
915 | static void init_crit_entry(crit_entry_t *ce, enum crit_level level, | 969 | static void init_crit_entry(struct crit_entry *ce, enum crit_level level, |
916 | domain_data_t *dom_data, | 970 | struct domain_data *dom_data, |
917 | struct bheap_node *node) | 971 | struct bheap_node *node) |
918 | { | 972 | { |
919 | ce->level = level; | 973 | ce->level = level; |
@@ -921,12 +975,17 @@ static void init_crit_entry(crit_entry_t *ce, enum crit_level level, | |||
921 | ce->node = node; | 975 | ce->node = node; |
922 | ce->domain = &dom_data->domain; | 976 | ce->domain = &dom_data->domain; |
923 | ce->usable = 1; | 977 | ce->usable = 1; |
924 | atomic_set(&ce->dirty, 1); | 978 | #ifdef CONFIG_MERGE_TIMERS |
979 | init_event(&ce->event, level, mc_ghost_exhausted, | ||
980 | event_list_alloc(GFP_ATOMIC)); | ||
981 | #else | ||
925 | hrtimer_init(&ce->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | 982 | hrtimer_init(&ce->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); |
926 | ce->timer.function = mc_ghost_exhausted; | 983 | ce->timer.function = mc_ghost_exhausted; |
984 | #endif | ||
985 | |||
927 | } | 986 | } |
928 | 987 | ||
929 | static void init_local_domain(cpu_entry_t *entry, domain_data_t *dom_data, | 988 | static void init_local_domain(struct cpu_entry *entry, struct domain_data *dom_data, |
930 | enum crit_level level) | 989 | enum crit_level level) |
931 | { | 990 | { |
932 | dom_data->heap = NULL; | 991 | dom_data->heap = NULL; |
@@ -934,12 +993,12 @@ static void init_local_domain(cpu_entry_t *entry, domain_data_t *dom_data, | |||
934 | init_crit_entry(dom_data->crit_entry, level, dom_data, NULL); | 993 | init_crit_entry(dom_data->crit_entry, level, dom_data, NULL); |
935 | } | 994 | } |
936 | 995 | ||
937 | static void init_global_domain(domain_data_t *dom_data, enum crit_level level, | 996 | static void init_global_domain(struct domain_data *dom_data, enum crit_level level, |
938 | struct bheap *heap, struct bheap_node *nodes) | 997 | struct bheap *heap, struct bheap_node *nodes) |
939 | { | 998 | { |
940 | int cpu; | 999 | int cpu; |
941 | cpu_entry_t *entry; | 1000 | struct cpu_entry *entry; |
942 | crit_entry_t *ce; | 1001 | struct crit_entry *ce; |
943 | struct bheap_node *node; | 1002 | struct bheap_node *node; |
944 | 1003 | ||
945 | dom_data->crit_entry = NULL; | 1004 | dom_data->crit_entry = NULL; |
@@ -947,7 +1006,7 @@ static void init_global_domain(domain_data_t *dom_data, enum crit_level level, | |||
947 | bheap_init(heap); | 1006 | bheap_init(heap); |
948 | 1007 | ||
949 | for_each_online_cpu(cpu) { | 1008 | for_each_online_cpu(cpu) { |
950 | entry = cpus[cpu]; | 1009 | entry = &per_cpu(cpus, cpu); |
951 | node = &nodes[cpu]; | 1010 | node = &nodes[cpu]; |
952 | ce = &entry->crit_entries[level]; | 1011 | ce = &entry->crit_entries[level]; |
953 | init_crit_entry(ce, level, dom_data, node); | 1012 | init_crit_entry(ce, level, dom_data, node); |
@@ -956,37 +1015,59 @@ static void init_global_domain(domain_data_t *dom_data, enum crit_level level, | |||
956 | } | 1015 | } |
957 | } | 1016 | } |
958 | 1017 | ||
959 | static inline void init_edf_domain(domain_t *dom, rt_domain_t *rt) | 1018 | static inline void init_edf_domain(struct domain *dom, rt_domain_t *rt, |
1019 | int timer_cpu, int prio) | ||
960 | { | 1020 | { |
961 | pd_domain_init(dom, rt, edf_ready_order, NULL, | 1021 | pd_domain_init(dom, rt, edf_ready_order, NULL, |
962 | mc_release_jobs, mc_preempt_needed, | 1022 | mc_release_jobs, mc_preempt_needed, |
963 | edf_higher_prio); | 1023 | edf_higher_prio); |
1024 | #ifdef CONFIG_PLUGIN_MC_RELEASE_MASTER | ||
1025 | #ifdef CONFIG_MERGE_TIMERS | ||
1026 | rt->event_group = &_mc_group; | ||
1027 | rt->prio = prio; | ||
1028 | #else | ||
1029 | rt->release_master = interrupt_cpu; | ||
1030 | #endif | ||
1031 | #elif CONFIG_MERGE_TIMERS | ||
1032 | rt->event_group = &_mc_groups[timer_cpu]; | ||
1033 | rt->prio = prio; | ||
1034 | #endif | ||
964 | } | 1035 | } |
965 | 1036 | ||
966 | domain_data_t *ce_domain_for(int); | 1037 | struct domain_data *ce_domain_for(int); |
967 | static int __init init_mc(void) | 1038 | static int __init init_mc(void) |
968 | { | 1039 | { |
969 | int cpu; | 1040 | int cpu; |
970 | cpu_entry_t *entry; | 1041 | struct cpu_entry *entry; |
1042 | struct domain_data *dom_data; | ||
971 | rt_domain_t *rt; | 1043 | rt_domain_t *rt; |
972 | domain_data_t *dom_data; | 1044 | raw_spinlock_t *a_dom_lock, *b_dom_lock, *c_dom_lock; /* For lock debugger */ |
973 | raw_spinlock_t *a_dom_lock, *b_dom, *c_dom; /* For lock debugger */ | ||
974 | struct ce_dom_data *ce_data; | 1045 | struct ce_dom_data *ce_data; |
975 | 1046 | ||
976 | for_each_online_cpu(cpu) { | 1047 | for_each_online_cpu(cpu) { |
977 | entry = &per_cpu(_mc_cpus, cpu); | 1048 | entry = &per_cpu(cpus, cpu); |
978 | cpus[cpu] = entry; | ||
979 | 1049 | ||
980 | /* CPU */ | 1050 | /* CPU */ |
981 | entry->cpu = cpu; | 1051 | entry->cpu = cpu; |
982 | entry->scheduled = NULL; | 1052 | entry->scheduled = NULL; |
983 | entry->linked = NULL; | 1053 | entry->linked = NULL; |
984 | raw_spin_lock_init(&entry->lock); | 1054 | raw_spin_lock_init(&entry->lock); |
1055 | |||
985 | #ifdef CONFIG_PLUGIN_MC_REDIRECT | 1056 | #ifdef CONFIG_PLUGIN_MC_REDIRECT |
986 | raw_spin_lock_init(&entry->redir_lock); | 1057 | raw_spin_lock_init(&entry->redir_lock); |
987 | INIT_LIST_HEAD(&entry->redir); | 1058 | INIT_LIST_HEAD(&entry->redir); |
988 | #endif | 1059 | #endif |
989 | 1060 | ||
1061 | #ifdef CONFIG_MERGE_TIMERS | ||
1062 | #ifdef CONFIG_PLUGIN_MC_RELEASE_MASTER | ||
1063 | entry->event_group = &_mc_group; | ||
1064 | #else | ||
1065 | init_event_group(&_mc_groups[cpu], | ||
1066 | CONFIG_MERGE_TIMERS_WINDOW, cpu); | ||
1067 | entry->event_group = &_mc_groups[cpu]; | ||
1068 | #endif | ||
1069 | #endif | ||
1070 | |||
990 | /* CRIT_LEVEL_A */ | 1071 | /* CRIT_LEVEL_A */ |
991 | dom_data = &per_cpu(_mc_crit_a, cpu); | 1072 | dom_data = &per_cpu(_mc_crit_a, cpu); |
992 | ce_data = &per_cpu(_mc_crit_a_ce_data, cpu); | 1073 | ce_data = &per_cpu(_mc_crit_a_ce_data, cpu); |
@@ -1004,18 +1085,27 @@ static int __init init_mc(void) | |||
1004 | dom_data = &per_cpu(_mc_crit_b, cpu); | 1085 | dom_data = &per_cpu(_mc_crit_b, cpu); |
1005 | rt = &per_cpu(_mc_crit_b_rt, cpu); | 1086 | rt = &per_cpu(_mc_crit_b_rt, cpu); |
1006 | init_local_domain(entry, dom_data, CRIT_LEVEL_B); | 1087 | init_local_domain(entry, dom_data, CRIT_LEVEL_B); |
1007 | init_edf_domain(&dom_data->domain, rt); | 1088 | init_edf_domain(&dom_data->domain, rt, cpu, CRIT_LEVEL_B); |
1008 | b_dom = dom_data->domain.lock; | 1089 | b_dom_lock = dom_data->domain.lock; |
1009 | raw_spin_lock_init(b_dom); | 1090 | raw_spin_lock_init(b_dom_lock); |
1010 | dom_data->domain.name = "LVL-B"; | 1091 | dom_data->domain.name = "LVL-B"; |
1011 | } | 1092 | } |
1012 | 1093 | ||
1094 | #ifdef CONFIG_MERGE_TIMERS | ||
1095 | #ifdef CONFIG_PLUGIN_MC_RELEASE_MASTER | ||
1096 | init_event_group(&_mc_group, CONFIG_MERGE_TIMERS_WINDOW, interrupt_cpu); | ||
1097 | global_group = &_mc_group; | ||
1098 | #else | ||
1099 | global_group = &_mc_groups[0]; | ||
1100 | #endif | ||
1101 | #endif | ||
1102 | |||
1013 | /* CRIT_LEVEL_C */ | 1103 | /* CRIT_LEVEL_C */ |
1014 | init_global_domain(&_mc_crit_c, CRIT_LEVEL_C, | 1104 | init_global_domain(&_mc_crit_c, CRIT_LEVEL_C, |
1015 | &_mc_heap_c, _mc_nodes_c); | 1105 | &_mc_heap_c, _mc_nodes_c); |
1016 | init_edf_domain(&_mc_crit_c.domain, &_mc_crit_c_rt); | 1106 | init_edf_domain(&_mc_crit_c.domain, &_mc_crit_c_rt, 0, CRIT_LEVEL_C); |
1017 | c_dom = _mc_crit_c.domain.lock; | 1107 | c_dom_lock = _mc_crit_c.domain.lock; |
1018 | raw_spin_lock_init(c_dom); | 1108 | raw_spin_lock_init(c_dom_lock); |
1019 | _mc_crit_c.domain.name = "LVL-C"; | 1109 | _mc_crit_c.domain.name = "LVL-C"; |
1020 | 1110 | ||
1021 | return register_sched_plugin(&mc_plugin); | 1111 | return register_sched_plugin(&mc_plugin); |