author    Jonathan Herman <hermanjl@cs.unc.edu>  2012-10-09 01:27:45 -0400
committer Jonathan Herman <hermanjl@cs.unc.edu>  2012-10-09 01:27:45 -0400
commit    b26432a616a3aaad55ea404cd88d37fd1e345af1 (patch)
tree      37ddaea5ff67dc47b7d03ddb3b749f3a885e78f4
parent    6c264383dfabce5a6cedd9a3b51250757d534d97 (diff)
A task's blocking time no longer takes from its server's budget.
-rw-r--r--  include/litmus/domain.h   |   2
-rw-r--r--  include/litmus/sched_mc.h |   3
-rw-r--r--  litmus/dgl.c              |   6
-rw-r--r--  litmus/domain.c           |   2
-rw-r--r--  litmus/jobs.c             |   6
-rw-r--r--  litmus/sched_mc.c         | 171
6 files changed, 112 insertions(+), 78 deletions(-)
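The change in a nutshell: previously the server's budget timer was armed as soon as a task was linked, so time the task spent blocked on a group lock was charged against the server. With this patch, link_task_to_crit() only calls start_crit() (which arms the budget timer) once resources are actually held; otherwise the criticality entry parks in the new CS_BLOCKED state until cpu_acquired() signals it. A minimal user-space model of the accounting difference, with all names invented for illustration rather than taken from LITMUS^RT:

/* Before: the budget timer starts at link time, so blocking drains
 * the budget. After: it starts at acquisition time, so it does not. */
#include <stdio.h>

typedef unsigned long long lt_t;

struct server { lt_t budget; lt_t timer_armed_at; int running; };

static void start_budget(struct server *s, lt_t now)
{
	s->timer_armed_at = now;
	s->running = 1;
}

static lt_t budget_used(const struct server *s, lt_t now)
{
	return s->running ? now - s->timer_armed_at : 0;
}

int main(void)
{
	struct server old_policy = { .budget = 100 };
	struct server new_policy = { .budget = 100 };
	lt_t link_time = 0, acquire_time = 30, completion = 80;

	start_budget(&old_policy, link_time);    /* armed at link */
	start_budget(&new_policy, acquire_time); /* armed once lock is held */

	printf("old policy charged: %llu\n", budget_used(&old_policy, completion)); /* 80 */
	printf("new policy charged: %llu\n", budget_used(&new_policy, completion)); /* 50 */
	return 0;
}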
diff --git a/include/litmus/domain.h b/include/litmus/domain.h
index 75271c4acd33..cc93fea8a996 100644
--- a/include/litmus/domain.h
+++ b/include/litmus/domain.h
@@ -14,7 +14,7 @@ typedef void (*remove_t)(struct domain*, struct task_struct*);
 typedef struct task_struct* (*peek_ready_t)(struct domain*);
 typedef struct task_struct* (*take_ready_t)(struct domain*);
 typedef int (*task_prio_t)(struct task_struct*, struct task_struct*);
-typedef void (*acquire_resources_t)(struct task_struct *t);
+typedef int (*acquire_resources_t)(struct task_struct *t);
 typedef void (*release_resources_t)(struct task_struct *t);
 
 typedef struct domain {
diff --git a/include/litmus/sched_mc.h b/include/litmus/sched_mc.h
index d969b18e0b36..5473ee30cb58 100644
--- a/include/litmus/sched_mc.h
+++ b/include/litmus/sched_mc.h
@@ -74,8 +74,9 @@ struct ce_dom_data {
  * position adjusted in a global heap. This should be set when
  * ONLY the CPU state is locked.
  * @CS_REMOVED The criticality entry has been removed from the crit level
+ * @CS_BLOCKED The criticality entry cannot run until an event occurs
  */
-enum crit_state { CS_ACTIVE, CS_ACTIVATE, CS_REMOVE, CS_REMOVED };
+enum crit_state { CS_ACTIVE, CS_ACTIVATE, CS_REMOVE, CS_REMOVED, CS_BLOCKED };
 
 /**
  * struct crit_entry - State of a CPU within each criticality level system.
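For reference, the transitions the new CS_BLOCKED state participates in, condensed into a standalone sketch. The function names here are invented; they stand in for the logic in link_task_to_crit() and cpu_acquired() further down:

/* Model of the new crit_entry state transitions; mirrors the patch's
 * logic but is not kernel code. */
enum crit_state { CS_ACTIVE, CS_ACTIVATE, CS_REMOVE, CS_REMOVED, CS_BLOCKED };

/* link_task_to_crit(): ghosts need no resources; others must acquire. */
static enum crit_state on_link(int is_ghost, int acquired)
{
	return (is_ghost || acquired) ? CS_ACTIVE : CS_BLOCKED;
}

/* cpu_acquired(): a granted lock unblocks the entry for rescheduling. */
static enum crit_state on_cpu_acquired(enum crit_state s)
{
	return s == CS_BLOCKED ? CS_ACTIVE : s;
}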
diff --git a/litmus/dgl.c b/litmus/dgl.c
index cced7c259735..7331855d43f7 100644
--- a/litmus/dgl.c
+++ b/litmus/dgl.c
@@ -199,12 +199,6 @@ static unsigned long try_acquire(struct dgl *dgl, struct dgl_resource *resource,
 	litmus_reschedule(greq->cpu);
 	dgl->running++;
 
-	if (greq->task) {
-		BUG_ON(tsk_rt(greq->task)->linked_on == NO_CPU);
-		set_rt_flags(greq->task, RT_F_RUNNING);
-		sched_trace_task_resume(greq->task);
-	}
-
 	dgl->cpu_acquired(greq->cpu);
 }
 
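The resume bookkeeping removed here does not disappear: setting RT_F_RUNNING and emitting sched_trace_task_resume() move into the plugin's cpu_acquired() callback (see litmus/sched_mc.c below), which also knows whether the entry was CS_BLOCKED. A toy sketch of the resulting split of responsibilities, with illustrative types rather than the real DGL API:

/* The generic lock layer only notifies the owning CPU; the plugin
 * callback decides what waking up means for its own state. */
struct grant { int cpu; void (*cpu_acquired)(int cpu); };

static void grant_resources(struct grant *g)
{
	/* No task flags or trace events here anymore; the plugin's
	 * cpu_acquired() now owns that bookkeeping. */
	g->cpu_acquired(g->cpu);
}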
diff --git a/litmus/domain.c b/litmus/domain.c
index 0852f30b428e..4fc8705c1e81 100644
--- a/litmus/domain.c
+++ b/litmus/domain.c
@@ -3,7 +3,7 @@
 
 #include <litmus/domain.h>
 
-void dummy_acquire(struct task_struct *t){};
+int dummy_acquire(struct task_struct *t){return 1;};
 void dummy_release(struct task_struct *t){};
 
 void domain_init(domain_t *dom,
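Since acquire_resources_t now reports success, the default hook must claim immediate success, or domains without shared resources would park every task in CS_BLOCKED forever. A condensed sketch of how the stub interacts with the new call site (the call-site excerpt paraphrases the sched_mc.c hunk below):

struct task_struct;

/* Domains with no shared resources always "acquire" instantly... */
static int dummy_acquire(struct task_struct *t) { (void)t; return 1; }

/* ...so the new linking logic never routes their tasks to CS_BLOCKED:
 *
 *	if (is_ghost(task) || dom->acquire_resources(task))
 *		start_crit(ce);		<-- always taken with dummy_acquire
 *	else
 *		ce->state = CS_BLOCKED;
 */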
diff --git a/litmus/jobs.c b/litmus/jobs.c
index 6ba40db9639f..fa55283e2134 100644
--- a/litmus/jobs.c
+++ b/litmus/jobs.c
@@ -1,7 +1,5 @@
 /* litmus/jobs.c - common job control code
- * TODO: modified heavily for sched_mc
  */
-
 #include <linux/sched.h>
 
 #include <litmus/litmus.h>
@@ -34,13 +32,15 @@ static inline void setup_release(struct task_struct *t, struct rt_job *job,
 
 static inline void setup_kernel_release(struct task_struct *t, lt_t release)
 {
+	lt_t now = litmus_clock();
+
 	BUG_ON(!t);
 
 	/* Record lateness before we set up the next job's
 	 * release and deadline. Lateness may be negative.
 	 */
 	t->rt_param.job_params.lateness =
-		(long long)litmus_clock() -
+		(long long)now -
 		(long long)t->rt_param.job_params.deadline;
 
 	t->rt.time_slice = 1;
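Hoisting litmus_clock() into a local is a consistency fix as much as style: two reads of the clock can return different values, so lateness and any later computation in the same function should derive from a single snapshot. The same pattern in plain C, with a user-space monotonic clock standing in for litmus_clock():

#include <stdio.h>
#include <time.h>

static long long now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
	long long deadline = now_ns(); /* pretend the job was due now */
	long long now = now_ns();      /* one snapshot... */
	long long lateness = now - deadline;
	long long next_release = now;  /* ...reused for both computations */
	printf("lateness=%lld next_release=%lld\n", lateness, next_release);
	return 0;
}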
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 065c767be846..b34ec4ee9e59 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -3,6 +3,7 @@
  * Implementation of the Mixed Criticality scheduling algorithm.
  *
  * (Per Mollison, Erickson, Anderson, Baruah, Scoredos 2010)
+ * TODO: optimize reschedule
  */
 #include <linux/spinlock.h>
 #include <linux/percpu.h>
@@ -40,7 +41,7 @@
  */
 struct cpu_entry {
 	int cpu;
-	int lock_acquired;
+	enum crit_level crit_signal;
 	struct task_struct* scheduled;
 	struct task_struct* will_schedule;
 	struct task_struct* linked;
@@ -85,12 +86,11 @@ static int interrupt_cpu;
 #define get_crit_entry_for(cpu, level) (&per_cpu(cpus, cpu).crit_entries[level])
 
 /*
- * Put in requests for resources needed by @t. If @t is a server, this will
- * set @t's np flag to reflect resources held by @t's children.
+ * Put in requests for resources needed by @t.
  */
-static void acquire_resources(struct task_struct *t)
+static int acquire_resources(struct task_struct *t)
 {
-	int cpu;
+	int cpu, acquired;
 
 	/* Can't contend for resources if not logically running */
 	BUG_ON(tsk_rt(t)->linked_on == NO_CPU);
@@ -111,7 +111,10 @@ static void acquire_resources(struct task_struct *t)
 		make_np(t);
 	}
 
+	acquired = has_resources(t, cpu);
 	raw_spin_unlock(&dgl_lock);
+
+	return acquired;
 }
 
 static void release_resources(struct task_struct *t)
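Note that the return value is computed before dropping dgl_lock: once the lock is released, a remote grant or steal could change what has_resources() reports. The same pattern modeled in user space, with a pthread mutex standing in for dgl_lock and the request bookkeeping stubbed out:

#include <pthread.h>

static pthread_mutex_t dgl_lock = PTHREAD_MUTEX_INITIALIZER;

static void put_in_requests(int task) { (void)task; /* queue requests */ }
static int  has_resources(int task)   { (void)task; return 1; /* stub */ }

static int acquire_resources(int task)
{
	int acquired;

	pthread_mutex_lock(&dgl_lock);
	put_in_requests(task);
	/* Must be read while still holding the lock, or a concurrent
	 * grant could invalidate the answer before we return it. */
	acquired = has_resources(task);
	pthread_mutex_unlock(&dgl_lock);

	return acquired;
}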
@@ -244,17 +247,34 @@ static inline void cancel_ghost(struct crit_entry *ce)
 /*
  * Arm ghost timer. Will merge timers if the option is specified.
  */
-static inline void arm_ghost(struct crit_entry *ce, lt_t fire)
+static inline void start_crit(struct crit_entry *ce)
 {
+	lt_t fire;
+	struct task_struct *task = ce->linked;
+
+	BUG_ON(ce->state != CS_ACTIVE);
+
+	if (is_ghost(task) && CRIT_LEVEL_A != tsk_mc_crit(task)) {
+		/* There is a level-A timer that will force a
+		 * preemption, so we don't set this for level-A
+		 * tasks. Otherwise reset the budget timer
+		 */
+		task->se.exec_start = litmus_clock();
+		fire = task->se.exec_start + budget_remaining(task);
+
 #ifdef CONFIG_MERGE_TIMERS
 	add_event(crit_cpu(ce)->event_group, &ce->event, fire);
 #else
 	__hrtimer_start_range_ns(&ce->timer,
 			ns_to_ktime(fire),
 			0 /* delta */,
 			HRTIMER_MODE_ABS_PINNED,
 			0 /* no wakeup */);
 #endif
+	}
+
+	sched_trace_server_switch_to(ce_sid(ce), 0, -task->pid,
+			get_rt_job(task));
 }
 
 /*
@@ -313,21 +333,13 @@ static void link_task_to_crit(struct crit_entry *ce,
 	/* Actually link task */
 	ce->linked = task;
 	if (task) {
+		/* Block if task cannot acquire resources */
 		task->rt_param.linked_on = crit_cpu(ce)->cpu;
-		if (is_ghost(task) && CRIT_LEVEL_A != tsk_mc_crit(task)) {
-			/* There is a level-A timer that will force a
-			 * preemption, so we don't set this for level-A
-			 * tasks. Otherwise reset the budget timer
-			 */
-			task->se.exec_start = litmus_clock();
-			when_to_fire = task->se.exec_start + budget_remaining(task);
-			arm_ghost(ce, when_to_fire);
-		}
-		sched_trace_server_switch_to(ce_sid(ce), 0, -task->pid,
-				get_rt_job(ce->linked));
 
-		if (!is_ghost(task))
-			ce->domain->acquire_resources(task);
+		if (is_ghost(task) || ce->domain->acquire_resources(task))
+			start_crit(ce);
+		else
+			ce->state = CS_BLOCKED;
 	}
 }
 
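This hunk is where the commit message lands: a task that fails to acquire its resources is still linked, but its level is not started, so no budget timer runs while it blocks. The decision logic, condensed into a standalone sketch with simplified types and an invented function name:

enum state { ACTIVE, BLOCKED };

struct entry { enum state state; int (*acquire)(int task); };

/* Returns 1 if start_crit() should arm the budget timer now,
 * 0 if the entry waits, unbilled, for cpu_acquired(). */
static int link_task(struct entry *ce, int task, int is_ghost)
{
	if (is_ghost || ce->acquire(task)) {
		ce->state = ACTIVE;
		return 1;
	}
	ce->state = BLOCKED;
	return 0;
}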
@@ -356,7 +368,10 @@ static void job_arrival(struct task_struct *task)
 		 * causing the system to crash when the task is scheduled
 		 * in two places simultaneously.
 		 */
-		TRACE_MC_TASK(task, "Delayed arrival of scheduled task\n");
+		TRACE_MC_TASK(task, "Delayed arrival of scheduled task, "
+			      "linked: %d, sched: %d, queued: %d\n",
+			      tsk_rt(task)->linked_on, tsk_rt(task)->scheduled_on,
+			      is_queued(task));
 	}
 	raw_spin_unlock(dom->lock);
 }
@@ -592,9 +607,10 @@ static void check_for_preempt(struct domain *dom)
 			ce->linked != entry->linked;
 		higher_prio = mc_preempt_needed(dom, ce->linked);
 
-		if (was_ghost)
+		if (was_ghost) {
 			preempt_cpu(entry, ce->linked);
-		else if (higher_prio)
+			start_crit(ce);
+		} else if (higher_prio)
 			preempt_crit(dom, ce);
 
 		if (was_ghost || higher_prio) {
@@ -695,7 +711,8 @@ static void job_completion(struct task_struct *task, int forced)
 	}
 
 	now = litmus_clock();
-	if (lt_before(get_user_release(task), now) || forced) {
+	if (lt_before(get_user_release(task), now)) {
+		TRACE_TASK(task, "Executable task going back to running\n");
 		set_rt_flags(task, RT_F_RUNNING);
 	}
 
@@ -990,6 +1007,9 @@ out:
 	return ret;
 }
 
+/*
+ * Caller must hold the entry lock.
+ */
 void pick_next_task(struct cpu_entry *entry)
 {
 	int i;
@@ -997,6 +1017,8 @@ void pick_next_task(struct cpu_entry *entry)
 	struct domain *dom;
 	struct task_struct *dtask, *ready_task;
 
+	STRACE("Picking next task\n");
+
 	for (i = 0; i < NUM_CRIT_LEVELS && !entry->linked; i++) {
 		ce = &entry->crit_entries[i];
 		dom = ce->domain;
@@ -1043,10 +1065,10 @@ void pick_next_task(struct cpu_entry *entry)
 static struct task_struct* mc_schedule(struct task_struct* prev)
 {
 	unsigned long flags;
-	struct crit_entry *ce;
+	int out_of_time, sleep, preempt, exists, blocks, global, lower;
 	struct cpu_entry* entry = &__get_cpu_var(cpus);
-	int out_of_time, sleep, preempt, exists, blocks, global, lower, update;
 	struct task_struct *next = NULL;
+	struct crit_entry *ce;
 
 	local_irq_save(flags);
 
@@ -1061,7 +1083,6 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 
 	raw_spin_lock(&entry->lock);
 
-	/* Sanity checking */
 	BUG_ON(entry->scheduled && entry->scheduled != prev);
 	BUG_ON(entry->scheduled && !is_realtime(prev));
 	BUG_ON(is_realtime(prev) && !entry->scheduled);
@@ -1069,15 +1090,14 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 	/* Determine state */
 	exists = entry->scheduled != NULL;
 	blocks = exists && !is_running(entry->scheduled);
-	out_of_time = exists && budget_enforced(entry->scheduled) &&
-		      budget_exhausted(entry->scheduled);
+	out_of_time = exists && budget_exhausted(entry->scheduled);
 	sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
 	global = exists && is_global_task(entry->scheduled);
 	preempt = entry->scheduled != entry->linked;
 	lower = exists && preempt && entry->linked &&
 		tsk_mc_crit(entry->scheduled) > tsk_mc_crit(entry->linked);
 
-	TRACE(TS " blocks:%d out_of_time:%d sleep:%d preempt:%d, now: %llu\n",
+	TRACE(TS " block:%d oot:%d sleep:%d preempt:%d, now: %llu\n",
 	      TA(prev), blocks, out_of_time, sleep, preempt, litmus_clock());
 
 	if (exists)
@@ -1085,7 +1105,6 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 
 	raw_spin_unlock(&entry->lock);
 
-
 #ifdef CONFIG_PLUGIN_MC_REDIRECT
 	if (smp_processor_id() == interrupt_cpu)
 		fix_global_levels();
@@ -1109,47 +1128,39 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 		job_arrival(entry->scheduled);
 	}
 
+	/* Call before processing signals so any subsequent signal will cause
+	 * a reschedule.
+	 */
+	sched_state_task_picked();
+
+	/* A remote processor unblocked one of our crit levels */
+	if (entry->crit_signal != NUM_CRIT_LEVELS) {
+		ce = &entry->crit_entries[entry->crit_signal];
+		check_for_preempt(ce->domain);
+		entry->crit_signal = NUM_CRIT_LEVELS;
+	}
+
 	raw_spin_lock(&entry->lock);
 
 	/* Pick next task if none is linked */
 	if (!entry->linked)
 		pick_next_task(entry);
 
-	/* Set this now so that any reschedule signals received after this
-	 * point will cause another reschedule
-	 */
-	sched_state_task_picked();
-
-	/* Ghost task acquired lock, is no longer ghost */
-	update = 0;
-	if (entry->lock_acquired < NUM_CRIT_LEVELS) {
-		ce = &entry->crit_entries[entry->lock_acquired];
-		if (ce->linked && !is_ghost(ce->linked) &&
-		    ce->linked != entry->linked) {
-			link_task_to_cpu(entry, ce->linked);
-			update = 1;
-		}
-		entry->lock_acquired = NUM_CRIT_LEVELS;
-	}
-
 	/* Schedule next task */
 	next = entry->linked;
 	if (next)
 		next->rt_param.scheduled_on = entry->cpu;
 	entry->will_schedule = next;
 
-	if (update)
-		update_crit_levels(entry); /* Will release lock */
-	else
-		raw_spin_unlock(&entry->lock);
+	raw_spin_unlock_irqrestore(&entry->lock, flags);
 
-	local_irq_restore(flags);
 	if (next) {
-		BUG_ON(!get_rt_job(next));
+		BUG_ON(!is_released(next, litmus_clock()));
 		TRACE_MC_TASK(next, "Picked this task\n");
 	} else if (exists && !next)
 		STRACE("CPU %d becomes idle at %llu\n",
 		       entry->cpu, litmus_clock());
+
 	return next;
 }
 
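The reordering of sched_state_task_picked() is the subtle part of this hunk, and the patch's own comment states the invariant: the pick point must be marked before the crit_signal check, so that a signal raised by a remote cpu_acquired() after the check forces another scheduling pass instead of being lost. A small C11-atomics model of that ordering, with all names invented:

#include <stdatomic.h>

#define NO_SIGNAL (-1)

static atomic_int picked;                  /* sched_state_task_picked() */
static atomic_int crit_signal = NO_SIGNAL; /* entry->crit_signal */

/* Remote CPU: raise the signal; if the local CPU already passed its
 * pick point, a reschedule (litmus_reschedule) would be requested. */
static void remote_unblock(int level)
{
	atomic_store(&crit_signal, level);
	if (atomic_load(&picked)) {
		/* litmus_reschedule(cpu); */
	}
}

/* Local CPU: mark the pick point FIRST, then drain the signal. Any
 * signal stored after the drain sees picked == 1 and retriggers us. */
static void schedule_pass(void)
{
	int sig;

	atomic_store(&picked, 1);
	sig = atomic_exchange(&crit_signal, NO_SIGNAL);
	if (sig != NO_SIGNAL) {
		/* check_for_preempt(ce->domain); */
	}
}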
@@ -1370,23 +1381,45 @@ static inline void init_edf_domain(struct domain *dom, rt_domain_t *rt,
 #endif
 }
 
+/*
+ * Setup and send signal to CPU for resource acquisition. To avoid touching
+ * CPU locks, all CPU state modifications are delayed until the signal is
+ * processed.
+ */
 static void cpu_acquired(int cpu)
 {
 	struct cpu_entry *entry = &per_cpu(cpus, cpu);
-	STRACE("Lock acquired for cpu %d\n", cpu);
-	entry->lock_acquired = CRIT_LEVEL_B;
-	litmus_reschedule(entry->cpu);
+	struct crit_entry *ce = &entry->crit_entries[CRIT_LEVEL_B];
+
+	TRACE_CRIT_ENTRY(ce, "Acquired lock\n");
+
+	BUG_ON(!ce->linked);
+	BUG_ON(get_rt_flags(ce->linked) & RT_F_SLEEP);
+
+	set_rt_flags(ce->linked, RT_F_RUNNING);
+	sched_trace_task_resume(ce->linked);
+
+	if (ce->state == CS_BLOCKED) {
+		entry->crit_signal = CRIT_LEVEL_B;
+		/* Yes this is ok for race conditions, but only because no other
+		 * state will ever apply to a partitioned crit entry
+		 */
+		ce->state = CS_ACTIVE;
+		litmus_reschedule(cpu);
+	}
 }
 
 struct domain_data *ce_domain_for(int);
 static int __init init_mc(void)
 {
-	int cpu;
-	struct cpu_entry *entry;
-	struct domain_data *dom_data;
+	int cpu, name_size;
+	char *lock_name;
 	rt_domain_t *rt;
 	raw_spinlock_t *a_dom_lock, *b_dom_lock, *c_dom_lock; /* For lock debugger */
+	struct cpu_entry *entry;
+	struct domain_data *dom_data;
 	struct ce_dom_data *ce_data;
+	struct lock_class_key *lock_key;
 
 	for_each_online_cpu(cpu) {
 		entry = &per_cpu(cpus, cpu);
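crit_signal reuses NUM_CRIT_LEVELS as a "nothing pending" sentinel, the same trick the old lock_acquired field used, and the hunk below initializes it that way. Isolated, the pattern looks like this (a model only; the level names are abbreviated, not the full LITMUS^RT set):

enum crit_level { CRIT_LEVEL_A, CRIT_LEVEL_B, CRIT_LEVEL_C, NUM_CRIT_LEVELS };

struct cpu_state { enum crit_level crit_signal; };

/* One enum field encodes both "which level was signaled" and, via the
 * out-of-range sentinel, "no signal pending". */
static void signal_level(struct cpu_state *e, enum crit_level l)
{
	e->crit_signal = l;
}

static int consume_signal(struct cpu_state *e)
{
	if (e->crit_signal == NUM_CRIT_LEVELS)
		return 0;                         /* nothing pending */
	e->crit_signal = NUM_CRIT_LEVELS;         /* clear */
	return 1;
}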
@@ -1395,9 +1428,15 @@ static int __init init_mc(void)
 		entry->cpu = cpu;
 		entry->scheduled = NULL;
 		entry->linked = NULL;
-		entry->lock_acquired = NUM_CRIT_LEVELS;
+		entry->crit_signal = NUM_CRIT_LEVELS;
 
+		/* Trick lockdep for CPU locks */
+		name_size = sizeof(*lock_name) * LITMUS_LOCKDEP_NAME_MAX_LEN;
+		lock_name = kmalloc(name_size, GFP_ATOMIC);
+		lock_key = kmalloc(sizeof(*lock_key), GFP_ATOMIC);
 		raw_spin_lock_init(&entry->lock);
+		LOCKDEP_DYNAMIC_ALLOC(&entry->lock, lock_key, lock_name,
+				"entry%d", cpu);
 
 #ifdef CONFIG_PLUGIN_MC_REDIRECT
 		raw_spin_lock_init(&entry->redir_lock);