author    Jonathan Herman <hermanjl@cs.unc.edu>  2013-03-30 11:21:59 -0400
committer Jonathan Herman <hermanjl@cs.unc.edu>  2013-03-30 11:21:59 -0400
commit    b2ecb9f8d20baa3edfb305d263a7f0902ac019f3 (patch, wip-mc)
tree      77af90862b09542fa6fbcae7117b41b19086e552 /litmus/sched_mc.c
parent    c31763ecf41cbcdb61e8960f0354d8b2e39a8645 (diff)

    Removed ARM-specific hacks which disabled less common mixed-criticality features.
Diffstat (limited to 'litmus/sched_mc.c'):

 litmus/sched_mc.c (-rw-r--r--) | 245
 1 file changed, 154 insertions(+), 91 deletions(-)
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 6edf86935a29..c8e50d30a483 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -30,7 +30,7 @@
 #include <litmus/dgl.h>
 #include <litmus/color.h>
 #include <litmus/way_tracker.h>
-#warning "MUST ADD CHECK FOR MAX WAYS"
+
 struct mc_signal {
         int update:1;
         int preempt:1;
@@ -207,6 +207,7 @@ static int cpu_lower_prio(struct bheap_node *a, struct bheap_node *b)
 static int mc_preempt_needed(struct domain *dom, struct task_struct* curr)
 {
         struct task_struct *next = dom->peek_ready(dom);
+
         if (!next || !curr) {
                 return next && !curr;
         } else {
@@ -223,10 +224,13 @@ static int mc_preempt_needed(struct domain *dom, struct task_struct* curr)
 static void update_crit_position(struct crit_entry *ce)
 {
         struct bheap *heap;
+
         if (is_global(ce->domain)) {
                 heap = domain_data(ce->domain)->heap;
+
                 BUG_ON(!heap);
                 BUG_ON(!bheap_node_in_heap(ce->node));
+
                 bheap_delete(cpu_lower_prio, heap, ce->node);
                 bheap_insert(cpu_lower_prio, heap, ce->node);
         }
@@ -239,6 +243,7 @@ static void update_crit_position(struct crit_entry *ce)
 static void fix_crit_position(struct crit_entry *ce)
 {
         struct server *server = &ce->server;
+
         if (is_global(ce->domain) && server->in_transit) {
                 server_state_change(server, server->state, 0);
                 update_crit_position(ce);
@@ -368,7 +373,7 @@ static void link_task_to_crit(struct crit_entry *ce,
                 server_state_change(ce_server, SS_ACTIVE, 0);
         }
 
-        TRACE_MC_TASK(ce->server.linked, "Unlinking\n");
+        /* TRACE_MC_TASK(ce->server.linked, "Unlinking\n"); */
 
         stop_crit(ce);
         tsk_rt(ce->server.linked)->server.parent = 0;
@@ -552,9 +557,7 @@ static struct task_struct* preempt_crit(struct domain *dom, struct crit_entry *c
 
         /* Per-domain preemption */
         link_task_to_crit(ce, task);
-        /* if (old && can_requeue(old)) { */
-        /*         dom->requeue(dom, old); */
-        /* } */
+
         update_crit_position(ce);
 
         /* Preempt actual execution if this is a running task.
@@ -574,6 +577,7 @@ static struct task_struct* preempt_crit(struct domain *dom, struct crit_entry *c
 
 /**
  * update_crit_levels() - Update criticality entries for the new cpu state.
+ * Disables criticality levels lower than @entry's currenly linked task.
  * This should be called after a new task has been linked to @entry.
  * The caller must hold the @entry->lock, but this method will release it.
  */
@@ -585,6 +589,8 @@ static void update_crit_levels(struct cpu_entry *entry)
         struct task_struct *readmit[NUM_CRIT_LEVELS];
         enum crit_level level = entry_level(entry);
 
+        STRACE("Updating crit levels for cpu %d\n", entry->cpu);
+
         /* Remove lower priority tasks from the entry */
         for (i = level + 1; i < NUM_CRIT_LEVELS; i++) {
                 ce = &entry->crit_entries[i];
@@ -609,8 +615,10 @@ static void update_crit_levels(struct cpu_entry *entry)
                                 link_task_to_crit(ce, NULL);
                         }
                         TRACE_CRIT_ENTRY(ce, "Removing lower crit\n");
-                        server_state_change(server, SS_REMOVED, 1);
-
+                        server_state_change(server, SS_REMOVED,
+                                            is_global(ce->domain)?1:0);
+                } else {
+                        TRACE_CRIT_ENTRY(ce, "Already removed!\n");
                 }
         }
         /* Need to unlock so we can access domains */
@@ -669,21 +677,26 @@ static void check_global_preempt(struct domain *dom)
         }
 }
 
-static void check_partitioned_preempt(struct domain *dom)
+static void check_partitioned_preempt(struct cpu_entry *entry,
+                                      struct domain *dom)
 {
-        struct cpu_entry *entry;
-        struct crit_entry *ce;
+        struct crit_entry *ce = domain_data(dom)->crit_entry;
 
-        ce = domain_data(dom)->crit_entry;
-        entry = crit_cpu(ce);
+        /* Cache next task */
+        dom->peek_ready(dom);
+
+        raw_spin_lock(&entry->lock);
 
         if (ce->server.state == SS_REMOVED ||
             !mc_preempt_needed(dom, ce->server.linked)) {
-                return;
+                goto out_unlock;
         }
 
         entry->signal.preempt = 1;
         litmus_reschedule(entry->cpu);
+
+out_unlock:
+        raw_spin_unlock(&entry->lock);
 }
 
 /**
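The hunk above moves the entry-lock acquisition into check_partitioned_preempt() itself and routes every early return through a single out_unlock label, so the lock is released on exactly one path. The standalone userspace sketch below shows the same single-exit locking pattern in isolation; it uses pthread spinlocks and invented names (entry_t, preempt_needed, check_preempt) and is not LITMUS^RT code.

/* Illustrative sketch only: lock, check, signal, single unlock path. */
#include <pthread.h>
#include <stdio.h>

typedef struct {
        pthread_spinlock_t lock;
        int removed;        /* stands in for ce->server.state == SS_REMOVED */
        int preempt_signal; /* stands in for entry->signal.preempt */
} entry_t;

static int preempt_needed(void)
{
        return 1; /* stand-in for mc_preempt_needed() */
}

static void check_preempt(entry_t *e)
{
        pthread_spin_lock(&e->lock);

        if (e->removed || !preempt_needed())
                goto out_unlock; /* single exit: nothing to do */

        e->preempt_signal = 1;   /* record the preemption request */

out_unlock:
        pthread_spin_unlock(&e->lock);
}

int main(void)
{
        entry_t e = { .removed = 0, .preempt_signal = 0 };

        pthread_spin_init(&e.lock, PTHREAD_PROCESS_PRIVATE);
        check_preempt(&e);
        printf("preempt signalled: %d\n", e.preempt_signal);
        pthread_spin_destroy(&e.lock);
        return 0;
}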
@@ -701,12 +714,7 @@ static void check_for_preempt(struct domain *dom)
                 ce = domain_data(dom)->crit_entry;
                 entry = crit_cpu(ce);
 
-                /* Cache next task */
-                dom->peek_ready(dom);
-
-                raw_spin_lock(&entry->lock);
-                check_partitioned_preempt(dom);
-                raw_spin_unlock(&entry->lock);
+                check_partitioned_preempt(entry, dom);
         }
 }
 
@@ -798,25 +806,24 @@ static void job_completion(struct task_struct *task, int forced)
 
         if (lt_before(get_user_release(task), litmus_clock()) ||
             (release_server && tsk_rt(task)->completed)){
-                TRACE_TASK(task, "Executable task going back to running\n");
+                TRACE_MC_TASK(task, "Executable task going back to running\n");
                 tsk_rt(task)->completed = 0;
         }
 
         if (release_server || forced) {
-                /* TODO: Level A does this independently and should not */
-                if (release_server && CRIT_LEVEL_A != tsk_mc_crit(task)) {
-                        prepare_for_next_period(task);
-                }
+                prepare_for_next_period(task);
 
-                TRACE_TASK(task, "Is released: %d, now: %llu, rel: %llu\n",
+                TRACE_MC_TASK(task, "Is released: %d, now: %llu, rel: %llu\n",
                            is_released(task, litmus_clock()), litmus_clock(),
                            get_release(task));
 
                 /* Requeue non-blocking tasks */
                 if (is_running(task)) {
                         job_arrival(task);
                 }
         } else if (is_ghost(task)) {
+                BUG_ON(tsk_rt(task)->linked_on == NO_CPU);
+
                 entry = &per_cpu(cpus, tsk_rt(task)->linked_on);
                 ce = &entry->crit_entries[tsk_mc_crit(task)];
 
@@ -847,24 +854,27 @@ static enum hrtimer_restart mc_ghost_exhausted(struct hrtimer *timer)
 #endif
         struct task_struct *tmp = NULL;
         struct cpu_entry *entry = crit_cpu(ce);
-        TRACE("Firing here at %llu\n", litmus_clock());
-        TRACE_CRIT_ENTRY(ce, "For this\n");
+        int resched = 0;
+
+        TRACE_CRIT_ENTRY(ce, "Firing here at %llu\n", litmus_clock());
 
         raw_spin_lock(&entry->lock);
 
-        if (is_ghost(ce->server.linked)) {
+        if (ce->server.linked && is_ghost(ce->server.linked)) {
                 update_server_time(ce->server.linked);
                 if (budget_exhausted(ce->server.linked)) {
                         tmp = ce->server.linked;
                 }
         } else {
-                litmus_reschedule(crit_cpu(ce)->cpu);
+                resched = 1;
         }
 
         raw_spin_unlock(&entry->lock);
 
         if (tmp)
                 job_completion(tmp, 1);
+        else if (resched)
+                litmus_reschedule(entry->cpu);
 
 #ifndef CONFIG_MERGE_TIMERS
         return HRTIMER_NORESTART;
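The timer handler above now only records a decision (tmp, resched) while entry->lock is held and performs job_completion()/litmus_reschedule() after the lock is dropped. Below is a minimal userspace sketch of that record-then-act-outside-the-lock pattern; the globals and helpers (timer_fired, complete_job, reschedule) are invented for illustration and do not model the real LITMUS^RT API.

/* Illustrative sketch only: decide under the lock, act after releasing it. */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t entry_lock;
static int linked_exists = 1;    /* stand-in for ce->server.linked */
static int budget_exhausted = 1; /* stand-in for budget_exhausted() */

static void complete_job(void)  { printf("job completion\n"); }
static void reschedule(void)    { printf("reschedule\n"); }

static void timer_fired(void)
{
        int do_complete = 0, do_resched = 0;

        pthread_spin_lock(&entry_lock);
        if (linked_exists && budget_exhausted)
                do_complete = 1;   /* remember the decision, do not act yet */
        else
                do_resched = 1;
        pthread_spin_unlock(&entry_lock);

        /* act outside the critical section */
        if (do_complete)
                complete_job();
        else if (do_resched)
                reschedule();
}

int main(void)
{
        pthread_spin_init(&entry_lock, PTHREAD_PROCESS_PRIVATE);
        timer_fired();
        pthread_spin_destroy(&entry_lock);
        return 0;
}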
@@ -891,8 +901,6 @@ static lt_t __ce_timer_function(struct ce_dom_data *ce_data)
             ce->server.linked == ce_data->should_schedule)
         {
                 old_link = ce->server.linked;
-                link_task_to_crit(ce, NULL);
-                mc_ce_job_completion(dom, old_link);
         }
         raw_spin_unlock(&crit_cpu(ce)->lock);
 
@@ -900,7 +908,7 @@ static lt_t __ce_timer_function(struct ce_dom_data *ce_data)
 
         /* Job completion will check for preemptions by means of calling job
          * arrival if the task is not blocked */
-        if (NULL != old_link) {
+        if (old_link) {
                 STRACE("old_link " TS " so will call job completion\n", TA(old_link));
                 raw_spin_unlock(dom->lock);
                 job_completion(old_link, 1);
@@ -993,8 +1001,10 @@ static void mc_task_new(struct task_struct *t, int on_rq, int running)
         entry = &per_cpu(cpus, task_cpu(t));
         t->rt_param._domain = entry->crit_entries[level].domain;
 
+#ifdef CONFIG_SCHED_TASK_TRACE
         tsk_rt(t)->flush = 0;
         tsk_rt(t)->load = 0;
+#endif
 
         /* Userspace and kernelspace view of task state may differ.
          * Model kernel state as a budget enforced container
@@ -1098,16 +1108,17 @@ static void mc_task_exit(struct task_struct *task)
                 color_sched_out_task(task);
         }
 
+        /* TODO: restore. This was geting triggered by race conditions even when
+         * no level-A task was executing */
+        if (CRIT_LEVEL_A == tsk_mc_crit(task))
+                mc_ce_task_exit_common(task);
+
         remove_from_all(task);
         if (tsk_rt(task)->scheduled_on != NO_CPU) {
                 per_cpu(cpus, tsk_rt(task)->scheduled_on).scheduled = NULL;
                 tsk_rt(task)->scheduled_on = NO_CPU;
         }
 
-        /* TODO: restore. This was geting triggered by race conditions even when
-         * no level-A task was executing */
-        /* if (CRIT_LEVEL_A == tsk_mc_crit(task)) */
-        /*         mc_ce_task_exit_common(task); */
 
         local_irq_restore(flags);
 }
@@ -1259,30 +1270,30 @@ static void process_update_signal(struct cpu_entry *entry)
                 stop_crit(ce);
                 server_state_change(crit_server, SS_BLOCKED, 0);
         }
-
-
 }
 
-static void process_signals(struct cpu_entry *entry)
+static void process_preempt_signal(struct cpu_entry *entry)
 {
+        int i;
         struct domain *dom;
         struct crit_entry *ce;
-        struct mc_signal signal;
-        struct task_struct *preempted;
+        struct task_struct *preempted = NULL;
+        struct server *server;
 
-        ce = &entry->crit_entries[CRIT_LEVEL_B];
-        dom = ce->domain;
+        STRACE("Reading preempt signal\n");
 
-        /* Load signals */
-        raw_spin_lock(&entry->signal_lock);
-        signal = entry->signal;
-        clear_signal(&entry->signal);
-        raw_spin_unlock(&entry->signal_lock);
+        for (i = 0; i < NUM_CRIT_LEVELS; i++) {
+                ce = &entry->crit_entries[i];
+                dom = ce->domain;
+                server = &ce->server;
+                preempted = NULL;
 
-        if (signal.preempt) {
+                /* Swap locks. We cannot acquire a domain lock while
+                 * holding an entry lock or deadlocks will happen
+                 */
                 raw_spin_lock(dom->lock);
-                /* A higher-priority task may exist */
-                STRACE("Reading preempt signal\n");
+
+                /* Do domain stuff before grabbing CPU locks */
                 dom->peek_ready(dom);
 
                 raw_spin_lock(&entry->lock);
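The comment introduced in process_preempt_signal() codifies a lock-ordering rule: a domain lock is never acquired while a per-CPU entry lock is held, so the domain lock always comes first. The sketch below illustrates that discipline with plain pthread mutexes and made-up names (dom_lock, entry_lock, check_domain_then_entry); it is an analogy, not the kernel locking code itself.

/* Illustrative sketch only: consistent domain-then-entry lock ordering. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dom_lock   = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t entry_lock = PTHREAD_MUTEX_INITIALIZER;

/* Correct order: domain first, then entry. */
static void check_domain_then_entry(void)
{
        pthread_mutex_lock(&dom_lock);    /* "do domain stuff" first */
        pthread_mutex_lock(&entry_lock);  /* then grab the CPU state */

        printf("domain + entry state examined together\n");

        pthread_mutex_unlock(&entry_lock);
        pthread_mutex_unlock(&dom_lock);
}

/* A path that holds only the entry lock and discovers it needs domain state
 * must drop the entry lock and retry in the agreed order ("swap locks"),
 * rather than taking the domain lock while still holding the entry lock. */
static void entry_path_needs_domain(void)
{
        pthread_mutex_lock(&entry_lock);
        /* ...notice that domain state is needed... */
        pthread_mutex_unlock(&entry_lock);

        check_domain_then_entry();        /* reacquire in the correct order */
}

int main(void)
{
        check_domain_then_entry();
        entry_path_needs_domain();
        return 0;
}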
@@ -1308,26 +1319,106 @@ static void process_signals(struct cpu_entry *entry)
                         raw_spin_unlock(dom->lock);
                 }
 
-                raw_spin_lock(&entry->lock);
+                        break;
                 } else {
+                        raw_spin_unlock(&entry->lock);
                         raw_spin_unlock(dom->lock);
                 }
-        } else {
-                raw_spin_lock(&entry->lock);
+        }
+}
+
+static void process_signals(struct cpu_entry *entry)
+{
+        struct mc_signal signal;
+
+        /* Load signals */
+        raw_spin_lock(&entry->signal_lock);
+        signal = entry->signal;
+        clear_signal(&entry->signal);
+        raw_spin_unlock(&entry->signal_lock);
+
+        if (signal.preempt) {
+                process_preempt_signal(entry);
         }
 
+        raw_spin_lock(&entry->lock);
+
         if (signal.update) {
                 process_update_signal(entry);
         }
 }
 
+static void reschedule_if_signaled(struct cpu_entry *entry)
+{
+        struct mc_signal signal;
+
+        raw_spin_lock(&entry->signal_lock);
+        signal = entry->signal;
+        raw_spin_unlock(&entry->signal_lock);
+
+        if (signal.update || signal.preempt) {
+                litmus_reschedule_local();
+        }
+}
+
+static void pre_schedule(struct task_struct *prev)
+{
+        lt_t exec, start = litmus_clock();
+
+        /* Update userspace exec time */
+        if (prev && tsk_rt(prev)->last_exec_time) {
+                exec = start - tsk_rt(prev)->last_exec_time;
+                tsk_rt(prev)->user_job.exec_time += exec;
+        }
+
+        /* Flush task pages */
+        if (prev && tsk_mc_crit(prev) == CRIT_LEVEL_B &&
+            is_realtime(prev) && get_rt_job(prev) > 1 && lock_cache) {
+                color_sched_out_task(prev);
+
+#ifdef CONFIG_SCHED_TASK_TRACE
+                tsk_rt(prev)->load += litmus_clock() - start;
+#endif
+        }
+
+        TS_LVLA_SCHED_START;
+        TS_LVLB_SCHED_START;
+        TS_LVLC_SCHED_START;
+}
+
+static void post_schedule(struct task_struct *next)
+{
+        lt_t start;
+
+        switch (tsk_mc_crit(next)) {
+        case CRIT_LEVEL_A: TS_LVLA_SCHED_END(next); break;
+        case CRIT_LEVEL_B: TS_LVLB_SCHED_END(next); break;
+        case CRIT_LEVEL_C: TS_LVLC_SCHED_END(next); break;
+        }
+
+        /* Cache in task pages */
+        if (tsk_mc_crit(next) == CRIT_LEVEL_B && lock_cache &&
+            get_rt_job(next) > 1) {
+                start = litmus_clock();
+
+                color_sched_in_task(next);
+
+#ifdef CONFIG_SCHED_TASK_TRACE
+                BUG_ON(tsk_rt(next)->load);
+                tsk_rt(next)->load = litmus_clock() - start;
+#endif
+        }
+
+        tsk_rt(next)->last_exec_time = litmus_clock();
+}
+
 /**
  * mc_schedule() - Return next task which should be scheduled.
  */
 static struct task_struct* mc_schedule(struct task_struct* prev)
 {
-        lt_t start, exec;
-        int out_of_time, sleep, preempt, exists, blocks, global, lower, work;
+
+        int out_of_time, sleep, preempt, exists, blocks, global, lower;
         struct cpu_entry* entry = &__get_cpu_var(cpus);
         struct task_struct *next = NULL;
 
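The new pre_schedule()/post_schedule() helpers pull the per-task accounting out of mc_schedule(), leaving the scheduler body focused on locking and task selection. The sketch below shows the shape of that split in plain userspace C; the task struct, pick_next() and the timing calls are invented stand-ins, not the LITMUS^RT API.

/* Illustrative sketch only: accounting hooks around a scheduling decision. */
#include <stdio.h>
#include <time.h>

struct task { const char *name; long long last_exec_ns; };

static long long now_ns(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* charge the outgoing task for the time it ran */
static void pre_schedule(struct task *prev)
{
        if (prev && prev->last_exec_ns)
                printf("%s ran for %lld ns\n", prev->name,
                       now_ns() - prev->last_exec_ns);
}

/* stamp the task that is about to run */
static void post_schedule(struct task *next)
{
        next->last_exec_ns = now_ns();
        printf("dispatching %s\n", next->name);
}

static struct task *pick_next(struct task *candidate)
{
        return candidate; /* stand-in for the real selection logic */
}

int main(void)
{
        struct task a = { "prev-task", now_ns() }, b = { "next-task", 0 };
        struct task *next;

        pre_schedule(&a);        /* accounting for the outgoing task */
        next = pick_next(&b);    /* scheduling decision proper */
        if (next)
                post_schedule(next); /* accounting for the incoming task */
        return 0;
}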
@@ -1340,22 +1431,7 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
                 low_prio_arrival(entry->will_schedule);
         }
 
-        if (prev && tsk_rt(prev)->last_exec_time) {
-                exec = litmus_clock() - tsk_rt(prev)->last_exec_time;
-                tsk_rt(prev)->user_job.exec_time += exec;
-        }
-
-        if (prev && tsk_mc_crit(prev) == CRIT_LEVEL_B &&
-            is_realtime(prev) && get_rt_job(prev) > 1 && lock_cache) {
-                start = litmus_clock();
-                work = color_sched_out_task(prev);
-                tsk_rt(prev)->flush = litmus_clock() - start;
-                ++tsk_rt(prev)->flush_work;
-        }
-
-        TS_LVLA_SCHED_START;
-        TS_LVLB_SCHED_START;
-        TS_LVLC_SCHED_START;
+        pre_schedule(prev);
 
         raw_spin_lock(&entry->lock);
 
@@ -1406,9 +1482,7 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
                 job_arrival(entry->scheduled);
         }
 
-        /* TODO: move this down somehow */
-        sched_state_task_picked();
-
+        /* Acquires the entry lock */
         process_signals(entry);
 
         /* Pick next task if none is linked */
@@ -1424,23 +1498,12 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 
         raw_spin_unlock(&entry->lock);
 
-        if (next) {
-                switch (tsk_mc_crit(next)) {
-                case CRIT_LEVEL_A: TS_LVLA_SCHED_END(next); break;
-                case CRIT_LEVEL_B: TS_LVLB_SCHED_END(next); break;
-                case CRIT_LEVEL_C: TS_LVLC_SCHED_END(next); break;
-                }
-        }
+        sched_state_task_picked();
 
-        if (next && tsk_mc_crit(next) == CRIT_LEVEL_B && lock_cache && get_rt_job(next) > 1) {
-                start = litmus_clock();
-                work = color_sched_in_task(next);
-                tsk_rt(next)->load = litmus_clock() - start;
-                tsk_rt(next)->load_work = work;
-        }
+        reschedule_if_signaled(entry);
 
         if (next) {
-                tsk_rt(next)->last_exec_time = litmus_clock();
+                post_schedule(next);
                 TRACE_MC_TASK(next, "Picked this task\n");
         } else {
                 STRACE("CPU %d idles at %llu\n", entry->cpu, litmus_clock());