Diffstat (limited to 'litmus/sched_cedf.c')

 litmus/sched_cedf.c | 133
 1 file changed, 96 insertions(+), 37 deletions(-)
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 5e977dd2fef0..87f8bc9bb50b 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -44,6 +44,10 @@
 
 #include <litmus/bheap.h>
 
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+#include <litmus/affinity.h>
+#endif
+
 /* to configure the cluster size */
 #include <litmus/litmus_proc.h>
 
@@ -95,7 +99,7 @@ typedef struct clusterdomain {
 	struct bheap_node *heap_node;
 	struct bheap cpu_heap;
 	/* lock for this cluster */
-#define cedf_lock domain.ready_lock
+#define cluster_lock domain.ready_lock
 } cedf_domain_t;
 
 /* a cedf_domain per cluster; allocation is done at init/activation time */
@@ -204,7 +208,7 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 }
 
 /* unlink - Make sure a task is not linked any longer to an entry
- * where it was linked before. Must hold cedf_lock.
+ * where it was linked before. Must hold cluster_lock.
  */
 static noinline void unlink(struct task_struct* t)
 {
@@ -240,7 +244,7 @@ static void preempt(cpu_entry_t *entry)
 }
 
 /* requeue - Put an unlinked task into gsn-edf domain.
- * Caller must hold cedf_lock.
+ * Caller must hold cluster_lock.
  */
 static noinline void requeue(struct task_struct* task)
 {
@@ -257,11 +261,34 @@ static noinline void requeue(struct task_struct* task)
 	}
 }
 
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+static cpu_entry_t* cedf_get_nearest_available_cpu(
+	cedf_domain_t *cluster, cpu_entry_t *start)
+{
+	cpu_entry_t *affinity;
+
+	get_nearest_available_cpu(affinity, start, cedf_cpu_entries,
+#ifdef CONFIG_RELEASE_MASTER
+		cluster->domain.release_master
+#else
+		NO_CPU
+#endif
+		);
+
+	/* make sure CPU is in our cluster */
+	if (affinity && cpu_isset(affinity->cpu, *cluster->cpu_map))
+		return(affinity);
+	else
+		return(NULL);
+}
+#endif
+
+
 /* check for any necessary preemptions */
 static void check_for_preemptions(cedf_domain_t *cluster)
 {
 	struct task_struct *task;
-	cpu_entry_t* last;
+	cpu_entry_t *last;
 
 	for(last = lowest_prio_cpu(cluster);
 	    edf_preemption_needed(&cluster->domain, last->linked);
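The helper added above wraps get_nearest_available_cpu() from litmus/affinity.h, which searches outward from the CPU the task last ran on and yields the closest cpu_entry_t that is available ("available" meaning no task is currently linked to it, and skipping the release master when one is configured). The cluster-membership check at the end is what makes the wrapper C-EDF-specific: the nearest CPU by cache distance may belong to a different cluster, in which case the caller must fall back to the default path. As a rough, standalone model of the search order only — the topology helper, the busy[] availability test, and all names below are illustrative stand-ins, not the actual LITMUS^RT data structures:

#include <stdio.h>

#define NR_CPUS   8
#define NR_LEVELS 3	/* e.g. shared L2, shared L3, same chip */

/* Toy topology: pairs share level 0, quads level 1, all eight level 2.
 * LITMUS^RT derives the real neighborhoods from the CPU topology at boot. */
static unsigned neigh_mask(int lvl, int cpu)
{
	int width = 2 << lvl;			/* 2, 4, 8 */
	int base = cpu - (cpu % width);
	return ((1u << width) - 1) << base;
}

static int busy[NR_CPUS];			/* stand-in availability test */

/* Return the available CPU sharing the lowest cache level with `start`
 * (so cache-warmer CPUs win), or -1 if none is available at any level. */
static int nearest_available_cpu(int start, int release_master)
{
	int lvl, cpu;

	for (lvl = 0; lvl < NR_LEVELS; lvl++)
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if (cpu != release_master && !busy[cpu] &&
			    (neigh_mask(lvl, start) & (1u << cpu)))
				return cpu;
	return -1;
}

int main(void)
{
	busy[4] = busy[5] = 1;
	/* CPU 5 and its level-0 sibling (4) are busy, so the search
	 * widens to the quad {4,5,6,7} and picks CPU 6. */
	printf("nearest to 5: %d\n", nearest_available_cpu(5, -1));
	return 0;
}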
@@ -270,8 +297,20 @@ static void check_for_preemptions(cedf_domain_t *cluster)
 		task = __take_ready(&cluster->domain);
 		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
 		      task->pid, last->cpu);
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+		{
+			cpu_entry_t *affinity =
+				cedf_get_nearest_available_cpu(cluster,
+					&per_cpu(cedf_cpu_entries, task_cpu(task)));
+			if(affinity)
+				last = affinity;
+			else if(last->linked)
+				requeue(last->linked);
+		}
+#else
 		if (last->linked)
 			requeue(last->linked);
+#endif
 		link_task_to_cpu(task, last);
 		preempt(last);
 	}
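Note the asymmetry in the new block: an affinity CPU is by construction available (nothing linked to it, per the helper above), so nothing needs to be requeued; only the fallback path displaces last's linked task, exactly as the unconditional code did before. Pulled out into a hypothetical helper — pick_preemption_target is a made-up name and the body is just a restatement of the block above, not code from the patch:

/* Illustrative restatement of the selection logic above. */
static cpu_entry_t *pick_preemption_target(cedf_domain_t *cluster,
					   struct task_struct *task,
					   cpu_entry_t *last)
{
	cpu_entry_t *affinity = cedf_get_nearest_available_cpu(cluster,
			&per_cpu(cedf_cpu_entries, task_cpu(task)));

	if (affinity)
		return affinity;	 /* free CPU near task's last CPU */
	if (last->linked)
		requeue(last->linked);	 /* displace lowest-priority task */
	return last;
}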
@@ -292,15 +331,15 @@ static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 	cedf_domain_t* cluster = container_of(rt, cedf_domain_t, domain);
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&cluster->cedf_lock, flags);
+	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
 
 	__merge_ready(&cluster->domain, tasks);
 	check_for_preemptions(cluster);
 
-	raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
 }
 
-/* caller holds cedf_lock */
+/* caller holds cluster_lock */
 static noinline void job_completion(struct task_struct *t, int forced)
 {
 	BUG_ON(!t);
@@ -378,7 +417,18 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 	int out_of_time, sleep, preempt, np, exists, blocks;
 	struct task_struct* next = NULL;
 
-	raw_spin_lock(&cluster->cedf_lock);
+#ifdef CONFIG_RELEASE_MASTER
+	/* Bail out early if we are the release master.
+	 * The release master never schedules any real-time tasks.
+	 */
+	if (unlikely(cluster->domain.release_master == entry->cpu)) {
+		sched_state_task_picked();
+		return NULL;
+	}
+#endif
+
+	raw_spin_lock(&cluster->cluster_lock);
+
 	clear_will_schedule();
 
 	/* sanity checking */
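Two details of this bail-out are worth noting: it runs before cluster_lock is taken, so a dedicated release-master CPU never contends for any cluster's ready-queue lock; and sched_state_task_picked() is still invoked, so LITMUS^RT's preemption state machine records a completed scheduling decision even though no task was picked.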
@@ -462,10 +512,10 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 		next = prev;
 
 	sched_state_task_picked();
-	raw_spin_unlock(&cluster->cedf_lock);
+	raw_spin_unlock(&cluster->cluster_lock);
 
 #ifdef WANT_ALL_SCHED_EVENTS
-	TRACE("cedf_lock released, next=0x%p\n", next);
+	TRACE("cluster_lock released, next=0x%p\n", next);
 
 	if (next)
 		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
@@ -504,7 +554,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 	/* the cluster doesn't change even if t is running */
 	cluster = task_cpu_cluster(t);
 
-	raw_spin_lock_irqsave(&cluster->cedf_lock, flags);
+	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
 
 	/* setup job params */
 	release_at(t, litmus_clock());
@@ -513,15 +563,25 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 		entry = &per_cpu(cedf_cpu_entries, task_cpu(t));
 		BUG_ON(entry->scheduled);
 
-		entry->scheduled = t;
-		tsk_rt(t)->scheduled_on = task_cpu(t);
+#ifdef CONFIG_RELEASE_MASTER
+		if (entry->cpu != cluster->domain.release_master) {
+#endif
+			entry->scheduled = t;
+			tsk_rt(t)->scheduled_on = task_cpu(t);
+#ifdef CONFIG_RELEASE_MASTER
+		} else {
+			/* do not schedule on release master */
+			preempt(entry); /* force resched */
+			tsk_rt(t)->scheduled_on = NO_CPU;
+		}
+#endif
 	} else {
 		t->rt_param.scheduled_on = NO_CPU;
 	}
 	t->rt_param.linked_on = NO_CPU;
 
 	cedf_job_arrival(t);
-	raw_spin_unlock_irqrestore(&(cluster->cedf_lock), flags);
+	raw_spin_unlock_irqrestore(&(cluster->cluster_lock), flags);
 }
 
 static void cedf_task_wake_up(struct task_struct *task)
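This mirrors the bail-out in cedf_schedule(): if a task happens to be created while running on the release master, it is not recorded as scheduled there (scheduled_on stays NO_CPU) and preempt(entry) forces a reschedule, so the task is picked up through the normal linking path on one of the cluster's worker CPUs.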
@@ -534,7 +594,8 @@ static void cedf_task_wake_up(struct task_struct *task)
 
 	cluster = task_cpu_cluster(task);
 
-	raw_spin_lock_irqsave(&cluster->cedf_lock, flags);
+	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
+
 	/* We need to take suspensions because of semaphores into
 	 * account! If a job resumes after being suspended due to acquiring
 	 * a semaphore, it should never be treated as a new job release.
@@ -557,7 +618,8 @@ static void cedf_task_wake_up(struct task_struct *task)
 		}
 	}
 	cedf_job_arrival(task);
-	raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags);
+
+	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
 }
 
 static void cedf_task_block(struct task_struct *t)
@@ -570,9 +632,9 @@ static void cedf_task_block(struct task_struct *t)
 	cluster = task_cpu_cluster(t);
 
 	/* unlink if necessary */
-	raw_spin_lock_irqsave(&cluster->cedf_lock, flags);
+	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
 	unlink(t);
-	raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
 
 	BUG_ON(!is_realtime(t));
 }
@@ -584,7 +646,7 @@ static void cedf_task_exit(struct task_struct * t)
 	cedf_domain_t *cluster = task_cpu_cluster(t);
 
 	/* unlink if necessary */
-	raw_spin_lock_irqsave(&cluster->cedf_lock, flags);
+	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
 	unlink(t);
 	if (tsk_rt(t)->scheduled_on != NO_CPU) {
 		cpu_entry_t *cpu;
@@ -592,7 +654,7 @@ static void cedf_task_exit(struct task_struct * t)
 		cpu->scheduled = NULL;
 		tsk_rt(t)->scheduled_on = NO_CPU;
 	}
-	raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
 
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
@@ -605,16 +667,6 @@ static long cedf_admit_task(struct task_struct* tsk)
 
 
 
-
-
-
-
-
-
-
-
-
-
 #ifdef CONFIG_LITMUS_LOCKING
 
 #include <litmus/fdso.h>
@@ -692,11 +744,11 @@ static void set_priority_inheritance(struct task_struct* t, struct task_struct*
 {
 	cedf_domain_t* cluster = task_cpu_cluster(t);
 
-	raw_spin_lock(&cluster->cedf_lock);
+	raw_spin_lock(&cluster->cluster_lock);
 
 	__set_priority_inheritance(t, prio_inh);
 
-	raw_spin_unlock(&cluster->cedf_lock);
+	raw_spin_unlock(&cluster->cluster_lock);
 }
 
 
@@ -727,9 +779,9 @@ static void clear_priority_inheritance(struct task_struct* t)
 {
 	cedf_domain_t* cluster = task_cpu_cluster(t);
 
-	raw_spin_lock(&cluster->cedf_lock);
+	raw_spin_lock(&cluster->cluster_lock);
 	__clear_priority_inheritance(t);
-	raw_spin_unlock(&cluster->cedf_lock);
+	raw_spin_unlock(&cluster->cluster_lock);
 }
 
 
@@ -857,7 +909,7 @@ static struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem)
 
 	cluster = task_cpu_cluster(max_hp);
 
-	raw_spin_lock(&cluster->cedf_lock);
+	raw_spin_lock(&cluster->cluster_lock);
 
 	if(tsk_rt(my_queue->owner)->inh_task == max_hp)
 	{
@@ -867,7 +919,7 @@ static struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem)
 			__set_priority_inheritance(my_queue->owner, my_queue->hp_waiter);
 		}
 	}
-	raw_spin_unlock(&cluster->cedf_lock);
+	raw_spin_unlock(&cluster->cluster_lock);
 
 	list_for_each(pos, &my_queue->wait.task_list)
 	{
@@ -1270,6 +1322,9 @@ static long cedf_activate_plugin(void)
 
 		if(!zalloc_cpumask_var(&cedf[i].cpu_map, GFP_ATOMIC))
 			return -ENOMEM;
+#ifdef CONFIG_RELEASE_MASTER
+		cedf[i].domain.release_master = atomic_read(&release_master_cpu);
+#endif
 	}
 
 	/* cycle through cluster and add cpus to them */
@@ -1312,7 +1367,11 @@ static long cedf_activate_plugin(void)
 
 		entry->linked = NULL;
 		entry->scheduled = NULL;
-		update_cpu_position(entry);
+#ifdef CONFIG_RELEASE_MASTER
+		/* only add CPUs that should schedule jobs */
+		if (entry->cpu != entry->cluster->domain.release_master)
+#endif
+			update_cpu_position(entry);
 	}
 	/* done with this cluster */
 	break;
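Skipping update_cpu_position() keeps the release master out of the cluster's cpu_heap altogether, so lowest_prio_cpu() can never nominate it as a preemption target. Together with the early return in cedf_schedule() and the release_master assignment at plugin activation above, this confines the release master (selected via release_master_cpu, typically set through the LITMUS^RT proc interface) to servicing release timer interrupts; it never runs real-time jobs.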