author	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2007-10-08 01:31:21 -0400
committer	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2007-10-08 01:31:21 -0400
commit	291683fd08df67dfcc753f6a9f07853205ca5e01 (patch)
tree	cbf3f58dd26ca223be32928f731de7c82603b016 /kernel
parent	b2c6eb04b91da2f09d6c33cf8cbd821f28cb0204 (diff)
adaptive: take out all synchronization support
Get rid of inherited GSN synchronization functions.
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched_adaptive.c	141
1 files changed, 14 insertions, 127 deletions
diff --git a/kernel/sched_adaptive.c b/kernel/sched_adaptive.c
index e36d74ae2c..b53de45908 100644
--- a/kernel/sched_adaptive.c
+++ b/kernel/sched_adaptive.c
@@ -586,7 +586,6 @@ static reschedule_check_t adaptive_scheduler_tick(void)
 	if (is_realtime(t))
 		t->rt_param.times.exec_time++;
 
-
 	/* only the first CPU needs to release jobs */
 	if (get_rt_mode() == MODE_RT_RUN) {
 		queue_lock_irqsave(&adaptive_lock, flags);
@@ -666,8 +665,6 @@ static noinline void job_completion(struct task_struct *t)
  * - !is_running(scheduled)	// the job blocks
  * - get_rt_flag() == RT_F_SLEEP	// the job completed (by syscall)
  * - linked != scheduled	// we need to reschedule (for any reason)
- * - is_np(scheduled)		// rescheduling must be delayed,
- *				   sys_exit_np must be requested
  *
  * Any of these can occur together.
  */
@@ -676,7 +673,7 @@ static int adaptive_schedule(struct task_struct * prev,
 			     runqueue_t * rq)
 {
 	cpu_entry_t*		entry = &__get_cpu_var(adaptive_cpu_entries);
-	int			sleep, preempt, np, exists,
+	int			sleep, preempt, exists,
 				rt, blocks;
 	struct task_struct*	linked;
 
@@ -691,7 +688,6 @@ static int adaptive_schedule(struct task_struct * prev,
 	/* (0) Determine state */
 	exists      = entry->scheduled != NULL;
 	blocks      = exists && !is_running(entry->scheduled);
-	np          = exists && is_np(entry->scheduled);
 	sleep       = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
 	preempt     = entry->scheduled != entry->linked;
 	rt          = get_rt_mode() == MODE_RT_RUN;
@@ -701,21 +697,9 @@ static int adaptive_schedule(struct task_struct * prev,
 	if (blocks)
 		unlink(entry->scheduled);
 
-	/* Request a sys_exit_np() call if we would like to preempt but cannot.
-	 * We need to make sure to update the link structure anyway in case
-	 * that we are still linked. Multiple calls to request_exit_np() don't
-	 * hurt.
-	 */
-	if (np && (preempt || sleep)) {
-		unlink(entry->scheduled);
-		request_exit_np(entry->scheduled);
-	}
-
-	/* Any task that is preemptable and either exhausts its execution
-	 * budget or wants to sleep completes. We may have to reschedule after
-	 * this.
+	/* Task wants to sleep -> job is done.
 	 */
-	if (!np && sleep)
+	if (sleep)
 		job_completion(entry->scheduled);
 
 	/* Stop real-time tasks when we leave real-time mode
@@ -737,16 +721,14 @@ static int adaptive_schedule(struct task_struct * prev,
 	/* The final scheduling decision. Do we need to switch for some reason?
 	 * If linked different from scheduled select linked as next.
 	 */
-	if ((!np || blocks) &&
-	    entry->linked != entry->scheduled) {
+	if (entry->linked != entry->scheduled) {
 		/* Take care of a previously scheduled
 		 * job by taking it out of the Linux runqueue.
 		 */
-		if (entry->scheduled) {
+		if (entry->scheduled)
 			if (prev->array)
 				/* take it out of the run queue */
 				deactivate_task(prev, rq);
-		}
 
 		/* Schedule a linked job? */
 		if (entry->linked) {
@@ -842,23 +824,14 @@ static void adaptive_wake_up_task(struct task_struct *task)
 		   task->pid, task->time_slice);
 	task->state = TASK_RUNNING;
 
-	/* We need to take suspensions because of semaphores into
-	 * account! If a job resumes after being suspended due to acquiring
-	 * a semaphore, it should never be treated as a new job release.
-	 */
-	if (get_rt_flags(task) == RT_F_EXIT_SEM) {
-		set_rt_flags(task, RT_F_RUNNING);
-	} else {
-		if (is_tardy(task)) {
-			/* new sporadic release */
-			edf_release_now(task);
-			sched_trace_job_release(task);
-		}
-		else if (task->time_slice)
-			/* came back in time before deadline
-			 */
-			set_rt_flags(task, RT_F_RUNNING);
+	if (is_tardy(task)) {
+		/* new sporadic release */
+		edf_release_now(task);
+		sched_trace_job_release(task);
 	}
+	else if (task->time_slice)
+		/* came back in time before deadline */
+		set_rt_flags(task, RT_F_RUNNING);
 
 	queue_lock_irqsave(&adaptive_lock, flags);
 	adaptive_job_arrival(task);
@@ -895,90 +868,6 @@ static long adaptive_tear_down(struct task_struct * t)
 	return 0;
 }
 
-static long adaptive_pi_block(struct pi_semaphore *sem,
-			      struct task_struct *new_waiter)
-{
-	/* This callback has to handle the situation where a new waiter is
-	 * added to the wait queue of the semaphore.
-	 *
-	 * We must check if has a higher priority than the currently
-	 * highest-priority task, and then potentially reschedule.
-	 */
-
-	BUG_ON(!new_waiter);
-
-	if (edf_higher_prio(new_waiter, sem->hp.task)) {
-		TRACE_TASK(new_waiter, " boosts priority\n");
-		/* called with IRQs disabled */
-		queue_lock(&adaptive_lock);
-		/* store new highest-priority task */
-		sem->hp.task = new_waiter;
-		if (sem->holder) {
-			/* let holder inherit */
-			sem->holder->rt_param.inh_task = new_waiter;
-			unlink(sem->holder);
-			adaptive_job_arrival(sem->holder);
-		}
-		queue_unlock(&adaptive_lock);
-	}
-
-	return 0;
-}
-
-static long adaptive_inherit_priority(struct pi_semaphore *sem,
-				      struct task_struct *new_owner)
-{
-	/* We don't need to acquire the adaptive_lock since at the time of this
-	 * call new_owner isn't actually scheduled yet (it's still sleeping)
-	 * and since the calling function already holds sem->wait.lock, which
-	 * prevents concurrent sem->hp.task changes.
-	 */
-
-	if (sem->hp.task && sem->hp.task != new_owner) {
-		new_owner->rt_param.inh_task = sem->hp.task;
-		TRACE_TASK(new_owner, "inherited priority from %s/%d\n",
-			   sem->hp.task->comm, sem->hp.task->pid);
-	} else
-		TRACE_TASK(new_owner,
-			   "cannot inherit priority, "
-			   "no higher priority job waits.\n");
-	return 0;
-}
-
-/* This function is called on a semaphore release, and assumes that
- * the current task is also the semaphore holder.
- */
-static long adaptive_return_priority(struct pi_semaphore *sem)
-{
-	struct task_struct* t = current;
-	int ret = 0;
-
-	/* Find new highest-priority semaphore task
-	 * if holder task is the current hp.task.
-	 *
-	 * Calling function holds sem->wait.lock.
-	 */
-	if (t == sem->hp.task)
-		edf_set_hp_task(sem);
-
-	TRACE_CUR("adaptive_return_priority for lock %p\n", sem);
-
-	if (t->rt_param.inh_task) {
-		/* interrupts already disabled by PI code */
-		queue_lock(&adaptive_lock);
-
-		/* Reset inh_task to NULL. */
-		t->rt_param.inh_task = NULL;
-
-		/* Check if rescheduling is necessary */
-		unlink(t);
-		adaptive_job_arrival(t);
-		queue_unlock(&adaptive_lock);
-	}
-
-	return ret;
-}
-
 static int adaptive_mode_change(int new_mode)
 {
 	unsigned long flags;
@@ -1034,9 +923,6 @@ static sched_plugin_t s_plugin __cacheline_aligned_in_smp = {
 	.mode_change	= adaptive_mode_change,	\
 	.wake_up_task	= adaptive_wake_up_task, \
 	.task_blocks	= adaptive_task_blocks, \
-	.inherit_priority = adaptive_inherit_priority, \
-	.return_priority = adaptive_return_priority, \
-	.pi_block	= adaptive_pi_block \
 }
 
 
@@ -1045,6 +931,7 @@ sched_plugin_t *__init init_adaptive_plugin(void)
 	int cpu;
 	cpu_entry_t *entry;
 
+	/* magic values given in the paper */
 	fc_a = _frac( 102, 1000);
 	fc_b = _frac(-1975, 1000);
 