Diffstat (limited to 'kernel/sched.c'):
 kernel/sched.c | 106 ++++++++++++----
 1 file changed, 96 insertions(+), 10 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index dc85ceb90832..6777dc7942a0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -79,6 +79,9 @@
 #include "sched_cpupri.h"
 #include "workqueue_sched.h"
 
+#include <litmus/sched_trace.h>
+#include <litmus/trace.h>
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
 
@@ -405,6 +408,12 @@ struct rt_rq {
 #endif
 };
 
+/* LITMUS^RT-related fields in a runqueue */
+struct litmus_rq {
+	unsigned long nr_running;
+	struct task_struct *prev;
+};
+
 #ifdef CONFIG_SMP
 
 /*
@@ -471,6 +480,7 @@ struct rq {
 
 	struct cfs_rq cfs;
 	struct rt_rq rt;
+	struct litmus_rq litmus;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* list of leaf cfs_rq on this cpu: */
@@ -1840,7 +1850,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 
 static const struct sched_class rt_sched_class;
 
-#define sched_class_highest	(&rt_sched_class)
+#define sched_class_highest	(&litmus_sched_class)
 #define for_each_class(class) \
    for (class = sched_class_highest; class; class = class->next)
 
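With litmus_sched_class at the head of the list, every scheduling decision consults LITMUS^RT before the stock classes. This only works if the new class chains on to rt_sched_class through its .next pointer; a minimal sketch of the assumed definition (the real one lives in litmus/sched_litmus.c, included further down):

	const struct sched_class litmus_sched_class = {
		.next	= &rt_sched_class,	/* litmus -> rt -> fair -> idle */
		/* .enqueue_task, .dequeue_task, .pick_next_task, ... */
	};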
@@ -1920,6 +1930,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 #include "sched_idletask.c"
 #include "sched_fair.c"
 #include "sched_rt.c"
+#include "../litmus/sched_litmus.c"
 #ifdef CONFIG_SCHED_DEBUG
 # include "sched_debug.c"
 #endif
@@ -2352,6 +2363,9 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	unsigned long en_flags = ENQUEUE_WAKEUP;
 	struct rq *rq;
 
+	if (is_realtime(p))
+		TRACE_TASK(p, "try_to_wake_up() state:%d\n", p->state);
+
 	this_cpu = get_cpu();
 
 	smp_wmb();
@@ -2366,7 +2380,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	orig_cpu = cpu;
 
 #ifdef CONFIG_SMP
-	if (unlikely(task_running(rq, p)))
+	if (unlikely(task_running(rq, p)) || is_realtime(p))
 		goto out_activate;
 
 	/*
@@ -2428,6 +2442,8 @@ out_activate:
 out_running:
 	ttwu_post_activation(p, rq, wake_flags, success);
 out:
+	if (is_realtime(p))
+		TRACE_TASK(p, "try_to_wake_up() done state:%d\n", p->state);
 	task_rq_unlock(rq, &flags);
 	put_cpu();
 
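Both wake-up hooks gate on is_realtime(), which is not defined in this file. In the LITMUS^RT tree it is presumably a plain policy test, roughly (assumed sketch, shown only to make the hunks above readable):

	#define is_realtime(t)	((t)->policy == SCHED_LITMUS)	/* assumed definition */

Jumping straight to out_activate keeps select_task_rq() out of the picture, so CPU placement of real-time tasks is left entirely to the active plugin rather than to the stock wake-up balancing.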
@@ -2748,6 +2764,8 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	prev_state = prev->state;
 	finish_arch_switch(prev);
+	litmus->finish_switch(prev);
+	prev->rt_param.stack_in_use = NO_CPU;
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	local_irq_disable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
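litmus is a global pointer to the currently active plugin: finish_switch() lets the plugin observe completed context switches, and clearing stack_in_use marks prev's stack as safe to reuse. A rough sketch of the operations table assumed by the hooks in this patch (abridged, field shapes per litmus/sched_plugin.h):

	struct sched_plugin {
		/* ... */
		void (*finish_switch)(struct task_struct *prev);
		long (*admit_task)(struct task_struct *t);
		void (*task_new)(struct task_struct *t, int on_rq, int running);
		void (*tick)(struct task_struct *t);
		/* ... */
	};
	extern struct sched_plugin *litmus;	/* the active plugin */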
@@ -2777,6 +2795,15 @@ static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
 {
 	if (prev->sched_class->pre_schedule)
 		prev->sched_class->pre_schedule(rq, prev);
+
+	/* LITMUS^RT: a not-so-clean hack. We need to save the prev task
+	 * as our scheduling decisions rely on it (once we drop the rq lock,
+	 * something in prev can change...); there is no way to escape
+	 * this hack apart from modifying pick_next_task(rq, _prev_) or
+	 * falling back on the previous solution of decoupling
+	 * scheduling decisions.
+	 */
+	rq->litmus.prev = prev;
 }
 
 /* rq->lock is NOT held, but preemption is disabled */
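The stashed pointer matters because pick_next_task() in this kernel takes only rq, not prev. On the plugin side the consumer plausibly looks like this (shape assumed; the real code is in litmus/sched_litmus.c):

	static struct task_struct *pick_next_task_litmus(struct rq *rq)
	{
		/* hand the outgoing task, saved by pre_schedule(),
		 * to the plugin's scheduling logic */
		return litmus_schedule(rq, rq->litmus.prev);
	}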
@@ -3578,18 +3605,26 @@ void scheduler_tick(void)
 
 	sched_clock_tick();
 
+	TS_TICK_START(current);
+
 	raw_spin_lock(&rq->lock);
 	update_rq_clock(rq);
 	update_cpu_load_active(rq);
 	curr->sched_class->task_tick(rq, curr, 0);
+
+	/* litmus_tick() may force current to resched */
+	litmus_tick(rq, curr);
+
 	raw_spin_unlock(&rq->lock);
 
 	perf_event_task_tick(curr);
 
 #ifdef CONFIG_SMP
 	rq->idle_at_tick = idle_cpu(cpu);
-	trigger_load_balance(rq, cpu);
+	if (!is_realtime(current))
+		trigger_load_balance(rq, cpu);
 #endif
+	TS_TICK_END(current);
 }
 
 notrace unsigned long get_parent_ip(unsigned long addr)
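TS_TICK_START/TS_TICK_END are Feather-Trace timestamp probes that bracket the tick so its overhead can be measured, and load balancing is suppressed while a LITMUS^RT task runs so the stock balancer cannot migrate it. litmus_tick() forwards the tick to the active plugin; it plausibly amounts to (sketch, real code in litmus/sched_litmus.c):

	static void litmus_tick(struct rq *rq, struct task_struct *p)
	{
		if (is_realtime(p))
			/* the plugin may set TIF_NEED_RESCHED here,
			 * e.g. on budget exhaustion */
			litmus->tick(p);
	}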
@@ -3716,12 +3751,20 @@ pick_next_task(struct rq *rq)
 	/*
 	 * Optimization: we know that if all tasks are in
 	 * the fair class we can call that function directly:
-	 */
-	if (likely(rq->nr_running == rq->cfs.nr_running)) {
+
+	 * NOT IN LITMUS^RT!
+
+	 * This breaks many assumptions in the plugins.
+	 * Do not uncomment without thinking long and hard
+	 * about how this affects global plugins such as GSN-EDF.
+
+	if (rq->nr_running == rq->cfs.nr_running) {
+		TRACE("taking shortcut in pick_next_task()\n");
 		p = fair_sched_class.pick_next_task(rq);
 		if (likely(p))
 			return p;
 	}
+	*/
 
 	class = sched_class_highest;
 	for ( ; ; ) {
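With the shortcut commented out, the class-iteration loop below this hunk is the only path left. For reference, that loop is unchanged stock 2.6.36 code; it simply starts at the LITMUS^RT class now:

	class = sched_class_highest;	/* now &litmus_sched_class */
	for ( ; ; ) {
		p = class->pick_next_task(rq);
		if (p)
			return p;
		/* never NULL: the idle class always returns a task */
		class = class->next;	/* litmus -> rt -> fair -> idle */
	}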
@@ -3755,6 +3798,8 @@ need_resched:
 
 	release_kernel_lock(prev);
 need_resched_nonpreemptible:
+	TS_SCHED_START;
+	sched_trace_task_switch_away(prev);
 
 	schedule_debug(prev);
 
@@ -3803,7 +3848,10 @@ need_resched_nonpreemptible:
 		rq->curr = next;
 		++*switch_count;
 
+		TS_SCHED_END(next);
+		TS_CXS_START(next);
 		context_switch(rq, prev, next); /* unlocks the rq */
+		TS_CXS_END(current);
 		/*
 		 * The context switch have flipped the stack from under us
 		 * and restored the local variables which were saved when
@@ -3812,8 +3860,12 @@ need_resched_nonpreemptible:
 		 */
 		cpu = smp_processor_id();
 		rq = cpu_rq(cpu);
-	} else
+	} else {
+		TS_SCHED_END(prev);
 		raw_spin_unlock_irq(&rq->lock);
+	}
+
+	sched_trace_task_switch_to(current);
 
 	post_schedule(rq);
 
@@ -3823,6 +3875,9 @@ need_resched_nonpreemptible:
 	preempt_enable_no_resched();
 	if (need_resched())
 		goto need_resched;
+
+	if (srp_active())
+		srp_ceiling_block();
 }
 EXPORT_SYMBOL(schedule);
 
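srp_active()/srp_ceiling_block() implement the blocking rule of the Stack Resource Policy: a task returning from schedule() may not run until its preemption level exceeds the current system ceiling. The real implementation is in litmus/srp.c; its intended behavior, in hypothetical pseudocode (both helpers below are made up):

	void srp_ceiling_block(void)
	{
		/* hypothetical sketch only */
		while (srp_ceiling_blocks(current))	/* hypothetical helper */
			wait_for_ceiling_change();	/* hypothetical helper */
	}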
@@ -4108,6 +4163,17 @@ void complete_all(struct completion *x)
 }
 EXPORT_SYMBOL(complete_all);
 
+void complete_n(struct completion *x, int n)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&x->wait.lock, flags);
+	x->done += n;
+	__wake_up_common(&x->wait, TASK_NORMAL, n, 0, NULL);
+	spin_unlock_irqrestore(&x->wait.lock, flags);
+}
+EXPORT_SYMBOL(complete_n);
+
 static inline long __sched
 do_wait_for_common(struct completion *x, long timeout, int state)
 {
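complete_n() generalizes complete()/complete_all(): it credits x->done by n and wakes at most n exclusive waiters under the wait-queue lock, which is what a synchronous release of an n-task LITMUS^RT task set needs. A usage sketch:

	struct completion start;

	init_completion(&start);
	/* ... each of the n tasks blocks in wait_for_completion(&start) ... */
	complete_n(&start, n);	/* release all n waiters at once */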
@@ -4550,7 +4616,9 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
 	p->normal_prio = normal_prio(p);
 	/* we are holding p->pi_lock already */
 	p->prio = rt_mutex_getprio(p);
-	if (rt_prio(p->prio))
+	if (p->policy == SCHED_LITMUS)
+		p->sched_class = &litmus_sched_class;
+	else if (rt_prio(p->prio))
 		p->sched_class = &rt_sched_class;
 	else
 		p->sched_class = &fair_sched_class;
@@ -4595,7 +4663,7 @@ recheck:
 
 		if (policy != SCHED_FIFO && policy != SCHED_RR &&
 				policy != SCHED_NORMAL && policy != SCHED_BATCH &&
-				policy != SCHED_IDLE)
+				policy != SCHED_IDLE && policy != SCHED_LITMUS)
 			return -EINVAL;
 	}
 
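From user space, a task enters SCHED_LITMUS through the ordinary sched_setscheduler() path, which then runs litmus_admit_task() (two hunks below). In practice liblitmus wraps this after setting up the task's rt_param; a bare-bones sketch (the numeric value of SCHED_LITMUS is an assumption taken from the LITMUS^RT headers):

	#include <sched.h>

	#ifndef SCHED_LITMUS
	#define SCHED_LITMUS 6	/* assumed value from the LITMUS^RT patch */
	#endif

	int become_litmus_task(void)
	{
		/* priority must be 0: SCHED_LITMUS is not an rt_policy() */
		struct sched_param param = { .sched_priority = 0 };
		/* -EINVAL if already SCHED_LITMUS; the plugin may also reject us */
		return sched_setscheduler(0, SCHED_LITMUS, &param);
	}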
@@ -4610,6 +4678,8 @@ recheck:
 		return -EINVAL;
 	if (rt_policy(policy) != (param->sched_priority != 0))
 		return -EINVAL;
+	if (policy == SCHED_LITMUS && policy == p->policy)
+		return -EINVAL;
 
 	/*
 	 * Allow unprivileged RT tasks to decrease priority:
@@ -4650,6 +4720,12 @@ recheck:
 			return retval;
 	}
 
+	if (policy == SCHED_LITMUS) {
+		retval = litmus_admit_task(p);
+		if (retval)
+			return retval;
+	}
+
 	/*
 	 * make sure no PI-waiters arrive (or leave) while we are
 	 * changing the priority of the task:
@@ -4692,10 +4768,19 @@ recheck:
 
 	p->sched_reset_on_fork = reset_on_fork;
 
+	if (p->policy == SCHED_LITMUS)
+		litmus_exit_task(p);
+
 	oldprio = p->prio;
 	prev_class = p->sched_class;
 	__setscheduler(rq, p, policy, param->sched_priority);
 
+	if (policy == SCHED_LITMUS) {
+		p->rt_param.stack_in_use = running ? rq->cpu : NO_CPU;
+		p->rt_param.present = running;
+		litmus->task_new(p, on_rq, running);
+	}
+
 	if (running)
 		p->sched_class->set_curr_task(rq);
 	if (on_rq) {
@@ -4865,10 +4950,11 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 	rcu_read_lock();
 
 	p = find_process_by_pid(pid);
-	if (!p) {
+	/* Don't set affinity if the task was not found or is a LITMUS^RT task */
+	if (!p || is_realtime(p)) {
 		rcu_read_unlock();
 		put_online_cpus();
-		return -ESRCH;
+		return p ? -EPERM : -ESRCH;
 	}
 
 	/* Prevent p going away */
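Consequently, CPU affinity must be set before a task enters SCHED_LITMUS; afterwards the call fails with EPERM. A user-space sketch of the observable behavior:

	#define _GNU_SOURCE
	#include <sched.h>

	static int pin_before_admission(pid_t pid, int cpu)
	{
		cpu_set_t set;

		CPU_ZERO(&set);
		CPU_SET(cpu, &set);
		/* fails with EPERM once pid is a LITMUS^RT task */
		return sched_setaffinity(pid, sizeof(set), &set);
	}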