aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched.c
diff options
context:
space:
mode:
authorAndrea Bastoni <bastoni@cs.unc.edu>2010-05-29 23:35:01 -0400
committerAndrea Bastoni <bastoni@cs.unc.edu>2010-05-29 23:35:01 -0400
commit6ffc1fee98c4b995eb3a0285f4f8fb467cb0306e (patch)
tree69a05892a41e7f7400fa598ee0bdf8027c8f0fd6 /kernel/sched.c
parente40152ee1e1c7a63f4777791863215e3faa37a86 (diff)
parent7c1ff4c544dd650cceff3cd69a04bcba60856678 (diff)
Merge branch 'master' into wip-merge-2.6.34
Simple merge between master and 2.6.34 with conflicts resolved. This commit does not compile, the following main problems are still unresolved: - spinlock -> raw_spinlock API changes - kfifo API changes - sched_class API changes Conflicts: Makefile arch/x86/include/asm/hw_irq.h arch/x86/include/asm/unistd_32.h arch/x86/kernel/syscall_table_32.S include/linux/hrtimer.h kernel/sched.c kernel/sched_fair.c
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--kernel/sched.c106
1 files changed, 96 insertions, 10 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 3c2a54f70ff..5e3c509e0ef 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -78,6 +78,9 @@
78 78
79#include "sched_cpupri.h" 79#include "sched_cpupri.h"
80 80
81#include <litmus/sched_trace.h>
82#include <litmus/trace.h>
83
81#define CREATE_TRACE_POINTS 84#define CREATE_TRACE_POINTS
82#include <trace/events/sched.h> 85#include <trace/events/sched.h>
83 86
@@ -450,6 +453,12 @@ struct rt_rq {
450#endif 453#endif
451}; 454};
452 455
456/* Litmus related fields in a runqueue */
457struct litmus_rq {
458 unsigned long nr_running;
459 struct task_struct *prev;
460};
461
453#ifdef CONFIG_SMP 462#ifdef CONFIG_SMP
454 463
455/* 464/*
@@ -512,6 +521,7 @@ struct rq {
512 521
513 struct cfs_rq cfs; 522 struct cfs_rq cfs;
514 struct rt_rq rt; 523 struct rt_rq rt;
524 struct litmus_rq litmus;
515 525
516#ifdef CONFIG_FAIR_GROUP_SCHED 526#ifdef CONFIG_FAIR_GROUP_SCHED
517 /* list of leaf cfs_rq on this cpu: */ 527 /* list of leaf cfs_rq on this cpu: */
@@ -1833,7 +1843,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1833 1843
1834static const struct sched_class rt_sched_class; 1844static const struct sched_class rt_sched_class;
1835 1845
1836#define sched_class_highest (&rt_sched_class) 1846#define sched_class_highest (&litmus_sched_class)
1837#define for_each_class(class) \ 1847#define for_each_class(class) \
1838 for (class = sched_class_highest; class; class = class->next) 1848 for (class = sched_class_highest; class; class = class->next)
1839 1849
@@ -1932,6 +1942,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
1932#include "sched_idletask.c" 1942#include "sched_idletask.c"
1933#include "sched_fair.c" 1943#include "sched_fair.c"
1934#include "sched_rt.c" 1944#include "sched_rt.c"
1945#include "../litmus/sched_litmus.c"
1935#ifdef CONFIG_SCHED_DEBUG 1946#ifdef CONFIG_SCHED_DEBUG
1936# include "sched_debug.c" 1947# include "sched_debug.c"
1937#endif 1948#endif
@@ -2372,6 +2383,9 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
2372 unsigned long flags; 2383 unsigned long flags;
2373 struct rq *rq; 2384 struct rq *rq;
2374 2385
2386 if (is_realtime(p))
2387 TRACE_TASK(p, "try_to_wake_up() state:%d\n", p->state);
2388
2375 if (!sched_feat(SYNC_WAKEUPS)) 2389 if (!sched_feat(SYNC_WAKEUPS))
2376 wake_flags &= ~WF_SYNC; 2390 wake_flags &= ~WF_SYNC;
2377 2391
@@ -2390,7 +2404,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
2390 orig_cpu = cpu; 2404 orig_cpu = cpu;
2391 2405
2392#ifdef CONFIG_SMP 2406#ifdef CONFIG_SMP
2393 if (unlikely(task_running(rq, p))) 2407 if (unlikely(task_running(rq, p)) || is_realtime(p))
2394 goto out_activate; 2408 goto out_activate;
2395 2409
2396 /* 2410 /*
@@ -2497,6 +2511,8 @@ out_running:
2497 } 2511 }
2498#endif 2512#endif
2499out: 2513out:
2514 if (is_realtime(p))
2515 TRACE_TASK(p, "try_to_wake_up() done state:%d\n", p->state);
2500 task_rq_unlock(rq, &flags); 2516 task_rq_unlock(rq, &flags);
2501 put_cpu(); 2517 put_cpu();
2502 2518
@@ -2814,6 +2830,8 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
2814 */ 2830 */
2815 prev_state = prev->state; 2831 prev_state = prev->state;
2816 finish_arch_switch(prev); 2832 finish_arch_switch(prev);
2833 litmus->finish_switch(prev);
2834 prev->rt_param.stack_in_use = NO_CPU;
2817#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW 2835#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2818 local_irq_disable(); 2836 local_irq_disable();
2819#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ 2837#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
@@ -2843,6 +2861,15 @@ static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
2843{ 2861{
2844 if (prev->sched_class->pre_schedule) 2862 if (prev->sched_class->pre_schedule)
2845 prev->sched_class->pre_schedule(rq, prev); 2863 prev->sched_class->pre_schedule(rq, prev);
2864
2865 /* LITMUS^RT not very clean hack: we need to save the prev task
2866 * as our scheduling decisions rely on it (as we drop the rq lock
2867 * something in prev can change...); there is no way to escape
2868 * this hack apart from modifying pick_next_task(rq, _prev_) or
2869 * falling back on the previous solution of decoupling
2870 * scheduling decisions
2871 */
2872 rq->litmus.prev = prev;
2846} 2873}
2847 2874
2848/* rq->lock is NOT held, but preemption is disabled */ 2875/* rq->lock is NOT held, but preemption is disabled */
@@ -3520,18 +3547,26 @@ void scheduler_tick(void)
3520 3547
3521 sched_clock_tick(); 3548 sched_clock_tick();
3522 3549
3550 TS_TICK_START(current);
3551
3523 raw_spin_lock(&rq->lock); 3552 raw_spin_lock(&rq->lock);
3524 update_rq_clock(rq); 3553 update_rq_clock(rq);
3525 update_cpu_load(rq); 3554 update_cpu_load(rq);
3526 curr->sched_class->task_tick(rq, curr, 0); 3555 curr->sched_class->task_tick(rq, curr, 0);
3556
3557 /* litmus_tick may force current to resched */
3558 litmus_tick(rq, curr);
3559
3527 raw_spin_unlock(&rq->lock); 3560 raw_spin_unlock(&rq->lock);
3528 3561
3529 perf_event_task_tick(curr); 3562 perf_event_task_tick(curr);
3530 3563
3531#ifdef CONFIG_SMP 3564#ifdef CONFIG_SMP
3532 rq->idle_at_tick = idle_cpu(cpu); 3565 rq->idle_at_tick = idle_cpu(cpu);
3533 trigger_load_balance(rq, cpu); 3566 if (!is_realtime(current))
3567 trigger_load_balance(rq, cpu);
3534#endif 3568#endif
3569 TS_TICK_END(current);
3535} 3570}
3536 3571
3537notrace unsigned long get_parent_ip(unsigned long addr) 3572notrace unsigned long get_parent_ip(unsigned long addr)
@@ -3672,12 +3707,20 @@ pick_next_task(struct rq *rq)
3672 /* 3707 /*
3673 * Optimization: we know that if all tasks are in 3708 * Optimization: we know that if all tasks are in
3674 * the fair class we can call that function directly: 3709 * the fair class we can call that function directly:
3675 */ 3710
3676 if (likely(rq->nr_running == rq->cfs.nr_running)) { 3711 * NOT IN LITMUS^RT!
3712
3713 * This breaks many assumptions in the plugins.
3714 * Do not uncomment without thinking long and hard
3715 * about how this affects global plugins such as GSN-EDF.
3716
3717 if (rq->nr_running == rq->cfs.nr_running) {
3718 TRACE("taking shortcut in pick_next_task()\n");
3677 p = fair_sched_class.pick_next_task(rq); 3719 p = fair_sched_class.pick_next_task(rq);
3678 if (likely(p)) 3720 if (likely(p))
3679 return p; 3721 return p;
3680 } 3722 }
3723 */
3681 3724
3682 class = sched_class_highest; 3725 class = sched_class_highest;
3683 for ( ; ; ) { 3726 for ( ; ; ) {
@@ -3712,6 +3755,8 @@ need_resched:
3712 3755
3713 release_kernel_lock(prev); 3756 release_kernel_lock(prev);
3714need_resched_nonpreemptible: 3757need_resched_nonpreemptible:
3758 TS_SCHED_START;
3759 sched_trace_task_switch_away(prev);
3715 3760
3716 schedule_debug(prev); 3761 schedule_debug(prev);
3717 3762
@@ -3746,15 +3791,22 @@ need_resched_nonpreemptible:
3746 rq->curr = next; 3791 rq->curr = next;
3747 ++*switch_count; 3792 ++*switch_count;
3748 3793
3794 TS_SCHED_END(next);
3795 TS_CXS_START(next);
3749 context_switch(rq, prev, next); /* unlocks the rq */ 3796 context_switch(rq, prev, next); /* unlocks the rq */
3797 TS_CXS_END(current);
3750 /* 3798 /*
3751 * the context switch might have flipped the stack from under 3799 * the context switch might have flipped the stack from under
3752 * us, hence refresh the local variables. 3800 * us, hence refresh the local variables.
3753 */ 3801 */
3754 cpu = smp_processor_id(); 3802 cpu = smp_processor_id();
3755 rq = cpu_rq(cpu); 3803 rq = cpu_rq(cpu);
3756 } else 3804 } else {
3805 TS_SCHED_END(prev);
3757 raw_spin_unlock_irq(&rq->lock); 3806 raw_spin_unlock_irq(&rq->lock);
3807 }
3808
3809 sched_trace_task_switch_to(current);
3758 3810
3759 post_schedule(rq); 3811 post_schedule(rq);
3760 3812
@@ -3767,6 +3819,9 @@ need_resched_nonpreemptible:
3767 preempt_enable_no_resched(); 3819 preempt_enable_no_resched();
3768 if (need_resched()) 3820 if (need_resched())
3769 goto need_resched; 3821 goto need_resched;
3822
3823 if (srp_active())
3824 srp_ceiling_block();
3770} 3825}
3771EXPORT_SYMBOL(schedule); 3826EXPORT_SYMBOL(schedule);
3772 3827
@@ -4043,6 +4098,17 @@ void complete_all(struct completion *x)
4043} 4098}
4044EXPORT_SYMBOL(complete_all); 4099EXPORT_SYMBOL(complete_all);
4045 4100
4101void complete_n(struct completion *x, int n)
4102{
4103 unsigned long flags;
4104
4105 spin_lock_irqsave(&x->wait.lock, flags);
4106 x->done += n;
4107 __wake_up_common(&x->wait, TASK_NORMAL, n, 0, NULL);
4108 spin_unlock_irqrestore(&x->wait.lock, flags);
4109}
4110EXPORT_SYMBOL(complete_n);
4111
4046static inline long __sched 4112static inline long __sched
4047do_wait_for_common(struct completion *x, long timeout, int state) 4113do_wait_for_common(struct completion *x, long timeout, int state)
4048{ 4114{
@@ -4471,7 +4537,9 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
4471 p->normal_prio = normal_prio(p); 4537 p->normal_prio = normal_prio(p);
4472 /* we are holding p->pi_lock already */ 4538 /* we are holding p->pi_lock already */
4473 p->prio = rt_mutex_getprio(p); 4539 p->prio = rt_mutex_getprio(p);
4474 if (rt_prio(p->prio)) 4540 if (p->policy == SCHED_LITMUS)
4541 p->sched_class = &litmus_sched_class;
4542 else if (rt_prio(p->prio))
4475 p->sched_class = &rt_sched_class; 4543 p->sched_class = &rt_sched_class;
4476 else 4544 else
4477 p->sched_class = &fair_sched_class; 4545 p->sched_class = &fair_sched_class;
@@ -4516,7 +4584,7 @@ recheck:
4516 4584
4517 if (policy != SCHED_FIFO && policy != SCHED_RR && 4585 if (policy != SCHED_FIFO && policy != SCHED_RR &&
4518 policy != SCHED_NORMAL && policy != SCHED_BATCH && 4586 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
4519 policy != SCHED_IDLE) 4587 policy != SCHED_IDLE && policy != SCHED_LITMUS)
4520 return -EINVAL; 4588 return -EINVAL;
4521 } 4589 }
4522 4590
@@ -4531,6 +4599,8 @@ recheck:
4531 return -EINVAL; 4599 return -EINVAL;
4532 if (rt_policy(policy) != (param->sched_priority != 0)) 4600 if (rt_policy(policy) != (param->sched_priority != 0))
4533 return -EINVAL; 4601 return -EINVAL;
4602 if (policy == SCHED_LITMUS && policy == p->policy)
4603 return -EINVAL;
4534 4604
4535 /* 4605 /*
4536 * Allow unprivileged RT tasks to decrease priority: 4606 * Allow unprivileged RT tasks to decrease priority:
@@ -4585,6 +4655,12 @@ recheck:
4585 return retval; 4655 return retval;
4586 } 4656 }
4587 4657
4658 if (policy == SCHED_LITMUS) {
4659 retval = litmus_admit_task(p);
4660 if (retval)
4661 return retval;
4662 }
4663
4588 /* 4664 /*
4589 * make sure no PI-waiters arrive (or leave) while we are 4665 * make sure no PI-waiters arrive (or leave) while we are
4590 * changing the priority of the task: 4666 * changing the priority of the task:
@@ -4612,10 +4688,19 @@ recheck:
4612 4688
4613 p->sched_reset_on_fork = reset_on_fork; 4689 p->sched_reset_on_fork = reset_on_fork;
4614 4690
4691 if (p->policy == SCHED_LITMUS)
4692 litmus_exit_task(p);
4693
4615 oldprio = p->prio; 4694 oldprio = p->prio;
4616 prev_class = p->sched_class; 4695 prev_class = p->sched_class;
4617 __setscheduler(rq, p, policy, param->sched_priority); 4696 __setscheduler(rq, p, policy, param->sched_priority);
4618 4697
4698 if (policy == SCHED_LITMUS) {
4699 p->rt_param.stack_in_use = running ? rq->cpu : NO_CPU;
4700 p->rt_param.present = running;
4701 litmus->task_new(p, on_rq, running);
4702 }
4703
4619 if (running) 4704 if (running)
4620 p->sched_class->set_curr_task(rq); 4705 p->sched_class->set_curr_task(rq);
4621 if (on_rq) { 4706 if (on_rq) {
@@ -4785,10 +4870,11 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
4785 rcu_read_lock(); 4870 rcu_read_lock();
4786 4871
4787 p = find_process_by_pid(pid); 4872 p = find_process_by_pid(pid);
4788 if (!p) { /* Don't set affinity if the task was not found or is a LITMUS task */
4874 if (!p || is_realtime(p)) {
4789 rcu_read_unlock(); 4875 rcu_read_unlock();
4790 put_online_cpus(); 4876 put_online_cpus();
4791 return -ESRCH; 4877 return p ? -EPERM : -ESRCH;
4792 } 4878 }
4793 4879
4794 /* Prevent p going away */ 4880 /* Prevent p going away */