Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 25
1 file changed, 24 insertions(+), 1 deletion(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index c4b6bd5151ff..e29a97235f26 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -86,6 +86,10 @@
 #include <litmus/sched_trace.h>
 #include <litmus/trace.h>
 
+#ifdef CONFIG_LITMUS_SOFTIRQD
+#include <litmus/litmus_softirq.h>
+#endif
+
 static void litmus_tick(struct rq*, struct task_struct*);
 
 /*
@@ -2703,8 +2707,10 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	unsigned long flags;
 	int cpu, success = 0;
 
-	if (is_realtime(p))
+	if (is_realtime(p)) {
+		//WARN_ON(1);
 		TRACE_TASK(p, "try_to_wake_up() state:%d\n", p->state);
+	}
 
 	smp_wmb();
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
@@ -4319,6 +4325,7 @@ pick_next_task(struct rq *rq)
 	BUG(); /* the idle class will always have a runnable task */
 }
 
+
 /*
  * schedule() is the main scheduler function.
  */
@@ -4434,10 +4441,16 @@ litmus_need_resched_nonpreemptible:
 	if (need_resched())
 		goto need_resched;
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+	litmus->run_tasklets(prev);
+#endif
+
 	srp_ceiling_block();
 }
 EXPORT_SYMBOL(schedule);
 
+
+
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 
 static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
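Note: litmus->run_tasklets() is a hook into the active LITMUS^RT scheduler plugin, invoked on the schedule() path so that tasklets deferred under the PAI softirq scheme (CONFIG_LITMUS_PAI_SOFTIRQD) can be flushed at a well-defined scheduling point. A minimal sketch of what a plugin-side implementation might look like follows; apart from the run_tasklets name taken from the hunk above, every identifier is an illustrative assumption, not code from this patch:

	/* Sketch only: demo_* names are hypothetical. Uses struct
	 * tasklet_struct from <linux/interrupt.h>. */
	static void demo_run_tasklets(struct task_struct *prev)
	{
		struct tasklet_struct *t;

		/* drain tasklets deferred on behalf of real-time tasks,
		 * executing them here on the schedule() path */
		while ((t = demo_dequeue_deferred_tasklet()) != NULL)
			t->func(t->data);
	}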
@@ -4581,6 +4594,7 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
 	}
 }
 
+
 /**
  * __wake_up - wake up threads blocked on a waitqueue.
  * @q: the waitqueue
@@ -4756,6 +4770,12 @@ void __sched wait_for_completion(struct completion *x)
 }
 EXPORT_SYMBOL(wait_for_completion);
 
+void __sched __wait_for_completion_locked(struct completion *x)
+{
+	do_wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(__wait_for_completion_locked);
+
 /**
  * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
  * @x: holds the state of this particular completion
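Note: wait_for_completion() takes x->wait.lock around its call to do_wait_for_common(), whereas the new __wait_for_completion_locked() calls do_wait_for_common() directly. The caller must therefore already hold x->wait.lock with interrupts disabled; do_wait_for_common() drops and retakes that lock internally while the task sleeps. A hypothetical usage sketch, where the completion `done` and the guarded state are illustrative assumptions:

	spin_lock_irq(&done->wait.lock);
	/* ... update state that done->wait.lock also protects ... */
	__wait_for_completion_locked(done);	/* may sleep; lock is
						   dropped/retaken inside */
	spin_unlock_irq(&done->wait.lock);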
@@ -5258,6 +5278,9 @@ recheck:
 		if (retval)
 			return retval;
 	}
+	else if (p->policy == SCHED_LITMUS) {
+		litmus_pre_exit_task(p);
+	}
 
 	/*
 	 * make sure no PI-waiters arrive (or leave) while we are
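Note: this hunk lands in __sched_setscheduler()'s recheck path. The branch above it (mostly outside the hunk's context) handles a task entering SCHED_LITMUS; the new else-if covers the reverse transition, giving LITMUS^RT a pre-exit callback when a task currently under SCHED_LITMUS is switched to a stock policy. A sketch of the resulting control flow, where the admit-side call is assumed from LITMUS^RT convention rather than visible in this hunk:

	if (policy == SCHED_LITMUS) {
		retval = litmus_admit_task(p);	/* assumed admit hook */
		if (retval)
			return retval;
	}
	else if (p->policy == SCHED_LITMUS) {
		/* leaving SCHED_LITMUS: let the plugin clean up first */
		litmus_pre_exit_task(p);
	}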