author    Andrea Bastoni <bastoni@cs.unc.edu>    2009-12-17 21:23:36 -0500
committer Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-29 17:05:45 -0400
commit    4b38febbd59fd33542a343991262119eb9860f5e (patch)
tree      1af88a0d354abe344c2c2869631f76a1806d75c3 /kernel
parent    22763c5cf3690a681551162c15d34d935308c8d7 (diff)
[ported from 2008.3] Core LITMUS^RT infrastructure
Port 2008.3 Core LITMUS^RT infrastructure to Linux 2.6.32

litmus_sched_class implements 4 new methods:

- prio_changed: void
- switched_to: void
- get_rr_interval: return infinity (i.e., 0)
- select_task_rq: return current cpu
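For context, these four callbacks are essentially stubs; the real definitions live in litmus/sched_litmus.c, which is pulled in by the #include added to kernel/sched.c below and is not part of this diff. The following is only a rough sketch of what the commit message describes, with prototypes approximated from the 2.6.32 sched_class interface (exact signatures may differ between point releases):

/* Sketch only -- not the actual litmus/sched_litmus.c code. */
static void prio_changed_litmus(struct rq *rq, struct task_struct *p,
                                int oldprio, int running)
{
        /* void: priority changes are handled by the active LITMUS^RT plugin */
}

static void switched_to_litmus(struct rq *rq, struct task_struct *p,
                               int running)
{
        /* void: admission into SCHED_LITMUS goes through litmus_admit_task() */
}

static unsigned int get_rr_interval_litmus(struct task_struct *p)
{
        return 0;       /* "infinity": LITMUS^RT tasks have no RR time slice */
}

static int select_task_rq_litmus(struct task_struct *p, int sd_flag, int flags)
{
        return smp_processor_id();      /* stay on the current cpu */
}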
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/fork.c        7
-rw-r--r--  kernel/sched.c      92
-rw-r--r--  kernel/sched_fair.c  2
-rw-r--r--  kernel/sched_rt.c    2
4 files changed, 93 insertions, 10 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index 166b8c49257c..889730cce3ad 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -74,6 +74,9 @@
 
 #include <trace/events/sched.h>
 
+#include <litmus/litmus.h>
+#include <litmus/sched_plugin.h>
+
 /*
  * Protected counters by write_lock_irq(&tasklist_lock)
  */
@@ -162,6 +165,7 @@ void __put_task_struct(struct task_struct *tsk)
         WARN_ON(atomic_read(&tsk->usage));
         WARN_ON(tsk == current);
 
+        exit_litmus(tsk);
         exit_creds(tsk);
         delayacct_tsk_free(tsk);
 
@@ -244,6 +248,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 
         tsk->stack = ti;
 
+        /* Don't let the new task be a real-time task. */
+        memset(&tsk->rt_param, 0, sizeof(struct rt_task));
+
         err = prop_local_init_single(&tsk->dirties);
         if (err)
                 goto out;
diff --git a/kernel/sched.c b/kernel/sched.c
index 3c11ae0a948d..fcaed6b96442 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -77,6 +77,9 @@
 
 #include "sched_cpupri.h"
 
+#include <litmus/sched_trace.h>
+#include <litmus/trace.h>
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
 
@@ -571,6 +574,8 @@ struct rq {
 
         atomic_t nr_iowait;
 
+        struct task_struct *litmus_next;
+
 #ifdef CONFIG_SMP
         struct root_domain *rd;
         struct sched_domain *sd;
@@ -1815,11 +1820,12 @@ static void calc_load_account_active(struct rq *this_rq);
 #include "sched_idletask.c"
 #include "sched_fair.c"
 #include "sched_rt.c"
+#include "../litmus/sched_litmus.c"
 #ifdef CONFIG_SCHED_DEBUG
 # include "sched_debug.c"
 #endif
 
-#define sched_class_highest (&rt_sched_class)
+#define sched_class_highest (&litmus_sched_class)
 #define for_each_class(class) \
         for (class = sched_class_highest; class; class = class->next)
 
@@ -2343,6 +2349,9 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
         unsigned long flags;
         struct rq *rq, *orig_rq;
 
+        if (is_realtime(p))
+                TRACE_TASK(p, "try_to_wake_up() state:%d\n", p->state);
+
         if (!sched_feat(SYNC_WAKEUPS))
                 wake_flags &= ~WF_SYNC;
 
@@ -2361,7 +2370,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
         orig_cpu = cpu;
 
 #ifdef CONFIG_SMP
-        if (unlikely(task_running(rq, p)))
+        if (unlikely(task_running(rq, p)) || is_realtime(p))
                 goto out_activate;
 
         /*
@@ -2442,6 +2451,8 @@ out_running:
                 p->sched_class->task_wake_up(rq, p);
 #endif
 out:
+        if (is_realtime(p))
+                TRACE_TASK(p, "try_to_wake_up() done state:%d\n", p->state);
         task_rq_unlock(rq, &flags);
         put_cpu();
 
@@ -2750,6 +2761,8 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
          */
         prev_state = prev->state;
         finish_arch_switch(prev);
+        litmus->finish_switch(prev);
+        prev->rt_param.stack_in_use = NO_CPU;
         perf_event_task_sched_in(current, cpu_of(rq));
         finish_lock_switch(rq, prev);
 
@@ -5232,18 +5245,31 @@ void scheduler_tick(void)
 
         sched_clock_tick();
 
+        TS_TICK_START(current);
+
         spin_lock(&rq->lock);
         update_rq_clock(rq);
         update_cpu_load(rq);
         curr->sched_class->task_tick(rq, curr, 0);
+
+        /*
+         * LITMUS_TODO: can we move litmus_tick inside task_tick
+         * or will deadlock ?
+         */
+        TS_PLUGIN_TICK_START;
+        litmus_tick(rq, curr);
+        TS_PLUGIN_TICK_END;
+
         spin_unlock(&rq->lock);
 
         perf_event_task_tick(curr, cpu);
 
 #ifdef CONFIG_SMP
         rq->idle_at_tick = idle_cpu(cpu);
-        trigger_load_balance(rq, cpu);
+        if (!is_realtime(current))
+                trigger_load_balance(rq, cpu);
 #endif
+        TS_TICK_END(current);
 }
 
 notrace unsigned long get_parent_ip(unsigned long addr)
@@ -5387,11 +5413,17 @@ pick_next_task(struct rq *rq)
          * Optimization: we know that if all tasks are in
          * the fair class we can call that function directly:
          */
+        /*
+         * LITMUS_TODO: can we move processes out of fair class?
+         * i.e., create a litmus_rq
+         */
+        /* Don't do this for LITMUS
         if (likely(rq->nr_running == rq->cfs.nr_running)) {
                 p = fair_sched_class.pick_next_task(rq);
                 if (likely(p))
                         return p;
         }
+        */
 
         class = sched_class_highest;
         for ( ; ; ) {
@@ -5426,6 +5458,8 @@ need_resched:
 
         release_kernel_lock(prev);
 need_resched_nonpreemptible:
+        TS_SCHED_START;
+        sched_trace_task_switch_away(prev);
 
         schedule_debug(prev);
 
@@ -5436,6 +5470,14 @@ need_resched_nonpreemptible:
         update_rq_clock(rq);
         clear_tsk_need_resched(prev);
 
+        /*
+         * LITMUS_TODO: can we integrate litmus_schedule in
+         * pick_next_task?
+         */
+        TS_PLUGIN_SCHED_START;
+        litmus_schedule(rq, prev);
+        TS_PLUGIN_SCHED_END;
+
         if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
                 if (unlikely(signal_pending_state(prev->state, prev)))
                         prev->state = TASK_RUNNING;
@@ -5460,22 +5502,35 @@ need_resched_nonpreemptible:
                 rq->curr = next;
                 ++*switch_count;
 
+                TS_SCHED_END(next);
+                TS_CXS_START(next);
                 context_switch(rq, prev, next); /* unlocks the rq */
+                TS_CXS_END(current);
                 /*
                  * the context switch might have flipped the stack from under
                  * us, hence refresh the local variables.
                  */
                 cpu = smp_processor_id();
                 rq = cpu_rq(cpu);
-        } else
+        } else {
+                TS_SCHED_END(prev);
                 spin_unlock_irq(&rq->lock);
+        }
+
+        TS_SCHED2_START(current);
+        sched_trace_task_switch_to(current);
 
         post_schedule(rq);
 
-        if (unlikely(reacquire_kernel_lock(current) < 0))
+        if (unlikely(reacquire_kernel_lock(current) < 0)) {
+                TS_SCHED2_END(current);
                 goto need_resched_nonpreemptible;
+        }
 
         preempt_enable_no_resched();
+
+        TS_SCHED2_END(current);
+
         if (need_resched())
                 goto need_resched;
 }
@@ -6185,6 +6240,9 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
         case SCHED_RR:
                 p->sched_class = &rt_sched_class;
                 break;
+        case SCHED_LITMUS:
+                p->sched_class = &litmus_sched_class;
+                break;
         }
 
         p->rt_priority = prio;
@@ -6232,7 +6290,7 @@ recheck:
 
                 if (policy != SCHED_FIFO && policy != SCHED_RR &&
                                 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
-                                policy != SCHED_IDLE)
+                                policy != SCHED_IDLE && policy != SCHED_LITMUS)
                         return -EINVAL;
         }
 
@@ -6247,6 +6305,8 @@ recheck:
                 return -EINVAL;
         if (rt_policy(policy) != (param->sched_priority != 0))
                 return -EINVAL;
+        if (policy == SCHED_LITMUS && policy == p->policy)
+                return -EINVAL;
 
         /*
          * Allow unprivileged RT tasks to decrease priority:
@@ -6301,6 +6361,12 @@ recheck:
                 return retval;
         }
 
+        if (policy == SCHED_LITMUS) {
+                retval = litmus_admit_task(p);
+                if (retval)
+                        return retval;
+        }
+
         /*
          * make sure no PI-waiters arrive (or leave) while we are
          * changing the priority of the task:
@@ -6328,9 +6394,18 @@ recheck:
 
         p->sched_reset_on_fork = reset_on_fork;
 
+        if (p->policy == SCHED_LITMUS)
+                litmus_exit_task(p);
+
         oldprio = p->prio;
         __setscheduler(rq, p, policy, param->sched_priority);
 
+        if (policy == SCHED_LITMUS) {
+                p->rt_param.stack_in_use = running ? rq->cpu : NO_CPU;
+                p->rt_param.present = running;
+                litmus->task_new(p, on_rq, running);
+        }
+
         if (running)
                 p->sched_class->set_curr_task(rq);
         if (on_rq) {
@@ -6500,10 +6575,11 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
         read_lock(&tasklist_lock);
 
         p = find_process_by_pid(pid);
-        if (!p) {
+        /* Don't set affinity if task not found and for LITMUS tasks */
+        if (!p || is_realtime(p)) {
                 read_unlock(&tasklist_lock);
                 put_online_cpus();
-                return -ESRCH;
+                return p ? -EPERM : -ESRCH;
         }
 
         /*
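As a usage note, with the sched.c changes above a task enters the LITMUS^RT class through the ordinary sched_setscheduler() path: the policy check now accepts SCHED_LITMUS, litmus_admit_task() vets the task, and the active plugin's task_new() callback is invoked. A minimal user-space sketch follows; the numeric value of SCHED_LITMUS and the requirement to configure real-time parameters before admission are assumptions here, and real LITMUS^RT programs normally use the liblitmus helpers rather than raw sched_setscheduler():

#include <sched.h>
#include <stdio.h>
#include <string.h>

#ifndef SCHED_LITMUS
#define SCHED_LITMUS 6  /* assumed value; defined by the patched kernel headers */
#endif

int main(void)
{
        struct sched_param param;

        memset(&param, 0, sizeof(param));
        param.sched_priority = 0;       /* non-RT policies must pass priority 0 */

        /* Fails if the task is already SCHED_LITMUS (the new -EINVAL check
         * above) or if litmus_admit_task() rejects it, e.g. because no
         * real-time parameters were set up beforehand (assumption). */
        if (sched_setscheduler(0, SCHED_LITMUS, &param) != 0)
                perror("sched_setscheduler(SCHED_LITMUS)");

        return 0;
}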
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 37087a7fac22..ef43ff95999d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1598,7 +1598,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 
         update_curr(cfs_rq);
 
-        if (unlikely(rt_prio(p->prio))) {
+        if (unlikely(rt_prio(p->prio)) || p->policy == SCHED_LITMUS) {
                 resched_task(curr);
                 return;
         }
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index a4d790cddb19..f622880e918f 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1004,7 +1004,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
          */
 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
 {
-        if (p->prio < rq->curr->prio) {
+        if (p->prio < rq->curr->prio || p->policy == SCHED_LITMUS) {
                 resched_task(rq->curr);
                 return;
         }