about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorBjoern B. Brandenburg <bbb@cs.unc.edu>2008-02-13 14:13:15 -0500
committerBjoern B. Brandenburg <bbb@cs.unc.edu>2008-02-13 14:13:15 -0500
commit8ce9b0cb97d9266b3b64b2b57835e17f6e03f585 (patch)
treea6ef1acaf9c9dc116ccc9f24f5233fa7d25cd426 /kernel
parent49914084e797530d9baaf51df9eda77babc98fa8 (diff)
LITMUS 2008: Initial Port
This introduces the core changes ported from LITMUS 2007. The kernel seems to work under QEMU, but many bugs probably remain.
Diffstat (limited to 'kernel')
-rw-r--r--kernel/exit.c4
-rw-r--r--kernel/fork.c5
-rw-r--r--kernel/sched.c34
-rw-r--r--kernel/sched_fair.c2
-rw-r--r--kernel/sched_rt.c2
5 files changed, 42 insertions, 5 deletions
diff --git a/kernel/exit.c b/kernel/exit.c
index 549c0558ba..bc313b74a1 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -52,6 +52,8 @@
52 52
53extern void sem_exit (void); 53extern void sem_exit (void);
54 54
55extern void exit_od_table(struct task_struct* t);
56
55static void exit_mm(struct task_struct * tsk); 57static void exit_mm(struct task_struct * tsk);
56 58
57static void __unhash_process(struct task_struct *p) 59static void __unhash_process(struct task_struct *p)
@@ -987,6 +989,8 @@ fastcall NORET_TYPE void do_exit(long code)
987 if (unlikely(tsk->audit_context)) 989 if (unlikely(tsk->audit_context))
988 audit_free(tsk); 990 audit_free(tsk);
989 991
992 exit_od_table(tsk);
993
990 tsk->exit_code = code; 994 tsk->exit_code = code;
991 taskstats_exit(tsk, group_dead); 995 taskstats_exit(tsk, group_dead);
992 996
diff --git a/kernel/fork.c b/kernel/fork.c
index 8dd8ff2810..9e42d3a207 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -59,6 +59,9 @@
59#include <asm/cacheflush.h> 59#include <asm/cacheflush.h>
60#include <asm/tlbflush.h> 60#include <asm/tlbflush.h>
61 61
62#include <litmus/litmus.h>
63#include <litmus/sched_plugin.h>
64
62/* 65/*
63 * Protected counters by write_lock_irq(&tasklist_lock) 66 * Protected counters by write_lock_irq(&tasklist_lock)
64 */ 67 */
@@ -121,6 +124,8 @@ void __put_task_struct(struct task_struct *tsk)
121 WARN_ON(atomic_read(&tsk->usage)); 124 WARN_ON(atomic_read(&tsk->usage));
122 WARN_ON(tsk == current); 125 WARN_ON(tsk == current);
123 126
127 exit_litmus(tsk);
128
124 security_task_free(tsk); 129 security_task_free(tsk);
125 free_uid(tsk->user); 130 free_uid(tsk->user);
126 put_group_info(tsk->group_info); 131 put_group_info(tsk->group_info);
diff --git a/kernel/sched.c b/kernel/sched.c
index e76b11ca6d..4890a12786 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -324,6 +324,8 @@ struct rq {
324 324
325 atomic_t nr_iowait; 325 atomic_t nr_iowait;
326 326
327 struct task_struct* litmus_next;
328
327#ifdef CONFIG_SMP 329#ifdef CONFIG_SMP
328 struct sched_domain *sd; 330 struct sched_domain *sd;
329 331
@@ -875,11 +877,12 @@ static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
875#include "sched_idletask.c" 877#include "sched_idletask.c"
876#include "sched_fair.c" 878#include "sched_fair.c"
877#include "sched_rt.c" 879#include "sched_rt.c"
880#include "../litmus/sched_litmus.c"
878#ifdef CONFIG_SCHED_DEBUG 881#ifdef CONFIG_SCHED_DEBUG
879# include "sched_debug.c" 882# include "sched_debug.c"
880#endif 883#endif
881 884
882#define sched_class_highest (&rt_sched_class) 885#define sched_class_highest (&litmus_sched_class)
883 886
884/* 887/*
885 * Update delta_exec, delta_fair fields for rq. 888 * Update delta_exec, delta_fair fields for rq.
@@ -1529,7 +1532,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
1529 this_cpu = smp_processor_id(); 1532 this_cpu = smp_processor_id();
1530 1533
1531#ifdef CONFIG_SMP 1534#ifdef CONFIG_SMP
1532 if (unlikely(task_running(rq, p))) 1535 if (unlikely(task_running(rq, p) || is_realtime(p)))
1533 goto out_activate; 1536 goto out_activate;
1534 1537
1535 new_cpu = cpu; 1538 new_cpu = cpu;
@@ -1890,6 +1893,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
1890 */ 1893 */
1891 prev_state = prev->state; 1894 prev_state = prev->state;
1892 finish_arch_switch(prev); 1895 finish_arch_switch(prev);
1896 litmus->finish_switch(prev);
1893 finish_lock_switch(rq, prev); 1897 finish_lock_switch(rq, prev);
1894 fire_sched_in_preempt_notifiers(current); 1898 fire_sched_in_preempt_notifiers(current);
1895 if (mm) 1899 if (mm)
@@ -3491,6 +3495,7 @@ void scheduler_tick(void)
3491 update_cpu_load(rq); 3495 update_cpu_load(rq);
3492 if (curr != rq->idle) /* FIXME: needed? */ 3496 if (curr != rq->idle) /* FIXME: needed? */
3493 curr->sched_class->task_tick(rq, curr); 3497 curr->sched_class->task_tick(rq, curr);
3498 litmus_tick(rq, curr);
3494 spin_unlock(&rq->lock); 3499 spin_unlock(&rq->lock);
3495 3500
3496#ifdef CONFIG_SMP 3501#ifdef CONFIG_SMP
@@ -3641,6 +3646,10 @@ need_resched_nonpreemptible:
3641 */ 3646 */
3642 local_irq_disable(); 3647 local_irq_disable();
3643 __update_rq_clock(rq); 3648 __update_rq_clock(rq);
3649 /* do litmus scheduling outside of rq lock, so that we
3650 * can do proper migrations for global schedulers
3651 */
3652 litmus_schedule(rq, prev);
3644 spin_lock(&rq->lock); 3653 spin_lock(&rq->lock);
3645 clear_tsk_need_resched(prev); 3654 clear_tsk_need_resched(prev);
3646 3655
@@ -4236,6 +4245,9 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
4236 case SCHED_RR: 4245 case SCHED_RR:
4237 p->sched_class = &rt_sched_class; 4246 p->sched_class = &rt_sched_class;
4238 break; 4247 break;
4248 case SCHED_LITMUS:
4249 p->sched_class = &litmus_sched_class;
4250 break;
4239 } 4251 }
4240 4252
4241 p->rt_priority = prio; 4253 p->rt_priority = prio;
@@ -4268,7 +4280,7 @@ recheck:
4268 policy = oldpolicy = p->policy; 4280 policy = oldpolicy = p->policy;
4269 else if (policy != SCHED_FIFO && policy != SCHED_RR && 4281 else if (policy != SCHED_FIFO && policy != SCHED_RR &&
4270 policy != SCHED_NORMAL && policy != SCHED_BATCH && 4282 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
4271 policy != SCHED_IDLE) 4283 policy != SCHED_IDLE && policy != SCHED_LITMUS)
4272 return -EINVAL; 4284 return -EINVAL;
4273 /* 4285 /*
4274 * Valid priorities for SCHED_FIFO and SCHED_RR are 4286 * Valid priorities for SCHED_FIFO and SCHED_RR are
@@ -4282,6 +4294,9 @@ recheck:
4282 if (rt_policy(policy) != (param->sched_priority != 0)) 4294 if (rt_policy(policy) != (param->sched_priority != 0))
4283 return -EINVAL; 4295 return -EINVAL;
4284 4296
4297 if (policy == SCHED_LITMUS && policy == p->policy)
4298 return -EINVAL;
4299
4285 /* 4300 /*
4286 * Allow unprivileged RT tasks to decrease priority: 4301 * Allow unprivileged RT tasks to decrease priority:
4287 */ 4302 */
@@ -4316,6 +4331,12 @@ recheck:
4316 return -EPERM; 4331 return -EPERM;
4317 } 4332 }
4318 4333
4334 if (policy == SCHED_LITMUS) {
4335 retval = litmus_admit_task(p);
4336 if (retval)
4337 return retval;
4338 }
4339
4319 retval = security_task_setscheduler(p, policy, param); 4340 retval = security_task_setscheduler(p, policy, param);
4320 if (retval) 4341 if (retval)
4321 return retval; 4342 return retval;
@@ -4345,9 +4366,15 @@ recheck:
4345 p->sched_class->put_prev_task(rq, p); 4366 p->sched_class->put_prev_task(rq, p);
4346 } 4367 }
4347 4368
4369 if (p->policy == SCHED_LITMUS)
4370 litmus_exit_task(p);
4371
4348 oldprio = p->prio; 4372 oldprio = p->prio;
4349 __setscheduler(rq, p, policy, param->sched_priority); 4373 __setscheduler(rq, p, policy, param->sched_priority);
4350 4374
4375 if (policy == SCHED_LITMUS)
4376 litmus->task_new(p, on_rq, running);
4377
4351 if (on_rq) { 4378 if (on_rq) {
4352 if (running) 4379 if (running)
4353 p->sched_class->set_curr_task(rq); 4380 p->sched_class->set_curr_task(rq);
@@ -4364,6 +4391,7 @@ recheck:
4364 check_preempt_curr(rq, p); 4391 check_preempt_curr(rq, p);
4365 } 4392 }
4366 } 4393 }
4394
4367 __task_rq_unlock(rq); 4395 __task_rq_unlock(rq);
4368 spin_unlock_irqrestore(&p->pi_lock, flags); 4396 spin_unlock_irqrestore(&p->pi_lock, flags);
4369 4397
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index da7c061e72..de30496263 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -845,7 +845,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
845 struct sched_entity *se = &curr->se, *pse = &p->se; 845 struct sched_entity *se = &curr->se, *pse = &p->se;
846 unsigned long gran; 846 unsigned long gran;
847 847
848 if (unlikely(rt_prio(p->prio))) { 848 if (unlikely(rt_prio(p->prio) || p->policy == SCHED_LITMUS)) {
849 update_rq_clock(rq); 849 update_rq_clock(rq);
850 update_curr(cfs_rq); 850 update_curr(cfs_rq);
851 resched_task(curr); 851 resched_task(curr);
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 9ba3daa034..c7c938cee2 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -70,7 +70,7 @@ yield_task_rt(struct rq *rq)
70 */ 70 */
71static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p) 71static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
72{ 72{
73 if (p->prio < rq->curr->prio) 73 if (p->prio < rq->curr->prio || p->policy == SCHED_LITMUS)
74 resched_task(rq->curr); 74 resched_task(rq->curr);
75} 75}
76 76