aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorBjoern Brandenburg <bb@DS-12.(none)>2007-02-05 14:37:19 -0500
committerBjoern Brandenburg <bb@DS-12.(none)>2007-02-05 14:37:19 -0500
commit8370c02df9081469ce07a1689a9fb1c970d166a3 (patch)
treec84b85428740df049fa1fc9580efb3e8637080a1 /kernel
parente82442320ac062ac83149cd37daeb746f6f25b2c (diff)
Fix a bunch of bugs that crept in while porting.
* Our old clone flag is already taken in 2.6.20. * Fix wrong is_running() macro. * Remove double ->finish_switch() call. * Move sched_trace_scheduled to non-preemptible section. * Allow next = idle task in RT mode.
Diffstat (limited to 'kernel')
-rw-r--r--kernel/litmus.c4
-rw-r--r--kernel/sched.c25
-rw-r--r--kernel/sched_global_edf.c10
3 files changed, 16 insertions, 23 deletions
diff --git a/kernel/litmus.c b/kernel/litmus.c
index faf82931f5..04a28159e1 100644
--- a/kernel/litmus.c
+++ b/kernel/litmus.c
@@ -82,6 +82,8 @@ asmlinkage long sys_set_rt_task_param(pid_t pid, rt_param_t __user * param)
82 struct task_struct *target; 82 struct task_struct *target;
83 int retval = -EINVAL; 83 int retval = -EINVAL;
84 84
85 printk("Setting up rt task parameters for process %d.\n", pid);
86
85 if (pid < 0 || param == 0) { 87 if (pid < 0 || param == 0) {
86 goto out; 88 goto out;
87 } 89 }
@@ -253,7 +255,7 @@ reschedule_check_t __sched rt_scheduler_tick(void)
253 255
254 /* update rt start time */ 256 /* update rt start time */
255 rt_start_time = jiffies; 257 rt_start_time = jiffies;
256 TRACE("Real-Time mode enabled\n"); 258 printk(KERN_INFO "Real-Time mode enabled\n");
257 if (curr_sched_plugin->mode_change) 259 if (curr_sched_plugin->mode_change)
258 curr_sched_plugin-> 260 curr_sched_plugin->
259 mode_change(atomic_read(&new_mode)); 261 mode_change(atomic_read(&new_mode));
diff --git a/kernel/sched.c b/kernel/sched.c
index 784a97fe33..308e624b43 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1679,10 +1679,10 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
1679 unsigned long flags; 1679 unsigned long flags;
1680 int this_cpu, cpu; 1680 int this_cpu, cpu;
1681 1681
1682 if(clone_flags & CLONE_REALTIME) { 1682 if (clone_flags & CLONE_REALTIME) {
1683 /* just mark the task as stopped */ 1683 /* just mark the task as stopped */
1684 /* CLEANUP: Do we have to remove the task from the rq? */ 1684 /* CLEANUP: Do we have to remove the task from the rq? */
1685 p->state=TASK_STOPPED; 1685 p->state = TASK_STOPPED;
1686 return; 1686 return;
1687 } 1687 }
1688 1688
@@ -1848,6 +1848,7 @@ static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
1848 */ 1848 */
1849 if (is_realtime(prev) && is_running(prev)) 1849 if (is_realtime(prev) && is_running(prev))
1850 curr_sched_plugin->finish_switch(prev); 1850 curr_sched_plugin->finish_switch(prev);
1851 sched_trace_task_scheduled(current);
1851 finish_lock_switch(rq, prev); 1852 finish_lock_switch(rq, prev);
1852 if (mm) 1853 if (mm)
1853 mmdrop(mm); 1854 mmdrop(mm);
@@ -1870,9 +1871,6 @@ asmlinkage void schedule_tail(struct task_struct *prev)
1870{ 1871{
1871 struct rq *rq = this_rq(); 1872 struct rq *rq = this_rq();
1872 1873
1873 if (is_realtime(prev) && is_running(prev))
1874 curr_sched_plugin->finish_switch(prev);
1875
1876 finish_task_switch(rq, prev); 1874 finish_task_switch(rq, prev);
1877#ifdef __ARCH_WANT_UNLOCKED_CTXSW 1875#ifdef __ARCH_WANT_UNLOCKED_CTXSW
1878 /* In this case, finish_task_switch does not reenable preemption */ 1876 /* In this case, finish_task_switch does not reenable preemption */
@@ -1880,8 +1878,6 @@ asmlinkage void schedule_tail(struct task_struct *prev)
1880#endif 1878#endif
1881 if (current->set_child_tid) 1879 if (current->set_child_tid)
1882 put_user(current->pid, current->set_child_tid); 1880 put_user(current->pid, current->set_child_tid);
1883
1884 sched_trace_task_scheduled(current);
1885} 1881}
1886 1882
1887/* 1883/*
@@ -3591,12 +3587,13 @@ need_resched_nonpreemptible:
3591 goto switch_tasks; 3587 goto switch_tasks;
3592 3588
3593 cpu = smp_processor_id(); 3589 cpu = smp_processor_id();
3594 /* only load-balance if we are not in RT mode 3590 if (unlikely(!rq->nr_running)) {
3595 * TODO: Maybe this can be relaxed by modifiying the load-balancing 3591 /* only load-balance if we are not in RT mode
3596 * routines in such a way that they never touch real-time tasks. 3592 * TODO: Maybe this can be relaxed by modifiying the load-balancing
3597 */ 3593 * routines in such a way that they never touch real-time tasks.
3598 if (unlikely(!rq->nr_running) && get_rt_mode() == MODE_NON_RT) { 3594 */
3599 idle_balance(cpu, rq); 3595 if (get_rt_mode() == MODE_NON_RT)
3596 idle_balance(cpu, rq);
3600 if (!rq->nr_running) { 3597 if (!rq->nr_running) {
3601 next = rq->idle; 3598 next = rq->idle;
3602 rq->expired_timestamp = 0; 3599 rq->expired_timestamp = 0;
@@ -3684,8 +3681,6 @@ switch_tasks:
3684 preempt_enable_no_resched(); 3681 preempt_enable_no_resched();
3685 if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) 3682 if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
3686 goto need_resched; 3683 goto need_resched;
3687
3688 sched_trace_task_scheduled(current);
3689} 3684}
3690EXPORT_SYMBOL(schedule); 3685EXPORT_SYMBOL(schedule);
3691 3686
diff --git a/kernel/sched_global_edf.c b/kernel/sched_global_edf.c
index 0b761c4230..a37709a561 100644
--- a/kernel/sched_global_edf.c
+++ b/kernel/sched_global_edf.c
@@ -197,10 +197,6 @@ static int gedf_schedule(struct task_struct * prev,
197 if (get_rt_mode() == MODE_RT_RUN) { 197 if (get_rt_mode() == MODE_RT_RUN) {
198 write_lock_irqsave(&gedf.ready_lock, flags); 198 write_lock_irqsave(&gedf.ready_lock, flags);
199 199
200 if (is_realtime(prev))
201 DUMP(" [%d] is_realtime(prev) = %d\n",
202 smp_processor_id(), is_realtime(prev));
203
204 clear_will_schedule(); 200 clear_will_schedule();
205 201
206 if (is_realtime(prev) && is_released(prev) && is_running(prev) 202 if (is_realtime(prev) && is_released(prev) && is_running(prev)
@@ -260,7 +256,7 @@ static int gedf_schedule(struct task_struct * prev,
260 */ 256 */
261static void gedf_finish_switch(struct task_struct *prev) 257static void gedf_finish_switch(struct task_struct *prev)
262{ 258{
263 DUMP("finish switch for %d\n", prev->pid); 259 printk(KERN_INFO "gedf finish switch for %d\n", prev->pid);
264 if (get_rt_flags(prev) == RT_F_SLEEP || 260 if (get_rt_flags(prev) == RT_F_SLEEP ||
265 get_rt_mode() != MODE_RT_RUN) { 261 get_rt_mode() != MODE_RT_RUN) {
266 /* this task has expired 262 /* this task has expired
@@ -386,7 +382,7 @@ static int gedf_mode_change(int new_mode)
386 int cpu; 382 int cpu;
387 cpu_entry_t *entry; 383 cpu_entry_t *entry;
388 384
389 TRACE("[%d] global edf: mode changed to %d\n", smp_processor_id(), 385 printk(KERN_INFO "[%d] global edf: mode changed to %d\n", smp_processor_id(),
390 new_mode); 386 new_mode);
391 if (new_mode == MODE_RT_RUN) { 387 if (new_mode == MODE_RT_RUN) {
392 prepare_new_releases(&gedf, jiffies + 10); 388 prepare_new_releases(&gedf, jiffies + 10);
@@ -413,7 +409,7 @@ static int gedf_mode_change(int new_mode)
413 } 409 }
414 spin_unlock(&gedf_cpu_lock); 410 spin_unlock(&gedf_cpu_lock);
415 } 411 }
416 TRACE("[%d] global edf: mode change done\n", smp_processor_id()); 412 printk(KERN_INFO "[%d] global edf: mode change done\n", smp_processor_id());
417 return 0; 413 return 0;
418} 414}
419 415