author     Bjoern B. Brandenburg <bbb@cs.unc.edu>  2008-02-02 12:59:13 -0500
committer  Bjoern B. Brandenburg <bbb@cs.unc.edu>  2008-02-02 12:59:13 -0500
commit     4fc10fcc4f4fc5758b1df9d2c9935a2a963e1510 (patch)
tree       1993287e0fffc2953252792a3d60c5a56c01ed8e
parent     f2d65d2183cdb65f2d716760d9a527837bff44ce (diff)
litmus: get rid of WANT_RESCHED and make dummy functions static
-rw-r--r--  include/linux/sched_plugin.h  | 13
-rw-r--r--  kernel/sched.c                |  3
-rw-r--r--  kernel/sched_gsn_edf.c        | 21
-rw-r--r--  kernel/sched_plugin.c         | 32
-rw-r--r--  kernel/sched_psn_edf.c        | 20
5 files changed, 28 insertions, 61 deletions
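
The patch drops the reschedule_check_t return value from the plugin tick path: instead of returning FORCE_RESCHED to scheduler_tick() in kernel/sched.c, a plugin now calls set_tsk_need_resched() on the current task directly, and the dummy plugin callbacks in kernel/sched_plugin.c become static. A minimal sketch of the new convention follows (the function name is illustrative only, not part of the patch):

/* Sketch, not part of the commit: a plugin tick handler under the new
 * convention returns void and requests a reschedule itself.
 */
static void example_plugin_scheduler_tick(void)
{
	struct task_struct *t = current;

	/* budget exhausted and the task is preemptable: flag a reschedule */
	if (is_realtime(t) && get_exec_time(t) > get_exec_cost(t) && !is_np(t))
		set_tsk_need_resched(t);
}
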
diff --git a/include/linux/sched_plugin.h b/include/linux/sched_plugin.h
index 6bc2fc3788..1fcc9d558b 100644
--- a/include/linux/sched_plugin.h
+++ b/include/linux/sched_plugin.h
@@ -34,14 +34,8 @@ typedef void runqueue_t;
 
 /********************* scheduler invocation ******************/
 
-typedef enum {
-	NO_RESCHED = 0,
-	FORCE_RESCHED = 1
-} reschedule_check_t;
-
-
 /* Plugin-specific realtime tick handler */
-typedef reschedule_check_t (*scheduler_tick_t) (void);
+typedef void (*scheduler_tick_t) (void);
 /* Novell make sched decision function */
 typedef int (*schedule_t) (struct task_struct * prev,
 			   struct task_struct ** next,
@@ -114,11 +108,6 @@ struct sched_plugin {
 
 extern struct sched_plugin *curr_sched_plugin;
 
-
-/* common scheduler tick */
-reschedule_check_t rt_scheduler_tick(void);
-
-
 /* Don't pull in our definitions on top of the real ones
  * in sched.c!
  */
diff --git a/kernel/sched.c b/kernel/sched.c
index ec59d583b3..dd38df5d88 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3321,8 +3321,7 @@ void scheduler_tick(void)
	 * schedule
	 */
	TS_PLUGIN_TICK_START;
-	if (curr_sched_plugin->scheduler_tick() == FORCE_RESCHED)
-		set_tsk_need_resched(p);
+	curr_sched_plugin->scheduler_tick();
	TS_PLUGIN_TICK_END;
 
	send_scheduler_signals();
diff --git a/kernel/sched_gsn_edf.c b/kernel/sched_gsn_edf.c
index 423c5468a0..3958cb2f37 100644
--- a/kernel/sched_gsn_edf.c
+++ b/kernel/sched_gsn_edf.c
@@ -333,26 +333,17 @@ static noinline void gsnedf_release_jobs(void)
  * checks whether the current task has expired and checks
  * whether we need to preempt it if it has not expired
  */
-static reschedule_check_t gsnedf_scheduler_tick(void)
+static void gsnedf_scheduler_tick(void)
 {
 	unsigned long flags;
 	struct task_struct* t = current;
-	reschedule_check_t want_resched = NO_RESCHED;
-
-	/* expire tasks even if not in real-time mode
-	 * this makes sure that at the end of real-time mode
-	 * no task "runs away forever".
-	 */
-	if (is_realtime(t)) {
-		TRACE_CUR("before dec: time_slice == %u\n", t->time_slice);
-	}
 
 	if (is_realtime(t) && get_exec_time(t) > get_exec_cost(t)) {
 		if (!is_np(t)) {
 			/* np tasks will be preempted when they become
 			 * preemptable again
 			 */
-			want_resched = FORCE_RESCHED;
+			set_tsk_need_resched(t);
 			set_will_schedule();
 			TRACE("gsnedf_scheduler_tick: "
 			      "%d is preemptable "
@@ -369,17 +360,15 @@ static reschedule_check_t gsnedf_scheduler_tick(void)
 	if (smp_processor_id() == 0) {
 		spin_lock_irqsave(&gsnedf_lock, flags);
 
-		/* (1) try to release pending jobs */
+		/* Try to release pending jobs */
 		gsnedf_release_jobs();
 
-		/* we don't need to check linked != scheduled since
-		 * set_tsk_need_resched has been set by preempt() if necessary
+		/* We don't need to check linked != scheduled since
+		 * set_tsk_need_resched has been set by preempt() if necessary.
 		 */
 
 		spin_unlock_irqrestore(&gsnedf_lock, flags);
 	}
-
-	return want_resched;
 }
 
 /* caller holds gsnedf_lock */
diff --git a/kernel/sched_plugin.c b/kernel/sched_plugin.c
index 10abdb5df7..eb236b3a3e 100644
--- a/kernel/sched_plugin.c
+++ b/kernel/sched_plugin.c
@@ -15,63 +15,61 @@
  * Dummy plugin functions *
  *************************************************************/
 
-void litmus_dummy_finish_switch(struct task_struct * prev)
+static void litmus_dummy_finish_switch(struct task_struct * prev)
 {
 }
 
-int litmus_dummy_schedule(struct task_struct * prev,
+static int litmus_dummy_schedule(struct task_struct * prev,
 			  struct task_struct** next,
 			  runqueue_t* q)
 {
 	return 0;
 }
 
-reschedule_check_t litmus_dummy_scheduler_tick(void)
+static void litmus_dummy_scheduler_tick(void)
 {
-	return NO_RESCHED;
 }
 
-
-long litmus_dummy_prepare_task(struct task_struct *t)
+static long litmus_dummy_prepare_task(struct task_struct *t)
 {
 	return 0;
 }
 
-void litmus_dummy_wake_up_task(struct task_struct *task)
+static void litmus_dummy_wake_up_task(struct task_struct *task)
 {
 	printk(KERN_WARNING "task %d: unhandled real-time wake up!\n",
 	       task->pid);
 }
 
-void litmus_dummy_task_blocks(struct task_struct *task)
+static void litmus_dummy_task_blocks(struct task_struct *task)
 {
 }
 
-long litmus_dummy_tear_down(struct task_struct *task)
+static long litmus_dummy_tear_down(struct task_struct *task)
 {
 	return 0;
 }
 
-long litmus_dummy_sleep_next_period(void)
+static long litmus_dummy_sleep_next_period(void)
 {
-	return -EPERM;
+	return -ENOSYS;
 }
 
-long litmus_dummy_inherit_priority(struct pi_semaphore *sem,
+static long litmus_dummy_inherit_priority(struct pi_semaphore *sem,
 				   struct task_struct *new_owner)
 {
-	return -EPERM;
+	return -ENOSYS;
 }
 
-long litmus_dummy_return_priority(struct pi_semaphore *sem)
+static long litmus_dummy_return_priority(struct pi_semaphore *sem)
 {
-	return -EPERM;
+	return -ENOSYS;
 }
 
-long litmus_dummy_pi_block(struct pi_semaphore *sem,
+static long litmus_dummy_pi_block(struct pi_semaphore *sem,
 			   struct task_struct *new_waiter)
 {
-	return -EPERM;
+	return -ENOSYS;
 }
 
 
diff --git a/kernel/sched_psn_edf.c b/kernel/sched_psn_edf.c
index 8689c53d98..cfe329506e 100644
--- a/kernel/sched_psn_edf.c
+++ b/kernel/sched_psn_edf.c
@@ -99,11 +99,10 @@ static int psnedf_check_resched(rt_domain_t *edf)
 }
 
 
-static reschedule_check_t psnedf_scheduler_tick(void)
+static void psnedf_scheduler_tick(void)
 {
 	unsigned long flags;
 	struct task_struct *t = current;
-	reschedule_check_t want_resched = NO_RESCHED;
 	rt_domain_t *edf = local_edf;
 	psnedf_domain_t *pedf = local_pedf;
 
@@ -113,13 +112,10 @@ static reschedule_check_t psnedf_scheduler_tick(void)
 	 */
 	BUG_ON(is_realtime(t) && t != pedf->scheduled);
 
-	if (is_realtime(t))
-		TRACE("%s/%d was hit by scheduler tick\n", t->comm, t->pid);
-
 	if (is_realtime(t) && get_exec_time(t) > get_exec_cost(t)) {
-		if (!is_np(t)) {
-			want_resched = FORCE_RESCHED;
-		} else {
+		if (!is_np(t))
+			set_tsk_need_resched(t);
+		else {
 			TRACE("psnedf_scheduler_tick: "
 			      "%d is non-preemptable, "
 			      "preemption delayed.\n", t->pid);
@@ -129,13 +125,9 @@ static reschedule_check_t psnedf_scheduler_tick(void)
 
 	spin_lock_irqsave(&pedf->lock, flags);
 	__release_pending(edf);
-	if (want_resched != FORCE_RESCHED &&
-	    edf_preemption_needed(edf, t))
-		want_resched = FORCE_RESCHED;
-
+	if (edf_preemption_needed(edf, t))
+		set_tsk_need_resched(t);
 	spin_unlock_irqrestore(&pedf->lock, flags);
-
-	return want_resched;
 }
 
 static void job_completion(struct task_struct* t)