author    Bjoern B. Brandenburg <bbb@cs.unc.edu>  2007-05-10 16:10:29 -0400
committer Bjoern B. Brandenburg <bbb@cs.unc.edu>  2007-05-10 16:10:29 -0400
commit    47f0c01efae04bdcc370ec2ba6f3d7607c8cbcaa (patch)
tree      a637d8091beb741cb02edbb9cb43cd0a9296165a
parent    aa8383edcae1c5709e42528b7f4845f5862636a6 (diff)
Some random code cleanup.

- IRQs are disabled in _schedule
- use TRACE
- coding style
-rw-r--r--  kernel/sched_part_edf.c | 35 +++++++++++++++++------------------
1 file changed, 17 insertions(+), 18 deletions(-)
diff --git a/kernel/sched_part_edf.c b/kernel/sched_part_edf.c
index 792e022833..cd4dd0e621 100644
--- a/kernel/sched_part_edf.c
+++ b/kernel/sched_part_edf.c
@@ -26,7 +26,7 @@ typedef struct {
 #define local_pedf (&__get_cpu_var(part_edf_domains))
 #define remote_edf(cpu) (&per_cpu(part_edf_domains, cpu).domain)
 #define remote_pedf(cpu) (&per_cpu(part_edf_domains, cpu))
-
+#define task_edf(task) remote_edf(get_partition(task))
 
 static void part_edf_domain_init(part_edf_domain_t* pedf,
				 edf_check_resched_needed_t check,
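The one functional change in this hunk is the new task_edf() helper, which wraps the existing remote_edf()/get_partition() pair. The hunks in part_edf_prepare_task() and part_edf_wake_up_task() further down use it to shorten their call sites, roughly:

	edf_domain_t *edf;

	/* before: spell out the partition lookup at every call site */
	edf = remote_edf(get_partition(task));

	/* after: task_edf(task) expands to exactly the line above */
	edf = task_edf(task);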
@@ -71,7 +71,14 @@ static reschedule_check_t part_edf_scheduler_tick(void)
 	unsigned long flags;
 	struct task_struct *t = current;
 	reschedule_check_t want_resched = NO_RESCHED;
 	edf_domain_t *edf = local_edf;
+	part_edf_domain_t *pedf = local_pedf;
+
+	/* Check for inconsistency. We don't need the lock for this since
+	 * ->scheduled is only changed in schedule, which obviously is not
+	 * executing in parallel on this CPU
+	 */
+	BUG_ON(is_realtime(t) && t != pedf->scheduled);
 
 	/* expire tasks even if not in real-time mode
 	 * this makes sure that at the end of real-time mode
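The added BUG_ON() is a lockless sanity check: reading pedf->scheduled without taking ready_lock is safe only because, as the comment notes, that field is written exclusively by the schedule path on the same CPU. A minimal sketch of the idea; the exact layout of part_edf_domain_t is an assumption here, since only its domain and scheduled members are visible in this patch:

	/* Per-CPU plugin state (layout assumed): ->scheduled is written
	 * only by part_edf_schedule() on the owning CPU. The tick runs
	 * on that same CPU with IRQs off, so no writer can execute
	 * concurrently and the unlocked read cannot race. */
	typedef struct {
		edf_domain_t		domain;
		struct task_struct	*scheduled;
	} part_edf_domain_t;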
@@ -81,7 +88,6 @@ static reschedule_check_t part_edf_scheduler_tick(void)
 		/* this task has exhausted its budget in this period */
 		set_rt_flags(t, RT_F_SLEEP);
 		want_resched = FORCE_RESCHED;
-		/*set_will_schedule();*/
 	}
 	if (get_rt_mode() == MODE_RT_RUN)
 	{
@@ -95,10 +101,7 @@ static reschedule_check_t part_edf_scheduler_tick(void)
 		{
 			read_lock_irqsave(&edf->ready_lock, flags);
 			if (preemption_needed(edf, t))
-			{
 				want_resched = FORCE_RESCHED;
-				/*set_will_schedule();*/
-			}
 			read_unlock_irqrestore(&edf->ready_lock, flags);
 		}
 	}
@@ -110,7 +113,6 @@ static int part_edf_schedule(struct task_struct * prev,
			     runqueue_t * rq)
 {
 	int need_deactivate = 1;
-	unsigned long flags;
 	part_edf_domain_t* pedf = local_pedf;
 	edf_domain_t* edf = &pedf->domain;
 
@@ -119,8 +121,7 @@ static int part_edf_schedule(struct task_struct * prev,
		prepare_for_next_period(prev);
 
 	if (get_rt_mode() == MODE_RT_RUN) {
-		write_lock_irqsave(&edf->ready_lock, flags);
-		/*clear_will_schedule();*/
+		write_lock(&edf->ready_lock);
 		if (is_realtime(prev) && is_released(prev) && is_running(prev)
 		    && !preemption_needed(edf, prev)) {
 			/* this really should only happen if the task has
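Together with the flags removal in the previous hunk, this is the "IRQs are disabled in _schedule" item from the commit message: schedule() already runs with local interrupts off, so saving and restoring the IRQ state around the ready-queue lock was redundant work on a hot path. A kernel-style sketch of the pattern, with a hypothetical function name standing in for the locked region:

	#include <linux/spinlock.h>

	static DEFINE_RWLOCK(ready_lock);

	/* Hypothetical stand-in for the locked region of
	 * part_edf_schedule(). The caller has already disabled local
	 * IRQs, so there are no flags to save: plain write_lock() /
	 * write_unlock() is sufficient. */
	static void pick_next_edf_task(void)
	{
		write_lock(&ready_lock);	/* not write_lock_irqsave() */
		/* ... select the next task, update its RT flags ... */
		write_unlock(&ready_lock);
	}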
@@ -144,7 +145,7 @@ static int part_edf_schedule(struct task_struct * prev,
 		if (*next)
 			set_rt_flags(*next, RT_F_RUNNING);
 
-		write_unlock_irqrestore(&edf->ready_lock, flags);
+		write_unlock(&edf->ready_lock);
 	}
 
 	if (is_realtime(prev) && need_deactivate && prev->array) {
@@ -194,10 +195,10 @@ static void part_edf_finish_switch(struct task_struct *prev)
  */
 static long part_edf_prepare_task(struct task_struct * t)
 {
-	edf_domain_t* edf = remote_edf(get_partition(t));
+	edf_domain_t* edf = task_edf(t);
 
 
-	printk(KERN_WARNING "[%d] part edf: prepare task %d on CPU %d\n",
+	TRACE("[%d] part edf: prepare task %d on CPU %d\n",
 	      smp_processor_id(), t->pid, get_partition(t));
 	if (t->state == TASK_STOPPED) {
 		__setscheduler(t, SCHED_FIFO, MAX_RT_PRIO - 1);
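Besides picking up task_edf(), this hunk converts an unconditional printk(KERN_WARNING ...) to TRACE(), matching the logging already used in part_edf_task_blocks() and part_edf_tear_down() below. TRACE() itself is not defined in this patch; a hypothetical sketch of the usual shape of such a macro, to make the intent clear:

	/* Hypothetical: route debug output through one switchable macro
	 * instead of unconditional printk() calls. */
	#ifdef DEBUG_TRACE
	#define TRACE(fmt, args...) \
		printk(KERN_DEBUG "TRACE: " fmt, ## args)
	#else
	#define TRACE(fmt, args...)	/* compiled out */
	#endif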
@@ -221,7 +222,7 @@ static void part_edf_wake_up_task(struct task_struct *task)
 {
 	edf_domain_t* edf;
 
-	edf = remote_edf(get_partition(task));
+	edf = task_edf(task);
 
 	/* We must determine whether task should go into the release
 	 * queue or into the ready queue. It may enter the ready queue
@@ -265,8 +266,7 @@ static void part_edf_task_blocks(struct task_struct *t)
 	 * SIGSTOP.
 	 */
 	TRACE("task %d blocks with budget=%d\n", t->pid, t->time_slice);
-	BUG_ON(t->rt_list.next != LIST_POISON1);
-	BUG_ON(t->rt_list.prev != LIST_POISON2);
+	BUG_ON(in_list(&t->rt_list));
 }
 
 
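Here and in part_edf_tear_down() below, the pair of open-coded LIST_POISON assertions collapses into a single in_list() check. The helper's definition is not part of this patch, but the code it replaces pins down the core semantics: list_del() poisons a node's pointers, so an off-list node is a poisoned one. A hypothetical sketch consistent with that (also treating a freshly initialized, never-queued node as not in a list):

	#include <linux/list.h>

	/* Hypothetical in_list(): true iff the node is linked into a list. */
	static inline int in_list(struct list_head *node)
	{
		/* deleted: list_del() poisoned the pointers */
		if (node->next == LIST_POISON1 && node->prev == LIST_POISON2)
			return 0;
		/* initialized but never added: points at itself */
		if (node->next == node && node->prev == node)
			return 0;
		return 1;
	}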
@@ -279,15 +279,14 @@ static long part_edf_tear_down(struct task_struct * t)
 	BUG_ON(!is_realtime(t));
 	TRACE("part edf: tear down called for %d \n", t->pid);
 	BUG_ON(t->array);
-	BUG_ON(t->rt_list.next != LIST_POISON1);
-	BUG_ON(t->rt_list.prev != LIST_POISON2);
+	BUG_ON(in_list(&t->rt_list));
 	return 0;
 }
 
 
 static int part_edf_mode_change(int new_mode)
 {
 	int cpu;
 
 	if (new_mode == MODE_RT_RUN)
		for_each_online_cpu(cpu)