author	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2007-05-10 15:50:28 -0400
committer	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2007-05-10 15:50:28 -0400
commit	55af2530696652dd7687f3e9baa4ab992b38529b (patch)
tree	7a03d1a5b159c599c58e643007d180e33478f4d2
parent	43e4fc9dc6f608c6a794cfb0e2b458c37ab2d8b9 (diff)
beautify P-EDF code
- make local and remote cpu state and edf domain references explicit
-rw-r--r--	kernel/sched_part_edf.c	33
1 files changed, 16 insertions, 17 deletions
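
For context, a minimal sketch of the pattern this patch introduces (not part of the commit): the four macros wrap the 2.6-era per-CPU primitives, __get_cpu_var() for the current CPU and per_cpu() for an arbitrary one, so each call site names whether it touches the local CPU's scheduler state or a remote CPU's. The macro bodies and the part_edf_domains variable come from the patch itself; the DEFINE_PER_CPU declaration and example_caller() are assumptions added for illustration.

#include <linux/percpu.h>	/* DEFINE_PER_CPU, __get_cpu_var, per_cpu */

/* One part_edf_domain_t per processor; declaration assumed from the
 * __get_cpu_var()/per_cpu() usage in the patch. */
DEFINE_PER_CPU(part_edf_domain_t, part_edf_domains);

/* Accessors introduced by the patch: "local_" resolves to this CPU's
 * state via __get_cpu_var(), "remote_" to any CPU's via per_cpu(). */
#define local_edf	 (&__get_cpu_var(part_edf_domains).domain)
#define local_pedf	 (&__get_cpu_var(part_edf_domains))
#define remote_edf(cpu)	 (&per_cpu(part_edf_domains, cpu).domain)
#define remote_pedf(cpu) (&per_cpu(part_edf_domains, cpu))

/* Hypothetical caller (not in the patch): the macro names make the
 * local/remote distinction visible at the call site. */
static void example_caller(struct task_struct *t)
{
	edf_domain_t *mine   = local_edf;                    /* this CPU's domain    */
	edf_domain_t *theirs = remote_edf(get_partition(t)); /* the task's partition */
	/* ... lock and operate on mine / theirs as needed ... */
}
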
diff --git a/kernel/sched_part_edf.c b/kernel/sched_part_edf.c
index 46a999e122..792e022833 100644
--- a/kernel/sched_part_edf.c
+++ b/kernel/sched_part_edf.c
@@ -22,6 +22,12 @@ typedef struct {
 } part_edf_domain_t;
 
 
+#define local_edf (&__get_cpu_var(part_edf_domains).domain)
+#define local_pedf (&__get_cpu_var(part_edf_domains))
+#define remote_edf(cpu) (&per_cpu(part_edf_domains, cpu).domain)
+#define remote_pedf(cpu) (&per_cpu(part_edf_domains, cpu))
+
+
 static void part_edf_domain_init(part_edf_domain_t* pedf,
 				 edf_check_resched_needed_t check,
 				 int cpu)
@@ -65,7 +71,7 @@ static reschedule_check_t part_edf_scheduler_tick(void)
 	unsigned long flags;
 	struct task_struct *t = current;
 	reschedule_check_t want_resched = NO_RESCHED;
-	edf_domain_t *edf = &__get_cpu_var(part_edf_domains).domain;
+	edf_domain_t *edf = local_edf;
 
 	/* expire tasks even if not in real-time mode
 	 * this makes sure that at the end of real-time mode
@@ -105,7 +111,7 @@ static int part_edf_schedule(struct task_struct * prev,
 {
 	int need_deactivate = 1;
 	unsigned long flags;
-	part_edf_domain_t* pedf = &__get_cpu_var(part_edf_domains);
+	part_edf_domain_t* pedf = local_pedf;
 	edf_domain_t* edf = &pedf->domain;
 
 
@@ -152,7 +158,7 @@ static int part_edf_schedule(struct task_struct * prev,
 
 static void part_edf_finish_switch(struct task_struct *prev)
 {
-	edf_domain_t* edf = &__get_cpu_var(part_edf_domains).domain;
+	edf_domain_t* edf = local_edf;
 
 	if (!is_realtime(prev) || !is_running(prev))
 		return;
@@ -188,12 +194,11 @@ static void part_edf_finish_switch(struct task_struct *prev)
  */
 static long part_edf_prepare_task(struct task_struct * t)
 {
-	int target_cpu = get_partition(t);
-	edf_domain_t* edf = &per_cpu(part_edf_domains, target_cpu).domain;
+	edf_domain_t* edf = remote_edf(get_partition(t));
 
 
 	printk(KERN_WARNING "[%d] part edf: prepare task %d on CPU %d\n",
-	       smp_processor_id(), t->pid, target_cpu);
+	       smp_processor_id(), t->pid, get_partition(t));
 	if (t->state == TASK_STOPPED) {
 		__setscheduler(t, SCHED_FIFO, MAX_RT_PRIO - 1);
 
@@ -216,7 +221,7 @@ static void part_edf_wake_up_task(struct task_struct *task)
 {
 	edf_domain_t* edf;
 
-	edf = &per_cpu(part_edf_domains, get_partition(task)).domain;
+	edf = remote_edf(get_partition(task));
 
 	/* We must determine whether task should go into the release
 	 * queue or into the ready queue. It may enter the ready queue
@@ -283,16 +288,10 @@ static long part_edf_tear_down(struct task_struct * t)
 static int part_edf_mode_change(int new_mode)
 {
 	int cpu;
-	edf_domain_t* edf;
-
 
-	if (new_mode == MODE_RT_RUN) {
-		for_each_online_cpu(cpu) {
-			edf = &per_cpu(part_edf_domains, cpu).domain;
-			prepare_new_releases(edf, jiffies);
-		}
-
-	}
+	if (new_mode == MODE_RT_RUN)
+		for_each_online_cpu(cpu)
+			prepare_new_releases(remote_edf(cpu), jiffies);
 	TRACE("[%d] part edf: mode changed to %d\n",
 	      smp_processor_id(), new_mode);
 	return 0;
@@ -334,7 +333,7 @@ sched_plugin_t *__init init_part_edf_plugin(void)
 	set_sched_options(SCHED_NONE);
 	for (i = 0; i < NR_CPUS; i++)
 	{
-		part_edf_domain_init(&per_cpu(part_edf_domains, i),
+		part_edf_domain_init(remote_pedf(i),
 				     part_edf_check_resched, i);
 		printk("CPU partition %d initialized.", i);
 	}
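
Design note: before this change, telling a local access apart from a remote one meant spotting __get_cpu_var() versus per_cpu() in the middle of an expression. The local_*/remote_*() names make that distinction explicit at each call site, and they let part_edf_prepare_task() and part_edf_mode_change() drop their temporary variables, which is why the patch removes slightly more than it adds (16 insertions against 17 deletions).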