author    Bjoern B. Brandenburg <bbb@cs.unc.edu>  2008-05-04 18:15:29 -0400
committer Bjoern B. Brandenburg <bbb@cs.unc.edu>  2008-05-04 18:15:29 -0400
commit    00dc2101d1d50f4cf20035cfcd66e7ce2569f8d6 (patch)
tree      a315ebaa9738ab7bb544ec20b24f450af6abea16
parent    6d05093e061460dab8c3f8e517fb9906c9abe669 (diff)
GSN-EDF: facilitate debugging
Make the per-CPU scheduler state reachable from gdb by keeping a pointer to each CPU's cpu_entry_t in the global gsnedf_cpus[] array.
 litmus/sched_gsn_edf.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)
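
Per-CPU data declared with DEFINE_PER_CPU is addressed through per-CPU offsets resolved at runtime, which makes it awkward to inspect from gdb; the patch therefore records a pointer to each CPU's cpu_entry_t in an ordinary global array, gsnedf_cpus[], that a debugger can dereference directly (e.g. something like "p *gsnedf_cpus[0]"). Below is a minimal user-space sketch of that pattern, not LITMUS^RT code: NR_CPUS, cpu_entry_t's field names, and gsnedf_cpus mirror the patch, while the static backing array stands in for the per-CPU storage and the atomic_t will_schedule is simplified to a plain int.

/*
 * Minimal user-space sketch (NOT LITMUS^RT code) of the pattern this patch
 * applies: keep a plain global array of pointers to per-CPU state so that a
 * debugger can reach the state through an ordinary symbol.
 */
#include <stddef.h>
#include <stdio.h>

#define NR_CPUS 4			/* illustrative value */

typedef struct {
	int	will_schedule;		/* atomic_t in the real code */
	void	*linked;		/* task linked to this CPU (opaque here) */
	void	*scheduled;		/* task currently running here (opaque here) */
} cpu_entry_t;

/* Stand-in for the DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries) storage. */
static cpu_entry_t cpu_entries[NR_CPUS];

/* The symbol the patch introduces: one pointer per CPU, easy to follow in gdb. */
cpu_entry_t *gsnedf_cpus[NR_CPUS];

int main(void)
{
	int cpu;

	/* Mirrors the init_gsn_edf() hunk: record each entry's address,
	 * then initialize its fields. */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		cpu_entry_t *entry = &cpu_entries[cpu];

		gsnedf_cpus[cpu] = entry;
		entry->will_schedule = 0;
		entry->linked = NULL;
		entry->scheduled = NULL;
	}

	printf("CPU 0 state lives at %p\n", (void *) gsnedf_cpus[0]);
	return 0;
}

Functionally, the patch adds only the gsnedf_cpus[] declaration and the single assignment in init_gsn_edf(); the remaining hunks appear to be a reworded TRACE message and a cosmetic whitespace change.
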
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 854dfcf141..08069abe1c 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -99,6 +99,8 @@ typedef struct {
 } cpu_entry_t;
 DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries);
 
+cpu_entry_t* gsnedf_cpus[NR_CPUS];
+
 #define set_will_schedule() \
 	(atomic_set(&__get_cpu_var(gsnedf_cpu_entries).will_schedule, 1))
 #define clear_will_schedule() \
@@ -292,7 +294,7 @@ static noinline void gsnedf_job_arrival(struct task_struct* task)
 	if (edf_preemption_needed(&gsnedf, last->linked)) {
 		/* preemption necessary */
 		task = __take_ready(&gsnedf);
-		TRACE("job_arrival: task %d linked to %d\n",
+		TRACE("job_arrival: attempting to link task %d to %d\n",
 		      task->pid, last->cpu);
 		if (last->linked)
 			requeue(last->linked);
@@ -386,8 +388,8 @@ static void gsnedf_tick(struct task_struct* t)
  */
 static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 {
-	cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries);
-	int out_of_time, sleep, preempt, np, exists, blocks;
+	cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries);
+	int out_of_time, sleep, preempt, np, exists, blocks;
 	struct task_struct* next = NULL;
 
 	/* Will be released in finish_switch. */
@@ -695,6 +697,7 @@ static int __init init_gsn_edf(void)
 	/* initialize CPU state */
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		entry = &per_cpu(gsnedf_cpu_entries, cpu);
+		gsnedf_cpus[cpu] = entry;
 		atomic_set(&entry->will_schedule, 0);
 		entry->linked = NULL;
 		entry->scheduled = NULL;