aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJonathan Herman <hermanjl@cs.unc.edu>2011-10-10 19:24:24 -0400
committerJonathan Herman <hermanjl@cs.unc.edu>2011-10-10 19:24:24 -0400
commit77870ba296b06385088f02516b7346fa7a7756b4 (patch)
treed3d6ebbb69f8159981bb7f83059436ec119d9eef
parent848defae3a19b7e4b160603995db35908fa2a95c (diff)
Fixed level-A crash when canceling task execution
-rw-r--r--include/litmus/sched_trace.h6
-rw-r--r--include/litmus/trace.h1
-rw-r--r--litmus/ce_domain.c3
-rw-r--r--litmus/event_group.c21
-rw-r--r--litmus/rt_domain.c4
-rw-r--r--litmus/sched_mc.c25
6 files changed, 50 insertions, 10 deletions
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index e25a26e9d861..7179fc2745ad 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -193,6 +193,12 @@ feather_callback void do_sched_trace_sys_release(unsigned long id,
193 SCHED_TRACE(SCHED_TRACE_BASE_ID + 10, do_sched_trace_sys_release, when) 193 SCHED_TRACE(SCHED_TRACE_BASE_ID + 10, do_sched_trace_sys_release, when)
194 194
195 195
196#define QT_START lt_t _qt_start = litmus_clock()
197#define QT_END \
198 sched_trace_log_message("%d P%d [%s@%s:%d]: Took %llu\n\n", \
199 TRACE_ARGS, litmus_clock() - _qt_start)
200
201
196#define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */ 202#define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */
197 203
198#endif /* __KERNEL__ */ 204#endif /* __KERNEL__ */
diff --git a/include/litmus/trace.h b/include/litmus/trace.h
index 05f487263f28..95e1ee647a8a 100644
--- a/include/litmus/trace.h
+++ b/include/litmus/trace.h
@@ -99,5 +99,4 @@ feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu)
99#define TS_SEND_RESCHED_START(c) CTIMESTAMP(190, c) 99#define TS_SEND_RESCHED_START(c) CTIMESTAMP(190, c)
100#define TS_SEND_RESCHED_END DTIMESTAMP(191, TSK_UNKNOWN) 100#define TS_SEND_RESCHED_END DTIMESTAMP(191, TSK_UNKNOWN)
101 101
102
103#endif /* !_SYS_TRACE_H_ */ 102#endif /* !_SYS_TRACE_H_ */
diff --git a/litmus/ce_domain.c b/litmus/ce_domain.c
index fd9904e29a08..b2c5d4e935a5 100644
--- a/litmus/ce_domain.c
+++ b/litmus/ce_domain.c
@@ -59,7 +59,8 @@ struct task_struct* ce_peek_and_take_ready(domain_t *dom)
59 struct task_struct *ret = NULL, *sched = ce_data->should_schedule; 59 struct task_struct *ret = NULL, *sched = ce_data->should_schedule;
60 const int exists = NULL != sched; 60 const int exists = NULL != sched;
61 const int blocked = exists && !is_running(sched); 61 const int blocked = exists && !is_running(sched);
62 const int elig = exists && tsk_mc_data(sched)->mc_task.lvl_a_eligible; 62 const int elig = exists && tsk_mc_data(sched) &&
63 tsk_mc_data(sched)->mc_task.lvl_a_eligible;
63 64
64 /* Return the task we should schedule if it is not blocked or sleeping. */ 65 /* Return the task we should schedule if it is not blocked or sleeping. */
65 if (exists && !blocked && elig) 66 if (exists && !blocked && elig)
diff --git a/litmus/event_group.c b/litmus/event_group.c
index 81bc87e98bbd..06520299fb1d 100644
--- a/litmus/event_group.c
+++ b/litmus/event_group.c
@@ -30,15 +30,18 @@ static unsigned int time2slot(lt_t time)
30static enum hrtimer_restart on_timer(struct hrtimer *timer) 30static enum hrtimer_restart on_timer(struct hrtimer *timer)
31{ 31{
32 int prio, num; 32 int prio, num;
33 unsigned long flags;
33 struct event_list *el; 34 struct event_list *el;
34 struct rt_event *e; 35 struct rt_event *e;
35 struct list_head *pos, events[NUM_EVENT_PRIORITIES]; 36 struct list_head *pos, events[NUM_EVENT_PRIORITIES];
36 raw_spinlock_t *queue_lock; 37 raw_spinlock_t *queue_lock;
37 38
39 QT_START;
40
38 el = container_of(timer, struct event_list, timer); 41 el = container_of(timer, struct event_list, timer);
39 queue_lock = &el->group->queue_lock; 42 queue_lock = &el->group->queue_lock;
40 43
41 raw_spin_lock(queue_lock); 44 raw_spin_lock_irqsave(queue_lock, flags);
42 45
43 /* Remove event_list from hashtable so that no more events 46 /* Remove event_list from hashtable so that no more events
44 * are added to it. 47 * are added to it.
@@ -65,18 +68,21 @@ static enum hrtimer_restart on_timer(struct hrtimer *timer)
65 68
66 e = list_entry(pos, struct rt_event, events_node); 69 e = list_entry(pos, struct rt_event, events_node);
67 list_del_init(pos); 70 list_del_init(pos);
68 raw_spin_unlock(queue_lock); 71 raw_spin_unlock_irqrestore(queue_lock, flags);
69 72
70 VTRACE("Dequeueing event 0x%x with prio %d from 0x%x\n", 73 VTRACE("Dequeueing event 0x%x with prio %d from 0x%x\n",
71 e, e->prio, el); 74 e, e->prio, el);
72 e->function(e); 75 e->function(e);
73 76
74 raw_spin_lock(queue_lock); 77 raw_spin_lock_irqsave(queue_lock, flags);
75 } 78 }
76 } 79 }
77 raw_spin_unlock(queue_lock); 80 raw_spin_unlock_irqrestore(queue_lock, flags);
78 81
79 VTRACE("Exhausted %d events from list 0x%x\n", num, el); 82 VTRACE("Exhausted %d events from list 0x%x\n", num, el);
83
84 QT_END;
85
80 return HRTIMER_NORESTART; 86 return HRTIMER_NORESTART;
81} 87}
82 88
@@ -165,6 +171,8 @@ void add_event(struct event_group *group, struct rt_event *e, lt_t fire)
165 struct event_list *el; 171 struct event_list *el;
166 int in_use; 172 int in_use;
167 173
174 QT_START;
175
168 VTRACE("Adding event 0x%x with priority %d for time %llu\n", 176 VTRACE("Adding event 0x%x with priority %d for time %llu\n",
169 e, e->prio, fire); 177 e, e->prio, fire);
170 178
@@ -199,6 +207,8 @@ void add_event(struct event_group *group, struct rt_event *e, lt_t fire)
199 } else { 207 } else {
200 VTRACE("Not my timer @%llu\n", fire); 208 VTRACE("Not my timer @%llu\n", fire);
201 } 209 }
210
211 QT_END;
202} 212}
203 213
204/** 214/**
@@ -212,6 +222,8 @@ void cancel_event(struct rt_event *e)
212 struct event_group *group; 222 struct event_group *group;
213 struct list_head *list, *pos; 223 struct list_head *list, *pos;
214 224
225 QT_START;
226
215 VTRACE("Canceling event 0x%x with priority %d\n", e, e->prio); 227 VTRACE("Canceling event 0x%x with priority %d\n", e, e->prio);
216 group = e->_event_group; 228 group = e->_event_group;
217 if (!group) return; 229 if (!group) return;
@@ -258,6 +270,7 @@ void cancel_event(struct rt_event *e)
258 e->_event_group = NULL; 270 e->_event_group = NULL;
259 271
260 raw_spin_unlock(&group->queue_lock); 272 raw_spin_unlock(&group->queue_lock);
273 QT_END;
261} 274}
262 275
263struct kmem_cache *event_list_cache; 276struct kmem_cache *event_list_cache;
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index 3355eb5a73be..24b7a260e95b 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -405,6 +405,8 @@ static void pd_requeue(domain_t *dom, struct task_struct *task)
405{ 405{
406 rt_domain_t *domain = (rt_domain_t*)dom->data; 406 rt_domain_t *domain = (rt_domain_t*)dom->data;
407 407
408 QT_START;
409
408 BUG_ON(!task || !is_realtime(task)); 410 BUG_ON(!task || !is_realtime(task));
409 TRACE_TASK(task, "Requeueing\n"); 411 TRACE_TASK(task, "Requeueing\n");
410 BUG_ON(is_queued(task)); 412 BUG_ON(is_queued(task));
@@ -420,6 +422,8 @@ static void pd_requeue(domain_t *dom, struct task_struct *task)
420 VTRACE_TASK(task, "add release(), rel=%llu\n", get_release(task)); 422 VTRACE_TASK(task, "add release(), rel=%llu\n", get_release(task));
421 add_release(domain, task); 423 add_release(domain, task);
422 } 424 }
425
426 QT_END;
423} 427}
424 428
425/* pd_take_ready - removes and returns the next ready task from the rt_domain 429/* pd_take_ready - removes and returns the next ready task from the rt_domain
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 5067b79b026b..9edf038ef164 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -492,6 +492,9 @@ static void check_for_preempt(struct domain *dom)
492 entry = crit_cpu(ce); 492 entry = crit_cpu(ce);
493 recheck = 1; 493 recheck = 1;
494 494
495 /* Cache ready task */
496 dom->peek_ready(dom);
497
495 raw_spin_lock(&entry->lock); 498 raw_spin_lock(&entry->lock);
496 if (!can_use(ce)) 499 if (!can_use(ce))
497 /* CPU disabled while locking! */ 500 /* CPU disabled while locking! */
@@ -530,6 +533,8 @@ static void remove_from_all(struct task_struct* task)
530 struct crit_entry *ce; 533 struct crit_entry *ce;
531 struct domain *dom = get_task_domain(task); 534 struct domain *dom = get_task_domain(task);
532 535
536 QT_START;
537
533 TRACE_MC_TASK(task, "Removing from everything\n"); 538 TRACE_MC_TASK(task, "Removing from everything\n");
534 BUG_ON(!task); 539 BUG_ON(!task);
535 540
@@ -551,7 +556,7 @@ static void remove_from_all(struct task_struct* task)
551 link_task_to_cpu(entry, NULL); 556 link_task_to_cpu(entry, NULL);
552 } 557 }
553 } else { 558 } else {
554 TRACE_MC_TASK(task, "Unlinked before we got lock!"); 559 TRACE_MC_TASK(task, "Unlinked before we got lock!\n");
555 } 560 }
556 if (update) 561 if (update)
557 update_crit_levels(entry); 562 update_crit_levels(entry);
@@ -566,6 +571,7 @@ static void remove_from_all(struct task_struct* task)
566 571
567 BUG_ON(is_queued(task)); 572 BUG_ON(is_queued(task));
568 raw_spin_unlock(dom->lock); 573 raw_spin_unlock(dom->lock);
574 QT_END;
569} 575}
570 576
571/** 577/**
@@ -663,6 +669,7 @@ static lt_t __ce_timer_function(struct ce_dom_data *ce_data)
663 struct domain *dom = ce->domain; 669 struct domain *dom = ce->domain;
664 struct task_struct *old_link = NULL; 670 struct task_struct *old_link = NULL;
665 lt_t next_timer_abs; 671 lt_t next_timer_abs;
672 QT_START;
666 673
667 TRACE("MC level-A timer callback for CPU %d\n", ce_data->cpu); 674 TRACE("MC level-A timer callback for CPU %d\n", ce_data->cpu);
668 675
@@ -692,6 +699,7 @@ static lt_t __ce_timer_function(struct ce_dom_data *ce_data)
692 raw_spin_unlock(dom->lock); 699 raw_spin_unlock(dom->lock);
693 check_for_preempt(dom); 700 check_for_preempt(dom);
694 } 701 }
702 QT_END;
695 return next_timer_abs; 703 return next_timer_abs;
696} 704}
697 705
@@ -734,11 +742,15 @@ static void mc_release_jobs(rt_domain_t* rt, struct bheap* tasks)
734 struct task_struct *first = bheap_peek(rt->order, tasks)->value; 742 struct task_struct *first = bheap_peek(rt->order, tasks)->value;
735 struct domain *dom = get_task_domain(first); 743 struct domain *dom = get_task_domain(first);
736 744
745 QT_START;
746
737 raw_spin_lock_irqsave(dom->lock, flags); 747 raw_spin_lock_irqsave(dom->lock, flags);
738 TRACE(TS "Jobs released\n", TA(first)); 748 TRACE(TS "Jobs released\n", TA(first));
739 __merge_ready(rt, tasks); 749 __merge_ready(rt, tasks);
740 check_for_preempt(dom); 750 check_for_preempt(dom);
741 raw_spin_unlock_irqrestore(dom->lock, flags); 751 raw_spin_unlock_irqrestore(dom->lock, flags);
752
753 QT_END;
742} 754}
743 755
744/** 756/**
@@ -877,6 +889,7 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
877 struct cpu_entry* entry = &__get_cpu_var(cpus); 889 struct cpu_entry* entry = &__get_cpu_var(cpus);
878 int i, out_of_time, sleep, preempt, exists, blocks, global, lower; 890 int i, out_of_time, sleep, preempt, exists, blocks, global, lower;
879 struct task_struct *dtask = NULL, *ready_task = NULL, *next = NULL; 891 struct task_struct *dtask = NULL, *ready_task = NULL, *next = NULL;
892 QT_START;
880 893
881 local_irq_save(flags); 894 local_irq_save(flags);
882 raw_spin_lock(&entry->lock); 895 raw_spin_lock(&entry->lock);
@@ -944,12 +957,15 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
944 */ 957 */
945 raw_spin_unlock(&entry->lock); 958 raw_spin_unlock(&entry->lock);
946 raw_spin_lock(dom->lock); 959 raw_spin_lock(dom->lock);
960
961 /* Peek at task here to avoid lock use */
962 dtask = dom->peek_ready(dom);
963
947 raw_spin_lock(&entry->lock); 964 raw_spin_lock(&entry->lock);
948 965
949 /* Now that we hold the domain lock...*/ 966 /* Now that we hold the domain lock...*/
950 fix_crit_position(ce); 967 fix_crit_position(ce);
951 968
952 dtask = dom->peek_ready(dom);
953 if (!entry->linked && !ce->linked && dtask && can_use(ce)) { 969 if (!entry->linked && !ce->linked && dtask && can_use(ce)) {
954 dom->take_ready(dom); 970 dom->take_ready(dom);
955 link_task_to_crit(ce, dtask); 971 link_task_to_crit(ce, dtask);
@@ -977,9 +993,10 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
977 raw_spin_unlock(&entry->lock); 993 raw_spin_unlock(&entry->lock);
978 local_irq_restore(flags); 994 local_irq_restore(flags);
979 if (next) { 995 if (next) {
980 TRACE_TASK(next, "Picked this task\n"); 996 TRACE_MC_TASK(next, "Picked this task\n");
981 } else if (exists && !next) 997 } else if (exists && !next)
982 TRACE_ENTRY(entry, "Becomes idle at %llu\n", litmus_clock()); 998 TRACE_ENTRY(entry, "Becomes idle at %llu\n", litmus_clock());
999 QT_END;
983 return next; 1000 return next;
984} 1001}
985 1002
@@ -987,7 +1004,7 @@ void mc_finish_switch(struct task_struct *prev)
987{ 1004{
988 struct cpu_entry* entry = &__get_cpu_var(cpus); 1005 struct cpu_entry* entry = &__get_cpu_var(cpus);
989 entry->scheduled = is_realtime(current) ? current : NULL; 1006 entry->scheduled = is_realtime(current) ? current : NULL;
990 TRACE_TASK(prev, "Switched away from to " TS, 1007 TRACE_TASK(prev, "Switched away from to " TS "\n",
991 TA(entry->scheduled)); 1008 TA(entry->scheduled));
992} 1009}
993 1010