author    Jonathan Herman <hermanjl@cs.unc.edu>  2011-10-09 17:40:56 -0400
committer Jonathan Herman <hermanjl@cs.unc.edu>  2011-10-09 17:40:56 -0400
commit    dd3b483c07497e34ad948890dc7cd871cd68dc7a
tree      b86915b761354f3013e90fde29924542da15b562
parent    75a56ba6a0321945786486070bf17882f4265907
Fixed a timer bug and implemented finish switch.
The logic for cancelling a remote timer allowed a timer to be enqueued
twice in the pull list. A function, hrtimer_pull_cancel(), was added to
hrtimer.c to cancel such timers properly. A finish_switch handler was
added to fix a stack bug; this required a change to the global_preempted
check in update_crit_levels().
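In short: cancelling a start_on timer must also unlink its
hrtimer_start_on_info from the remote CPU's pull list, under that CPU's
base lock; the old path only flipped the state flag and called
hrtimer_try_to_cancel(), so a later hrtimer_start_on() could enqueue the
same node a second time. Below is an annotated sketch of the added
function (the code follows the kernel/hrtimer.c hunk; the comments are
explanatory glosses, not from the source):

void hrtimer_pull_cancel(int cpu, struct hrtimer *timer,
                         struct hrtimer_start_on_info *info)
{
        struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);

        /* Serialize against hrtimer_pull() on the remote CPU and
         * unlink info, so it cannot end up on the pull list twice. */
        raw_spin_lock(&base->lock);
        list_del_init(&info->list);     /* no-op if already pulled */
        raw_spin_unlock(&base->lock);

        /* Mark inactive so a racing hrtimer_pull() will not arm the
         * timer, then cancel it in case it is already armed. */
        atomic_set(&info->state, HRTIMER_START_ON_INACTIVE);
        hrtimer_try_to_cancel(timer);
}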
-rw-r--r--  include/linux/hrtimer.h |  2
-rw-r--r--  kernel/hrtimer.c        | 23
-rw-r--r--  litmus/event_group.c    |  6
-rw-r--r--  litmus/sched_mc.c       | 37

4 files changed, 53 insertions(+), 15 deletions(-)
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 76da541c1f66..573cb3419625 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -378,6 +378,8 @@ extern void hrtimer_start_on_info_init(struct hrtimer_start_on_info *info);
 extern int hrtimer_start_on(int cpu, struct hrtimer_start_on_info *info,
                             struct hrtimer *timer, ktime_t time,
                             const enum hrtimer_mode mode);
+extern void hrtimer_pull_cancel(int cpu, struct hrtimer *timer,
+                                struct hrtimer_start_on_info *info);
 #endif
 
 extern int hrtimer_cancel(struct hrtimer *timer);
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 94cf1c0e44c3..a8c3135dfed5 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1053,6 +1053,7 @@ void hrtimer_start_on_info_init(struct hrtimer_start_on_info *info)
 {
         memset(info, 0, sizeof(struct hrtimer_start_on_info));
         atomic_set(&info->state, HRTIMER_START_ON_INACTIVE);
+        INIT_LIST_HEAD(&info->list);
 }
 
 /**
@@ -1071,7 +1072,7 @@ void hrtimer_pull(void)
         list_for_each_safe(pos, safe, &list) {
                 info = list_entry(pos, struct hrtimer_start_on_info, list);
                 TRACE("pulled timer 0x%x\n", info->timer);
-                list_del(pos);
+                list_del_init(pos);
                 if (atomic_read(&info->state) != HRTIMER_START_ON_INACTIVE)
                         hrtimer_start(info->timer, info->time, info->mode);
                 if (atomic_read(&info->state) == HRTIMER_START_ON_INACTIVE)
@@ -1080,6 +1081,22 @@ void hrtimer_pull(void)
 }
 
 /**
+ * hrtimer_pull_cancel - Cancel a remote timer pull
+ */
+void hrtimer_pull_cancel(int cpu, struct hrtimer *timer,
+                         struct hrtimer_start_on_info *info)
+{
+        struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
+
+        raw_spin_lock(&base->lock);
+        list_del_init(&info->list);
+        raw_spin_unlock(&base->lock);
+
+        atomic_set(&info->state, HRTIMER_START_ON_INACTIVE);
+        hrtimer_try_to_cancel(timer);
+}
+
+/**
  * hrtimer_start_on - trigger timer arming on remote cpu
  * @cpu: remote cpu
  * @info: save timer information for enqueuing on remote cpu
@@ -1088,8 +1105,8 @@ void hrtimer_pull(void)
  * @mode: timer mode
  */
 int hrtimer_start_on(int cpu, struct hrtimer_start_on_info* info,
                      struct hrtimer *timer, ktime_t time,
                      const enum hrtimer_mode mode)
 {
         unsigned long flags;
         struct hrtimer_cpu_base* base;
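The list_del() -> list_del_init() change in hrtimer_pull(), paired with
the new INIT_LIST_HEAD() in hrtimer_start_on_info_init(), is what lets
hrtimer_pull_cancel() unlink unconditionally: list_del() leaves the
node's link pointers stale (poisoned in the kernel), so removing the
same node twice corrupts the list, while list_del_init() re-points the
node at itself and is idempotent. A minimal userspace sketch of that
property, using hypothetical stand-ins for the list.h primitives:

#include <assert.h>
#include <stdio.h>

/* Stand-ins for the kernel's list.h primitives (illustration only). */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *head)
{
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
}

/* list_del_init(): unlink n, then re-point it at itself so that a
 * second unlink is a harmless self-splice instead of corruption. */
static void list_del_init(struct list_head *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        INIT_LIST_HEAD(n);
}

int main(void)
{
        struct list_head pull_list, info;

        INIT_LIST_HEAD(&pull_list);
        INIT_LIST_HEAD(&info);        /* as hrtimer_start_on_info_init() now does */

        list_add(&info, &pull_list);  /* timer queued for a remote pull */
        list_del_init(&info);         /* pulled (or cancelled) once */
        list_del_init(&info);         /* racing cancel: safe no-op */

        assert(pull_list.next == &pull_list && pull_list.prev == &pull_list);
        printf("pull list intact after double removal\n");
        return 0;
}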
diff --git a/litmus/event_group.c b/litmus/event_group.c
index 0535f9f9bf96..518ccb459137 100644
--- a/litmus/event_group.c
+++ b/litmus/event_group.c
@@ -233,8 +233,9 @@ void cancel_event(struct rt_event *e)
                 }
 
                 /* Disable the event_list */
-                atomic_set(&e->event_list->info.state, HRTIMER_START_ON_INACTIVE);
-                hrtimer_try_to_cancel(&e->event_list->timer);
+                hrtimer_pull_cancel(group->cpu,
+                                    &e->event_list->timer,
+                                    &e->event_list->info);
                 list_del_init(&e->event_list->queue_node);
         } else {
                 VTRACE("List 0x%p is empty\n", e->event_list);
@@ -253,6 +254,7 @@ struct event_list* event_list_alloc(int gfp_flags)
                 hrtimer_init(&el->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
                 INIT_LIST_HEAD(&el->queue_node);
                 el->timer.function = on_timer;
+                hrtimer_start_on_info_init(&el->info);
         } else {
                 VTRACE("Failed to allocate event list!\n");
                 printk(KERN_CRIT "Failed to allocate event list.\n");
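Note that event_list_alloc() now runs hrtimer_start_on_info_init() on
the embedded info, so el->info.list is self-linked from allocation
onward; the unconditional list_del_init() in hrtimer_pull_cancel() is
then a harmless no-op for an event whose timer was never handed to a
remote CPU.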
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 100470d302a1..277762709d3d 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -440,9 +440,14 @@ static void update_crit_levels(struct cpu_entry *entry)
         for (i = level + 1; i < NUM_CRIT_LEVELS; i++) {
                 ce = &entry->crit_entries[i];
 
-                global_preempted = entry->scheduled == ce->linked &&
-                        ce->linked && entry->linked &&
+                global_preempted = ce->linked &&
+                        /* This task is running */
+                        ce->linked->rt_param.scheduled_on == entry->cpu &&
+                        /* But it was preempted */
+                        ce->linked != entry->linked &&
+                        /* And it is an eligible global task */
                         !is_ghost(ce->linked) && is_global(ce->domain);
+
                 /* Do not readmit global tasks which are preempted! These can't
                  * ever be re-admitted until they are descheduled for reasons
                  * explained in job_arrival.
@@ -857,7 +862,7 @@ out:
 /**
  * mc_schedule() - Return next task which should be scheduled.
  */
-static struct task_struct* mc_schedule(struct task_struct * prev)
+static struct task_struct* mc_schedule(struct task_struct* prev)
 {
         unsigned long flags;
         struct domain *dom;
@@ -867,6 +872,11 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
         struct task_struct *dtask = NULL, *ready_task = NULL, *next = NULL;
 
         local_irq_save(flags);
+        raw_spin_lock(&entry->lock);
+
+        if (entry->scheduled) {
+                TRACE_TASK(entry->scheduled, " and prev is %d\n", prev->pid);
+        }
 
         /* Sanity checking */
         BUG_ON(entry->scheduled && entry->scheduled != prev);
@@ -874,7 +884,6 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
         BUG_ON(is_realtime(prev) && !entry->scheduled);
 
         /* Determine state */
-        raw_spin_lock(&entry->lock);
         exists = entry->scheduled != NULL;
         blocks = exists && !is_running(entry->scheduled);
         out_of_time = exists && budget_enforced(entry->scheduled) &&
@@ -885,11 +894,12 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
         lower = exists && preempt && entry->linked &&
                 tsk_mc_crit(entry->scheduled) > tsk_mc_crit(entry->linked);
 
-        if (exists) {
-                entry->scheduled->rt_param.scheduled_on = NO_CPU;
-        }
         TRACE(TS " blocks:%d out_of_time:%d sleep:%d preempt:%d\n",
               TA(prev), blocks, out_of_time, sleep, preempt);
+
+        if (exists)
+                prev->rt_param.scheduled_on = NO_CPU;
+
         raw_spin_unlock(&entry->lock);
 
 
@@ -953,9 +963,8 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 
         /* Schedule next task */
         next = entry->linked;
-        entry->scheduled = next;
-        if (entry->scheduled)
-                entry->scheduled->rt_param.scheduled_on = entry->cpu;
+        if (entry->linked)
+                entry->linked->rt_param.scheduled_on = entry->cpu;
         sched_state_task_picked();
 
         raw_spin_unlock(&entry->lock);
@@ -967,6 +976,13 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
         return next;
 }
 
+void mc_finish_switch(struct task_struct *prev)
+{
+        struct cpu_entry* entry = &__get_cpu_var(cpus);
+        entry->scheduled = is_realtime(current) ? current : NULL;
+        TRACE_TASK(prev, "Switched away from\n");
+}
+
 /*
  * This is the plugin's release at function, called by the release task-set
  * system call. Other places in the file use the generic LITMUS release_at(),
@@ -1060,6 +1076,7 @@ static struct sched_plugin mc_plugin __cacheline_aligned_in_smp = {
         .activate_plugin        = mc_activate_plugin,
         .release_at             = mc_release_at,
         .deactivate_plugin      = mc_deactivate_plugin,
+        .finish_switch          = mc_finish_switch,
 };
 
 static void init_crit_entry(struct crit_entry *ce, enum crit_level level,
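The finish_switch half of the fix follows the pattern of the other
LITMUS^RT plugins: mc_schedule() manages rt_param.scheduled_on under
entry->lock, while entry->scheduled itself is only updated from
mc_finish_switch(), once the context switch has actually completed and
current is the incoming task. This is presumably the stack bug the
commit message refers to: updating entry->scheduled before the switch
finishes could let another CPU treat the departing task as runnable
while its stack was still in use.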