author	Jonathan Herman <hermanjl@cs.unc.edu>	2011-10-10 16:37:01 -0400
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2011-10-10 16:37:01 -0400
commit	848defae3a19b7e4b160603995db35908fa2a95c (patch)
tree	290847d59897c489f37f6926005dbe78cce52782
parent	c9133cb774fa1f7390007c88fe807b681c8e08e2 (diff)
Adding events is now accomplished in O(1) time
-rw-r--r--	include/linux/hrtimer.h		4
-rw-r--r--	include/litmus/event_group.h	5
-rw-r--r--	kernel/hrtimer.c		6
-rw-r--r--	litmus/event_group.c		179
-rw-r--r--	litmus/sched_mc.c		4
5 files changed, 104 insertions, 94 deletions
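Context for the change: add_event() previously called insert_event(), which walked a single priority-sorted list to find the insertion point, so queueing an event cost O(n) in the number of already-queued events. This patch instead gives each event_list one list head per priority level (NUM_EVENT_PRIORITIES); adding an event becomes a single list_add() into the bucket for its priority, and on_timer() preserves priority-ordered firing (lower prio value first, as in the deleted insert_event()) by draining the buckets in order. The sketch below is a standalone userspace illustration of that idea with a hand-rolled list in place of the kernel's list.h; the names mirror the patch, but queue_event() and the rest are hypothetical, not the kernel source.

/* Standalone sketch of the commit's idea (hypothetical userspace code,
 * not the kernel sources): one list head per priority makes insertion
 * O(1), while firing can still proceed in priority order by draining
 * the buckets lowest-prio-value first.
 */
#include <stddef.h>
#include <stdio.h>

#define NUM_EVENT_PRIORITIES 4	/* one bucket per criticality level */

struct node { struct node *next, *prev; };

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_init(struct node *h) { h->next = h->prev = h; }

/* O(1): splice right behind the bucket's head; no traversal */
static void list_add(struct node *n, struct node *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

struct rt_event {
	int prio;
	struct node events_node;
};

struct event_list {
	struct node events[NUM_EVENT_PRIORITIES];
};

/* What add_event() now boils down to: index by priority, one splice */
static void queue_event(struct event_list *el, struct rt_event *e)
{
	list_add(&e->events_node, &el->events[e->prio]);
}

int main(void)
{
	struct event_list el;
	struct rt_event a = { 2, { 0, 0 } }, b = { 0, { 0, 0 } };
	struct node *pos;
	int prio;

	for (prio = 0; prio < NUM_EVENT_PRIORITIES; prio++)
		list_init(&el.events[prio]);

	queue_event(&el, &a);	/* insertion order does not matter */
	queue_event(&el, &b);

	/* "fire" in priority order, as the reworked on_timer() does */
	for (prio = 0; prio < NUM_EVENT_PRIORITIES; prio++)
		for (pos = el.events[prio].next;
		     pos != &el.events[prio]; pos = pos->next)
			printf("fired event with prio %d\n",
			       list_entry(pos, struct rt_event,
					  events_node)->prio);
	return 0;
}

The trade-off is a small fixed number of priorities (here, the criticality levels of the MC scheduler): firing always visits NUM_EVENT_PRIORITIES buckets, but insertion cost no longer grows with queue length.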
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 573cb3419625..0315d04211f3 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -378,8 +378,8 @@ extern void hrtimer_start_on_info_init(struct hrtimer_start_on_info *info);
 extern int hrtimer_start_on(int cpu, struct hrtimer_start_on_info *info,
 			    struct hrtimer *timer, ktime_t time,
 			    const enum hrtimer_mode mode);
-extern void hrtimer_pull_cancel(int cpu, struct hrtimer *timer,
-				struct hrtimer_start_on_info *info);
+extern int hrtimer_pull_cancel(int cpu, struct hrtimer *timer,
+			       struct hrtimer_start_on_info *info);
 #endif
 
 extern int hrtimer_cancel(struct hrtimer *timer);
diff --git a/include/litmus/event_group.h b/include/litmus/event_group.h
index 90ddd0bfe673..b0654e0ec5e6 100644
--- a/include/litmus/event_group.h
+++ b/include/litmus/event_group.h
@@ -3,6 +3,8 @@
 
 #define EVENT_QUEUE_SLOTS 127 /* prime */
 
+#define NUM_EVENT_PRIORITIES 4 /* num crit levels really */
+
 struct rt_event;
 typedef void (*fire_event_t)(struct rt_event *e);
 
@@ -17,7 +19,8 @@ struct event_group {
  * A group of actions to fire at a given time
  */
 struct event_list {
-	struct list_head events;
+	/* Use multiple list heads so that inserts are O(1) */
+	struct list_head events[NUM_EVENT_PRIORITIES];
 
 	/* For timer firing */
 	lt_t fire_time;
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index a8c3135dfed5..c215ddbf49d0 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1083,8 +1083,8 @@ void hrtimer_pull(void)
 /**
  * hrtimer_pull_cancel - Cancel a remote timer pull
  */
-void hrtimer_pull_cancel(int cpu, struct hrtimer *timer,
-			 struct hrtimer_start_on_info *info)
+int hrtimer_pull_cancel(int cpu, struct hrtimer *timer,
+			struct hrtimer_start_on_info *info)
 {
 	struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
 
@@ -1093,7 +1093,7 @@ void hrtimer_pull_cancel(int cpu, struct hrtimer *timer,
 	raw_spin_unlock(&base->lock);
 
 	atomic_set(&info->state, HRTIMER_START_ON_INACTIVE);
-	hrtimer_try_to_cancel(timer);
+	return hrtimer_try_to_cancel(timer);
 }
 
 /**
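Note on the two hrtimer hunks above: hrtimer_pull_cancel() changes from void to int so that the result of hrtimer_try_to_cancel() reaches the caller (0: the timer was not queued, 1: it was dequeued before firing, -1: its callback is executing right now). reinit_event_list() in litmus/event_group.c below depends on this: BUG_ON(t_ret == 1) asserts the list is never reinitialized out from under a still-queued timer, and -1 flags that on_timer() is mid-flight. A minimal userspace mock of that tri-state contract (illustrative only; mock_pull_cancel() is hypothetical, not the hrtimer API):

#include <stdio.h>

static int mock_pull_cancel(int callback_running, int queued)
{
	if (callback_running)
		return -1;	/* mirrors hrtimer_try_to_cancel() */
	return queued ? 1 : 0;
}

int main(void)
{
	int ret = mock_pull_cancel(1, 0);

	if (ret == -1)
		printf("callback runs concurrently; treat list as in flight\n");
	else if (ret == 1)
		printf("timer dequeued before firing\n");
	else
		printf("timer was already inactive\n");
	return 0;
}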
diff --git a/litmus/event_group.c b/litmus/event_group.c
index 4a54881fb256..81bc87e98bbd 100644
--- a/litmus/event_group.c
+++ b/litmus/event_group.c
@@ -29,71 +29,55 @@ static unsigned int time2slot(lt_t time)
  */
 static enum hrtimer_restart on_timer(struct hrtimer *timer)
 {
-	unsigned long num = 0;
+	int prio, num;
 	struct event_list *el;
 	struct rt_event *e;
-	struct list_head *pos, events;
+	struct list_head *pos, events[NUM_EVENT_PRIORITIES];
+	raw_spinlock_t *queue_lock;
 
 	el = container_of(timer, struct event_list, timer);
+	queue_lock = &el->group->queue_lock;
 
-	raw_spin_lock(&el->group->queue_lock);
+	raw_spin_lock(queue_lock);
 
-	/* Remove event_list from hashtable so no more events
+	/* Remove event_list from hashtable so that no more events
 	 * are added to it.
 	 */
-	VTRACE("Removing event list 0x%p\n", el);
+	VTRACE("Removing event list 0x%x\n", el);
 	list_del_init(&el->queue_node);
 
-	/* Empty event list so that it can be re-used as soon
-	 * as the lock is released.
+	/* Copy over events so that the event_list can re-used when the lock
+	 * is released.
 	 */
-	VTRACE("Emptying event list 0x%p\n", el);
-	list_replace_init(&el->events, &events);
-
-	/* Fire events */
-	for (pos = events.next; prefetch(pos->next), events.next != &events;
-	     pos = events.next, num++) {
-
-		e = list_entry(pos, struct rt_event, events_node);
-		list_del_init(pos);
-		raw_spin_unlock(&el->group->queue_lock);
-
-		VTRACE("Dequeueing event 0x%p with prio %d from 0x%p\n",
-		       e, e->prio, el);
-
-		e->function(e);
-
-		raw_spin_lock(&el->group->queue_lock);
+	VTRACE("Emptying event list 0x%x\n", el);
+	for (prio = 0; prio < NUM_EVENT_PRIORITIES; prio++) {
+		list_replace_init(&el->events[prio], &events[prio]);
+	}
+
+	for (prio = 0; prio < NUM_EVENT_PRIORITIES; prio++) {
+		/* Fire events. Complicated loop is used so that events
+		 * in the list can be canceled (removed) while other events are
+		 * executing.
+		 */
+		for (pos = events[prio].next, num = 0;
+		     prefetch(pos->next), events[prio].next != &events[prio];
+		     pos = events[prio].next, num++) {
+
+			e = list_entry(pos, struct rt_event, events_node);
+			list_del_init(pos);
+			raw_spin_unlock(queue_lock);
+
+			VTRACE("Dequeueing event 0x%x with prio %d from 0x%x\n",
+			       e, e->prio, el);
+			e->function(e);
+
+			raw_spin_lock(queue_lock);
+		}
 	}
-	raw_spin_unlock(&el->group->queue_lock);
-	VTRACE("Exhausted %d events from list 0x%p\n", num, el);
-	/* sched_trace_action(NULL, num); */
+	raw_spin_unlock(queue_lock);
+
+	VTRACE("Exhausted %d events from list 0x%x\n", num, el);
 	return HRTIMER_NORESTART;
 }
 
-/*
- * Insert event in event-list, respecting priority order.
- */
-void insert_event(struct event_list *el, struct rt_event *e)
-{
-	struct list_head *pos, *last = NULL;
-	struct rt_event *queued;
-	list_for_each(pos, &el->events) {
-		queued = list_entry(pos, struct rt_event, events_node);
-		last = pos;
-		if (e->prio < queued->prio) {
-			VTRACE("Inserting priority %d event 0x%p before %d 0x%p "
-			       "in 0x%p, pos 0x%p\n", e->prio, e,
-			       queued->prio, &queued->events_node, el, pos);
-			BUG_ON(!list_empty(&e->events_node));
-			list_add_tail(&e->events_node, pos);
-			return;
-		}
-	}
-	VTRACE("Inserting priority %d event 0x%p at end of 0x%p, last 0x%p\n",
-	       e->prio, e, el, last);
-	BUG_ON(!list_empty(&e->events_node));
-	list_add(&e->events_node, (last) ? last : pos);
-}
-
 /*
@@ -111,7 +95,7 @@ static struct event_list* get_event_list(struct event_group *group,
 	unsigned int slot = time2slot(fire);
 	int remaining = 300;
 
-	VTRACE("Getting list for time %llu, event 0x%p\n", fire, e);
+	VTRACE("Getting list for time %llu, event 0x%x\n", fire, e);
 
 	/* Initialize pos for the case that the list is empty */
 	pos = group->event_queue[slot].next;
@@ -120,7 +104,7 @@ static struct event_list* get_event_list(struct event_group *group,
 		tmp = list_entry(pos, struct event_list, queue_node);
 		if (lt_after_eq(fire, tmp->fire_time) &&
 		    lt_before(fire, tmp->fire_time + group->res)) {
-			VTRACE("Found match 0x%p at time %llu\n",
+			VTRACE("Found match 0x%x at time %llu\n",
 			       tmp, tmp->fire_time);
 			el = tmp;
 			break;
@@ -142,7 +126,7 @@ static struct event_list* get_event_list(struct event_group *group,
 	tmp->fire_time = fire;
 	tmp->group = group;
 	/* Add to queue */
-	VTRACE("Using list 0x%p for priority %d and time %llu\n",
+	VTRACE("Using list 0x%x for priority %d and time %llu\n",
 	       tmp, e->prio, fire);
 	BUG_ON(!list_empty(&tmp->queue_node));
 	list_add(&tmp->queue_node, pos->prev);
@@ -154,18 +138,23 @@ static struct event_list* get_event_list(struct event_group *group,
 /*
  * Prepare a release list for a new set of events.
  */
-static void reinit_event_list(struct rt_event *e)
+static void reinit_event_list(struct event_group *group, struct rt_event *e)
 {
+	int prio, t_ret;
 	struct event_list *el = e->event_list;
-	int ret;
-	VTRACE("Reinitting list 0x%p for event 0x%p\n", el, e);
-	ret = hrtimer_try_to_cancel(&el->timer);
-	BUG_ON(ret == 1);
-	if (ret == -1) {
+
+	VTRACE("Reinitting list 0x%x for event 0x%x\n", el, e);
+
+	/* Cancel timer */
+	t_ret = hrtimer_pull_cancel(group->cpu, &el->timer, &el->info);
+	BUG_ON(t_ret == 1);
+	if (t_ret == -1) {
+		/* The on_timer callback is running for this list */
 		VTRACE("Timer is running concurrently!\n");
 	}
-	INIT_LIST_HEAD(&el->events);
-	atomic_set(&el->info.state, HRTIMER_START_ON_INACTIVE);
+	/* Clear event lists */
+	for (prio = 0; prio < NUM_EVENT_PRIORITIES; prio++)
+		INIT_LIST_HEAD(&el->events[prio]);
 }
 
 /**
@@ -176,7 +165,7 @@ void add_event(struct event_group *group, struct rt_event *e, lt_t fire)
 	struct event_list *el;
 	int in_use;
 
-	VTRACE("Adding event 0x%p with priority %d for time %llu\n",
+	VTRACE("Adding event 0x%x with priority %d for time %llu\n",
 	       e, e->prio, fire);
 
 	/* A NULL group means use the group of the currently executing CPU */
@@ -190,18 +179,19 @@ void add_event(struct event_group *group, struct rt_event *e, lt_t fire)
 	if (!el) {
 		/* Use our own, but drop lock first */
 		raw_spin_unlock(&group->queue_lock);
-		reinit_event_list(e);
+		reinit_event_list(group, e);
 		raw_spin_lock(&group->queue_lock);
 		el = get_event_list(group, e, fire, 1);
 	}
 
 	/* Add event to sorted list */
-	insert_event(el, e);
+	VTRACE("Inserting event 0x%x at end of event_list 0x%x\n", e, el);
+	list_add(&e->events_node, &el->events[e->prio]);
 	raw_spin_unlock(&group->queue_lock);
 
 	/* Arm timer if we are the owner */
 	if (el == e->event_list) {
-		VTRACE("Arming timer on event 0x%p for %llu\n", e, fire);
+		VTRACE("Arming timer on event 0x%x for %llu\n", e, fire);
 		in_use = hrtimer_start_on(group->cpu, &el->info,
 					  &el->timer, ns_to_ktime(el->fire_time),
 					  HRTIMER_MODE_ABS_PINNED);
@@ -216,56 +206,73 @@ void add_event(struct event_group *group, struct rt_event *e, lt_t fire)
  */
 void cancel_event(struct rt_event *e)
 {
-	struct rt_event *swap;
+	int prio, cancel;
+	struct rt_event *swap, *entry;
 	struct event_list *tmp;
-	struct event_group *group = e->_event_group;
+	struct event_group *group;
+	struct list_head *list, *pos;
 
-	VTRACE("Canceling event 0x%p with priority %d\n", e, e->prio);
+	VTRACE("Canceling event 0x%x with priority %d\n", e, e->prio);
+	group = e->_event_group;
 	if (!group) return;
 
-	/* If our event_list contains any events, it is in use */
 	raw_spin_lock(&group->queue_lock);
-	if (!list_empty(&e->event_list->events)) {
-		VTRACE("List 0x%p is not empty\n", e->event_list);
 
-		/* If our event_list contains events, we are the first element
-		 * in that list. If there is anyone after us in the list, then
-		 * swap our list with theirs to that the event_list can still
-		 * trigger the queued events.
-		 */
-		if (!list_is_singular(&e->event_list->events)) {
-			swap = list_entry(e->events_node.next, struct rt_event,
-					  events_node);
-			VTRACE("Swapping with event 0x%p of priority %d\n",
-			       swap, swap->prio);
-			tmp = swap->event_list;
-			swap->event_list = e->event_list;
-			e->event_list = tmp;
+	/* Relies on the fact that an event_list's owner is ALWAYS present
+	 * as one of the event_list's events.
+	 */
+	for (prio = 0, cancel = 0, swap = NULL;
+	     prio < NUM_EVENT_PRIORITIES && !swap;
+	     prio++) {
+
+		list = &e->event_list->events[prio];
+		cancel |= !list_empty(list);
+
+		/* Find any element which is not the event_list's owner */
+		list_for_each(pos, list) {
+			entry = list_entry(pos, struct rt_event, events_node);
+			if (entry != e) {
+				swap = entry;
+				break;
+			}
 		}
+	}
 
-		/* Disable the event_list */
+	if (swap) {
+		/* Give the other guy ownership of the event_list */
+		VTRACE("Swapping list 0x%x with event 0x%x event list 0x%x\n",
+		       e->event_list, swap, swap->event_list);
+		tmp = swap->event_list;
+		swap->event_list = e->event_list;
+		BUG_ON(!tmp);
+		e->event_list = tmp;
+	} else if (cancel) {
+		/* Cancel the event_list we own */
 		hrtimer_pull_cancel(group->cpu,
 				    &e->event_list->timer,
 				    &e->event_list->info);
 		list_del_init(&e->event_list->queue_node);
-	} else {
-		VTRACE("List 0x%p is empty\n", e->event_list);
 	}
+	/* Remove ourselves from any list we may be a part of */
 	list_del_init(&e->events_node);
-	raw_spin_unlock(&group->queue_lock);
 	e->_event_group = NULL;
+
+	raw_spin_unlock(&group->queue_lock);
 }
 
 struct kmem_cache *event_list_cache;
 
 struct event_list* event_list_alloc(int gfp_flags)
 {
+	int prio;
 	struct event_list *el = kmem_cache_alloc(event_list_cache, gfp_flags);
 	if (el) {
 		hrtimer_init(&el->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 		INIT_LIST_HEAD(&el->queue_node);
 		el->timer.function = on_timer;
 		hrtimer_start_on_info_init(&el->info);
+		for (prio = 0; prio < NUM_EVENT_PRIORITIES; prio++)
+			INIT_LIST_HEAD(&el->events[prio]);
 	} else {
 		VTRACE("Failed to allocate event list!\n");
 		printk(KERN_CRIT "Failed to allocate event list.\n");
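One subtlety in the reworked cancel_event() above: per the comment in the hunk, an event_list's owner is always queued on its own list while the list is armed. If the owner cancels while other events remain queued, it swaps event_list pointers with one of them, so the armed timer keeps firing a list that belongs to a live event; if the list is in use but nobody else is queued, the timer is cancelled outright. The standalone sketch below shows just the swap, with a fixed array standing in for the per-priority lists (assumed semantics, hypothetical code, not the kernel source):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct event_list;

struct rt_event {
	int prio;
	struct event_list *event_list;	/* the list this event owns */
};

struct event_list {
	struct rt_event *queued[8];	/* stand-in for per-prio lists */
	int n;
};

static void cancel_event(struct rt_event *e)
{
	struct event_list *el = e->event_list, *tmp;
	struct rt_event *swap = NULL;
	int i;

	/* find any queued event that is not the owner */
	for (i = 0; i < el->n && !swap; i++)
		if (el->queued[i] != e)
			swap = el->queued[i];

	if (swap) {
		/* hand our in-use list to the other event and take its
		 * spare, so each event still owns exactly one list */
		tmp = swap->event_list;
		swap->event_list = e->event_list;
		e->event_list = tmp;
	}
	/* else: the list held only us; the real code pulls its timer */
}

int main(void)
{
	struct event_list in_use = { .n = 0 }, spare = { .n = 0 };
	struct rt_event owner = { 0, &in_use }, other = { 1, &spare };

	in_use.queued[in_use.n++] = &owner;
	in_use.queued[in_use.n++] = &other;

	cancel_event(&owner);
	assert(other.event_list == &in_use && owner.event_list == &spare);
	printf("ownership swapped; armed list still has a live owner\n");
	return 0;
}

Without the swap, the canceling event would walk away still owning the very list the pending timer is about to fire, and a later add_event() on it could reinitialize a list that is in flight.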
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 1d1ecbb8c9c4..5067b79b026b 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -987,8 +987,8 @@ void mc_finish_switch(struct task_struct *prev)
 {
 	struct cpu_entry* entry = &__get_cpu_var(cpus);
 	entry->scheduled = is_realtime(current) ? current : NULL;
-	TRACE_TASK(prev, "Switched away from to (something/%d)\n",
-		   ((entry->scheduled) ? entry->scheduled->pid : 0));
+	TRACE_TASK(prev, "Switched away from to " TS,
+		   TA(entry->scheduled));
 }
 
 /*