commit    f21e1d0ef90c2e88ae6a563afc31ea601ed968c7
tree      d7a0cf75344c890c81fbd402f0da1764937eebc8 /litmus/event_group.c
parent    2fe725ef2142dd6c1bbf72e8d1b0a6f7e885d7ed
author    Jonathan Herman <hermanjl@cs.unc.edu>  2011-09-27 14:47:26 -0400
committer Jonathan Herman <hermanjl@cs.unc.edu>  2011-09-27 14:47:26 -0400

    Timer merging
Diffstat (limited to 'litmus/event_group.c')
 -rw-r--r--  litmus/event_group.c | 108
 1 file changed, 72 insertions(+), 36 deletions(-)
diff --git a/litmus/event_group.c b/litmus/event_group.c
index 22c74a19d1d6..b4521ab370d1 100644
--- a/litmus/event_group.c
+++ b/litmus/event_group.c
@@ -5,6 +5,12 @@
 #include <litmus/trace.h>
 #include <litmus/event_group.h>
 
+#if 1
+#define VTRACE TRACE
+#else
+#define VTRACE(fmt, args...)
+#endif
+
 /*
  * Return event_queue slot for the given time.
  */
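The VTRACE macro added above is a compile-time verbosity switch: with the `#if 1` arm active it aliases the existing TRACE macro, and flipping the condition to 0 turns every VTRACE call into an empty variadic macro that costs nothing in non-debug builds. A minimal userspace sketch of the same pattern (printf stands in for LITMUS^RT's TRACE; that substitution is purely for illustration):

    #include <stdio.h>

    /* Flip 1 to 0 and every VTRACE call compiles away to nothing. */
    #if 1
    #define VTRACE(fmt, args...) printf("[vtrace] " fmt, ##args)
    #else
    #define VTRACE(fmt, args...)
    #endif

    int main(void)
    {
            VTRACE("event fired at %llu\n", 100ULL);
            return 0;
    }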
@@ -20,6 +26,7 @@ static unsigned int time2slot(lt_t time)
 static enum hrtimer_restart on_timer(struct hrtimer *timer)
 {
 	unsigned long flags;
+	int num = 0;
 	struct event_list *el;
 	struct rt_event *e;
 	struct list_head *pos, *safe, list;
@@ -27,19 +34,24 @@ static enum hrtimer_restart on_timer(struct hrtimer *timer)
 	el = container_of(timer, struct event_list, timer);
 
 	raw_spin_lock_irqsave(&el->group->queue_lock, flags);
+	VTRACE("Removing event list 0x%p\n", el);
 	list_del_init(&el->list);
 	raw_spin_unlock_irqrestore(&el->group->queue_lock, flags);
 
 	/* Empty event list so this event can be requeued */
+	VTRACE("Emptying event list 0x%p\n", el);
 	list_replace_init(&el->events, &list);
 
 	/* Fire events */
 	list_for_each_safe(pos, safe, &list) {
+		num++;
 		e = list_entry(pos, struct rt_event, list);
-		TRACE("Dequeueing event with prio %d\n", e->prio);
+		TRACE("Dequeueing event with prio %d from 0x%p\n",
+		      e->prio, el);
 		list_del_init(pos);
-		e->fire(e->data);
+		e->function(e);
 	}
+	VTRACE("Exhausted %d events from list 0x%p\n", num, el);
 	return HRTIMER_NORESTART;
 }
 
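In on_timer the event list is unlinked and emptied before any handler runs: list_replace_init() splices all pending events onto the on-stack `list` in O(1), so handlers fire without the group's queue lock held and may safely re-queue themselves (hence the "can be requeued" comment). A reduced kernel-style sketch of the idiom, with a hypothetical `struct item` standing in for rt_event:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct item {
            struct list_head link;
            void (*fn)(struct item *it);
    };

    static void fire_all(spinlock_t *lock, struct list_head *pending)
    {
            struct list_head local, *pos, *safe;
            unsigned long flags;

            /* Detach every queued item at once, under the lock... */
            spin_lock_irqsave(lock, flags);
            list_replace_init(pending, &local);
            spin_unlock_irqrestore(lock, flags);

            /* ...then fire without the lock; list_for_each_safe lets
             * each handler unlink or re-queue its own item. */
            list_for_each_safe(pos, safe, &local) {
                    struct item *it = list_entry(pos, struct item, link);
                    list_del_init(pos);
                    it->fn(it);
            }
    }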
@@ -48,16 +60,22 @@ static enum hrtimer_restart on_timer(struct hrtimer *timer)
  */
 void insert_event(struct event_list *el, struct rt_event *e)
 {
-	struct list_head *pos;
+	struct list_head *pos, *last = NULL;
 	struct rt_event *queued;
-	list_for_each_prev(pos, &el->events) {
+	list_for_each(pos, &el->events) {
 		queued = list_entry(pos, struct rt_event, list);
+		last = pos;
 		if (e->prio < queued->prio) {
-			__list_add(&e->list, pos, pos->next);
+			VTRACE("Inserting priority %d 0x%p before %d 0x%p "
+			       "in 0x%p, pos 0x%p\n", e->prio, &e->list,
+			       queued->prio, &queued->list, el, pos);
+			list_add_tail(&e->list, pos);
 			return;
 		}
 	}
-	list_add(&e->list, &el->events);
+	VTRACE("Inserting priority %d 0x%p at end of 0x%p, last 0x%p\n",
+	       e->prio, &el->list, el, last);
+	list_add(&e->list, (last) ? last : pos);
 }
 
 /*
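The rewritten insert_event scans forward and relies on list_add_tail(new, pos) linking `new` immediately before pos (list_add_tail treats its second argument as a list head), which keeps el->events sorted by ascending priority value; the trailing list_add after `last` appends when `e` has the numerically largest priority or the list is empty. A self-contained sketch of the same ordered insert, with a hypothetical node type:

    #include <linux/list.h>

    struct node {
            int prio;
            struct list_head link;
    };

    /* Keep 'head' sorted by ascending prio (lower value = higher
     * priority, as in the patch). */
    static void insert_sorted(struct list_head *head, struct node *n)
    {
            struct list_head *pos;

            list_for_each(pos, head) {
                    struct node *q = list_entry(pos, struct node, link);
                    if (n->prio < q->prio) {
                            /* Link n just before q. */
                            list_add_tail(&n->link, pos);
                            return;
                    }
            }
            /* Empty list or lowest priority: append at the tail. This
             * matches the patch's list_add(&e->list, last ? last : pos). */
            list_add_tail(&n->link, head);
    }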
@@ -65,52 +83,60 @@ void insert_event(struct event_list *el, struct rt_event *e)
  * is being used yet and use_event_heap is 1, will create the list
  * and return it. Otherwise it will return NULL.
  */
-static struct event_list *get_event_list(struct event_group *group,
+static struct event_list* get_event_list(struct event_group *group,
 					 struct rt_event *e,
 					 lt_t fire,
-					 int use_event_heap)
+					 int use_event_list)
 {
 	struct list_head* pos;
 	struct event_list *el = NULL, *tmp;
 	unsigned int slot = time2slot(fire);
 
-	/* initialize pos for the case that the list is empty */
+	VTRACE("Getting list for %llu\n", fire);
+
+	/* Initialize pos for the case that the list is empty */
 	pos = group->event_queue[slot].next;
 	list_for_each(pos, &group->event_queue[slot]) {
 		tmp = list_entry(pos, struct event_list, list);
 		if (lt_after_eq(fire, tmp->fire_time) &&
-		    lt_before(tmp->fire_time, fire)) {
-			/* perfect match -- this happens on hyperperiod
-			 * boundaries
-			 */
+		    lt_before(fire, tmp->fire_time + group->res)) {
+			VTRACE("Found match at time %llu\n", tmp->fire_time);
 			el = tmp;
 			break;
 		} else if (lt_before(fire, tmp->fire_time)) {
-			/* we need to insert a new node since rh is
+			/* We need to insert a new node since el is
 			 * already in the future
 			 */
+			VTRACE("Time %llu was before %llu\n",
+			       fire, tmp->fire_time);
 			break;
+		} else {
+			VTRACE("Time %llu was after %llu\n",
+			       fire, tmp->fire_time + group->res);
 		}
 	}
-	if (!el && use_event_heap) {
-		/* use pre-allocated release heap */
+	if (!el && use_event_list) {
+		/* Use pre-allocated list */
 		tmp = e->event_list;
 		tmp->fire_time = fire;
 		tmp->group = group;
-		/* add to queue */
+		/* Add to queue */
 		list_add(&tmp->list, pos->prev);
 		el = tmp;
+		VTRACE("Using list for priority %d and time %llu\n",
+		       e->prio, fire);
 	}
 	return el;
 }
 
 /*
- * Prepare a release heap for a new set of events.
+ * Prepare a release list for a new set of events.
  */
 static void reinit_event_list(struct rt_event *e)
 {
 	struct event_list *el = e->event_list;
 	BUG_ON(hrtimer_cancel(&el->timer));
+	VTRACE("Reinitting 0x%p\n", el);
 	INIT_LIST_HEAD(&el->events);
 	atomic_set(&el->info.state, HRTIMER_START_ON_INACTIVE);
 }
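This hunk is the heart of the "Timer merging" commit. The old match test (fire >= tmp->fire_time && tmp->fire_time < fire reduces to tmp->fire_time < fire, given LITMUS^RT's lt_before/lt_after_eq comparisons) matched any list strictly in the past rather than the "perfect match" its comment claimed. The new test instead accepts any existing list whose window [fire_time, fire_time + res) covers the requested time, so events that fire within one resolution quantum of each other share a single hrtimer. A sketch of the window test, assuming lt_t is an unsigned 64-bit nanosecond count:

    typedef unsigned long long lt_t;

    /* An existing event_list armed for fire_time absorbs any event
     * whose fire time lands in [fire_time, fire_time + res), e.g.
     * merges_with(1050, 1000, 100) == 1, merges_with(1100, 1000, 100) == 0. */
    static int merges_with(lt_t fire, lt_t fire_time, lt_t res)
    {
            return fire_time <= fire && fire < fire_time + res;
    }

Several lists can hash to the same time2slot() bucket, which is why get_event_list still walks the slot's chain looking for a window that fits before falling back to the event's pre-allocated list.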
@@ -122,7 +148,8 @@ void add_event(struct event_group *group, struct rt_event *e, lt_t fire)
 {
 	struct event_list *el;
 
-	TRACE("Adding event with prio %d @ %llu\n", event->prio, fire);
+	VTRACE("Adding event with priority %d for time %llu\n",
+	       e->prio, fire);
 
 	raw_spin_lock(&group->queue_lock);
 	el = get_event_list(group, e, fire, 0);
@@ -140,7 +167,7 @@ void add_event(struct event_group *group, struct rt_event *e, lt_t fire)
 
 	/* Arm timer if we are the owner */
 	if (el == e->event_list) {
-		TRACE("Arming timer for %llu\n", fire);
+		VTRACE("Arming timer for %llu\n", fire);
 		if (group->cpu == smp_processor_id()) {
 			__hrtimer_start_range_ns(&el->timer,
 					ns_to_ktime(el->fire_time),
@@ -151,18 +178,23 @@ void add_event(struct event_group *group, struct rt_event *e, lt_t fire)
 					HRTIMER_MODE_ABS_PINNED);
 		}
 	} else {
-		TRACE("Not my timer @%llu", fire);
+		VTRACE("Not my timer @%llu\n", fire);
 	}
 }
 
 /**
  * cancel_event() - Remove event from the group.
  */
-void cancel_event(struct event_group *group, struct rt_event *e)
+void cancel_event(struct rt_event *e)
 {
-	raw_spin_lock(&group->queue_lock);
-	list_del_init(&e->list);
-	raw_spin_unlock(&group->queue_lock);
+	struct event_group *group;
+	if (e->list.next != &e->list) {
+		group = e->event_list->group;
+		raw_spin_lock(&group->queue_lock);
+		VTRACE("Canceling event with priority %d\n", e->prio);
+		list_del_init(&e->list);
+		raw_spin_unlock(&group->queue_lock);
+	}
 }
 
 /**
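cancel_event now takes only the event: the group is recovered from e->event_list->group, and the guard `e->list.next != &e->list` is an open-coded !list_empty(&e->list). Because INIT_LIST_HEAD() and list_del_init() leave a node pointing at itself, a self-linked node means "not currently queued", and cancelling an unqueued event becomes a lock-free no-op. A sketch of the idiom:

    #include <linux/list.h>

    /* After INIT_LIST_HEAD(&e->list) or list_del_init(&e->list) the
     * node points at itself, so its linkage doubles as a "queued?"
     * flag. */
    static int event_is_queued(const struct list_head *node)
    {
            return node->next != node;	/* same as !list_empty(node) */
    }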
@@ -171,30 +203,34 @@ void cancel_event(struct event_group *group, struct rt_event *e)
 void init_event_group(struct event_group *group, lt_t res, int cpu)
 {
 	int i;
+	VTRACE("Creating group with res %llu on CPU %d", res, cpu);
 	group->res = res;
 	group->cpu = cpu;
-
-	for (i = 0; i < EVENT_QUEUE_SLOTS; i++) {
+	for (i = 0; i < EVENT_QUEUE_SLOTS; i++)
 		INIT_LIST_HEAD(&group->event_queue[i]);
-	}
-
 	raw_spin_lock_init(&group->queue_lock);
 }
 
-struct kmem_cache *event_list_cache;
-struct event_list * event_list_alloc(int gfp_flags)
+struct kmem_cache *event_list_cache, *event_cache;
+
+struct event_list* event_list_alloc(int gfp_flags)
 {
 	struct event_list *el = kmem_cache_alloc(event_list_cache, gfp_flags);
 	if (el) {
 		hrtimer_init(&el->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 		INIT_LIST_HEAD(&el->list);
 		el->timer.function = on_timer;
+	} else {
+		VTRACE("Failed to allocate event list!");
 	}
 	return el;
 }
 
-void event_list_free(struct event_list *el)
+void init_event(struct rt_event *e, int prio, fire_event_t function,
+		struct event_list *el)
 {
-	hrtimer_cancel(&el->timer);
-	kmem_cache_free(event_list_cache, el);
+	e->prio = prio;
+	e->function = function;
+	e->event_list = el;
+	INIT_LIST_HEAD(&e->list);
 }
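Taken together, the reworked entry points suggest the following call sequence for a client. The sketch below is hypothetical glue, not code from the patch: the fire_event_t signature (void return, taking the rt_event) is inferred from `e->function(e)` in on_timer, and the handler and variable names are invented.

    #include <linux/slab.h>
    #include <linux/smp.h>
    #include <litmus/event_group.h>

    static struct event_group my_group;
    static struct rt_event my_event;

    /* Inferred handler shape: on_timer invokes e->function(e). */
    static void my_handler(struct rt_event *e)
    {
            /* React to the firing; may call add_event() to re-arm. */
    }

    static void example_setup(lt_t now)
    {
            /* 1 ms resolution: events within 1 ms share one hrtimer. */
            init_event_group(&my_group, 1000000ULL, smp_processor_id());
            init_event(&my_event, 10 /* prio */, my_handler,
                       event_list_alloc(GFP_ATOMIC));
            add_event(&my_group, &my_event, now + 5000000ULL);
    }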