diff options
author | Christopher Kenna <cjk@cs.unc.edu> | 2011-09-30 01:23:20 -0400 |
---|---|---|
committer | Christopher Kenna <cjk@cs.unc.edu> | 2011-09-30 01:23:20 -0400 |
commit | cd5685b6483df2f1ba8affc0ff8a0679f4044db8 (patch) | |
tree | b2c15c6f04fdfd96a738900d8e822057847ea641 /litmus/event_group.c | |
parent | 23a00b911b968c6290251913ecc34171836b4d32 (diff) |
Refactor timer merging and add it to CE plugin.
THIS CODE IS UNTESTED
We now initialize one event group for each cpu on system start. We can
get the event group for a CPU via a function in event_group.c
Another change is that an event now stores what group it is in when
add_event() is called on it. This lets us cancel it without knowing what
event group it is in.
The above is important because Level-C events (like releases) have a
NULL event group. When calling add_event(), it will get the event group
of the current CPU. If the event needs to be canceled later, we need
that saved group in the event so we know where to remove it from.
Diffstat (limited to 'litmus/event_group.c')
-rw-r--r-- | litmus/event_group.c | 65 |
1 file changed, 50 insertions, 15 deletions
diff --git a/litmus/event_group.c b/litmus/event_group.c index 276ba5dd242d..db43961258bf 100644 --- a/litmus/event_group.c +++ b/litmus/event_group.c | |||
@@ -1,5 +1,6 @@ | |||
1 | #include <linux/slab.h> | 1 | #include <linux/slab.h> |
2 | #include <linux/sched.h> | 2 | #include <linux/sched.h> |
3 | #include <linux/module.h> | ||
3 | 4 | ||
4 | #include <litmus/litmus.h> | 5 | #include <litmus/litmus.h> |
5 | #include <litmus/trace.h> | 6 | #include <litmus/trace.h> |
@@ -158,6 +159,12 @@ void add_event(struct event_group *group, struct rt_event *e, lt_t fire) | |||
158 | VTRACE("Adding event 0x%p with priority %d for time %llu\n", | 159 | VTRACE("Adding event 0x%p with priority %d for time %llu\n", |
159 | e, e->prio, fire); | 160 | e, e->prio, fire); |
160 | 161 | ||
162 | /* A NULL group means use the group of the currently executing CPU */ | ||
163 | if (NULL == group) | ||
164 | group = get_event_group_for(NO_CPU); | ||
165 | /* Saving the group is important for cancellations */ | ||
166 | e->_event_group = group; | ||
167 | |||
161 | raw_spin_lock(&group->queue_lock); | 168 | raw_spin_lock(&group->queue_lock); |
162 | el = get_event_list(group, e, fire, 0); | 169 | el = get_event_list(group, e, fire, 0); |
163 | if (!el) { | 170 | if (!el) { |
@@ -192,11 +199,12 @@ void add_event(struct event_group *group, struct rt_event *e, lt_t fire) | |||
192 | /** | 199 | /** |
193 | * cancel_event() - Remove event from the group. | 200 | * cancel_event() - Remove event from the group. |
194 | */ | 201 | */ |
195 | void cancel_event(struct event_group *group, struct rt_event *e) | 202 | void cancel_event(struct rt_event *e) |
196 | { | 203 | { |
197 | struct list_head *swap = NULL; | 204 | struct list_head *swap = NULL; |
198 | struct rt_event *swappy; | 205 | struct rt_event *swappy; |
199 | struct event_list *tmp; | 206 | struct event_list *tmp; |
207 | struct event_group *group = e->_event_group; | ||
200 | 208 | ||
201 | if (e->list.next != &e->list) { | 209 | if (e->list.next != &e->list) { |
202 | raw_spin_lock(&group->queue_lock); | 210 | raw_spin_lock(&group->queue_lock); |
@@ -222,25 +230,12 @@ void cancel_event(struct event_group *group, struct rt_event *e) | |||
222 | 230 | ||
223 | hrtimer_try_to_cancel(&e->event_list->timer); | 231 | hrtimer_try_to_cancel(&e->event_list->timer); |
224 | list_del_init(&e->event_list->list); | 232 | list_del_init(&e->event_list->list); |
233 | e->_event_group = NULL; | ||
225 | 234 | ||
226 | raw_spin_unlock(&group->queue_lock); | 235 | raw_spin_unlock(&group->queue_lock); |
227 | } | 236 | } |
228 | } | 237 | } |
229 | 238 | ||
230 | /** | ||
231 | * init_event_group() - Prepare group for events. | ||
232 | */ | ||
233 | void init_event_group(struct event_group *group, lt_t res, int cpu) | ||
234 | { | ||
235 | int i; | ||
236 | VTRACE("Creating group with res %llu on CPU %d", res, cpu); | ||
237 | group->res = res; | ||
238 | group->cpu = cpu; | ||
239 | for (i = 0; i < EVENT_QUEUE_SLOTS; i++) | ||
240 | INIT_LIST_HEAD(&group->event_queue[i]); | ||
241 | raw_spin_lock_init(&group->queue_lock); | ||
242 | } | ||
243 | |||
244 | struct kmem_cache *event_list_cache, *event_cache; | 239 | struct kmem_cache *event_list_cache, *event_cache; |
245 | 240 | ||
246 | struct event_list* event_list_alloc(int gfp_flags) | 241 | struct event_list* event_list_alloc(int gfp_flags) |
@@ -264,3 +259,43 @@ void init_event(struct rt_event *e, int prio, fire_event_t function, | |||
264 | e->event_list = el; | 259 | e->event_list = el; |
265 | INIT_LIST_HEAD(&e->list); | 260 | INIT_LIST_HEAD(&e->list); |
266 | } | 261 | } |
262 | |||
263 | /** | ||
264 | * init_event_group() - Prepare group for events. | ||
265 | * @group Group to prepare | ||
266 | * @res Timer resolution. Two events of @res distance will be merged | ||
267 | * @cpu Cpu on which to fire timers | ||
268 | */ | ||
269 | static void init_event_group(struct event_group *group, lt_t res, int cpu) | ||
270 | { | ||
271 | int i; | ||
272 | VTRACE("Creating group with resolution %llu on CPU %d", res, cpu); | ||
273 | group->res = res; | ||
274 | group->cpu = cpu; | ||
275 | for (i = 0; i < EVENT_QUEUE_SLOTS; i++) | ||
276 | INIT_LIST_HEAD(&group->event_queue[i]); | ||
277 | raw_spin_lock_init(&group->queue_lock); | ||
278 | } | ||
279 | |||
280 | |||
281 | DEFINE_PER_CPU(struct event_group, _event_groups); | ||
282 | |||
283 | struct event_group *get_event_group_for(const int cpu) | ||
284 | { | ||
285 | return &per_cpu(_event_groups, | ||
286 | (NO_CPU == cpu) ? smp_processor_id() : cpu); | ||
287 | } | ||
288 | |||
289 | static int __init _init_event_groups(void) | ||
290 | { | ||
291 | int cpu; | ||
292 | printk("Initializing LITMUS^RT event groups.\n"); | ||
293 | |||
294 | for_each_online_cpu(cpu) { | ||
295 | init_event_group(get_event_group_for(cpu), | ||
296 | CONFIG_MERGE_TIMERS_WINDOW, cpu); | ||
297 | } | ||
298 | return 0; | ||
299 | } | ||
300 | |||
301 | module_init(_init_event_groups); | ||