path: root/litmus/event_group.c
Diffstat (limited to 'litmus/event_group.c')
-rw-r--r--	litmus/event_group.c	198
1 file changed, 198 insertions(+), 0 deletions(-)
diff --git a/litmus/event_group.c b/litmus/event_group.c
new file mode 100644
index 000000000000..11a690d5b2cd
--- /dev/null
+++ b/litmus/event_group.c
@@ -0,0 +1,198 @@
#include <litmus/event_group.h>
#include <litmus/trace.h>
#include <linux/sched.h>
#include <linux/slab.h>

/*
 * Return event_queue slot for the given time.
 */
static unsigned int time2slot(lt_t time)
{
	return (unsigned int) time2quanta(time, FLOOR) % EVENT_QUEUE_SLOTS;
}

/*
 * Executes events from an event_list in priority order.
 * Events can requeue themselves when they are called.
 */
static enum hrtimer_restart on_timer(struct hrtimer *timer)
{
	unsigned long flags;
	event_list_t *el;
	rt_event_t *e;
	struct list_head *pos, *safe;
	LIST_HEAD(list);

	el = container_of(timer, event_list_t, timer);

	raw_spin_lock_irqsave(&el->group->queue_lock, flags);
	list_del_init(&el->list);
	raw_spin_unlock_irqrestore(&el->group->queue_lock, flags);

	/* Empty the event list so this event_list can be requeued */
	list_splice_init(&el->events, &list);

	/* Fire events */
	list_for_each_safe(pos, safe, &list) {
		e = list_entry(pos, rt_event_t, list);
		TRACE("Dequeueing event with prio %d\n", e->prio);
		list_del_init(pos);
		e->fire(e->data);
	}

	return HRTIMER_NORESTART;
}

/*
 * Insert event in event-list, respecting priority order.
 */
void insert_event(event_list_t *el, rt_event_t *e)
{
	struct list_head *pos;
	rt_event_t *queued;
	list_for_each_prev(pos, &el->events) {
		queued = list_entry(pos, rt_event_t, list);
		if (e->prio < queued->prio) {
			__list_add(&e->list, pos, pos->next);
			return;
		}
	}
	list_add(&e->list, &el->events);
}

/*
 * Return the event_list for the given event and fire time. If no matching
 * event_list is queued yet and use_event_heap is 1, the event's
 * pre-allocated list is queued and returned. Otherwise NULL is returned.
 */
static event_list_t* get_event_list(event_group_t *group,
				    rt_event_t *e,
				    lt_t fire,
				    int use_event_heap)
{
	struct list_head* pos;
	event_list_t *el = NULL, *tmp;
	unsigned int slot = time2slot(fire);

	/* initialize pos for the case that the list is empty */
	pos = group->event_queue[slot].next;
	list_for_each(pos, &group->event_queue[slot]) {
		tmp = list_entry(pos, event_list_t, list);
		if (lt_after_eq(fire, tmp->fire_time) &&
		    lt_before(fire, tmp->fire_time + group->res)) {
			/* match within the group resolution -- this also
			 * happens on hyperperiod boundaries
			 */
			el = tmp;
			break;
		} else if (lt_before(fire, tmp->fire_time)) {
			/* we need to insert a new node since tmp is
			 * already in the future
			 */
			break;
		}
	}
	if (!el && use_event_heap) {
		/* use the event's pre-allocated event_list */
		tmp = e->event_list;
		tmp->fire_time = fire;
		tmp->group = group;
		/* add to queue */
		list_add(&tmp->list, pos->prev);
		el = tmp;
	}
	return el;
}

/*
 * Prepare an event_list for a new set of events.
 */
static void reinit_event_list(rt_event_t *e)
{
	event_list_t *el = e->event_list;
	BUG_ON(hrtimer_cancel(&el->timer));
	INIT_LIST_HEAD(&el->events);
	atomic_set(&el->info.state, HRTIMER_START_ON_INACTIVE);
}

/**
 * add_event() - Add timer to event group.
 */
void add_event(event_group_t *group, rt_event_t *e, lt_t fire)
{
	event_list_t *el;

	TRACE("Adding event with prio %d @ %llu\n", e->prio, fire);

	raw_spin_lock(&group->queue_lock);
	el = get_event_list(group, e, fire, 0);
	if (!el) {
		/* Use our own, but drop lock first */
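		/* While the lock is dropped, another CPU may queue a matching
		 * event_list for this fire time; the second get_event_list()
		 * call below falls back to our pre-allocated list only if no
		 * match appeared in the meantime.
		 */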
		raw_spin_unlock(&group->queue_lock);
		reinit_event_list(e);
		raw_spin_lock(&group->queue_lock);
		el = get_event_list(group, e, fire, 1);
	}

	/* Add event to sorted list */
	insert_event(el, e);
	raw_spin_unlock(&group->queue_lock);

	/* Arm timer if we are the owner */
	if (el == e->event_list) {
		TRACE("Arming timer for %llu\n", fire);
		if (group->cpu == smp_processor_id()) {
			__hrtimer_start_range_ns(&el->timer,
						 ns_to_ktime(el->fire_time),
						 0, HRTIMER_MODE_ABS_PINNED, 0);
		} else {
			hrtimer_start_on(group->cpu, &el->info,
					 &el->timer, ns_to_ktime(el->fire_time),
					 HRTIMER_MODE_ABS_PINNED);
		}
	} else {
		TRACE("Not my timer @%llu\n", fire);
	}
}

/**
 * cancel_event() - Remove event from the group.
 */
void cancel_event(event_group_t *group, rt_event_t *e)
{
	raw_spin_lock(&group->queue_lock);
	list_del_init(&e->list);
	raw_spin_unlock(&group->queue_lock);
}

/**
 * init_event_group() - Prepare group for events.
 */
void init_event_group(event_group_t *group, lt_t res, int cpu)
{
	int i;
	group->res = res;
	group->cpu = cpu;

	INIT_LIST_HEAD(&group->tobe_queued);
	for (i = 0; i < EVENT_QUEUE_SLOTS; i++) {
		INIT_LIST_HEAD(&group->event_queue[i]);
	}

	raw_spin_lock_init(&group->queue_lock);
	raw_spin_lock_init(&group->tobe_lock);
}

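/* Slab cache backing event_list_t allocations; it must be created (e.g. with
 * kmem_cache_create()) during initialization, before event_list_alloc() is
 * first called.
 */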
struct kmem_cache *event_list_cache;
event_list_t* event_list_alloc(int gfp_flags)
{
	event_list_t *el = kmem_cache_alloc(event_list_cache, gfp_flags);
	if (el) {
		hrtimer_init(&el->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
		INIT_LIST_HEAD(&el->list);
		el->timer.function = on_timer;
	}
	return el;
}

void event_list_free(event_list_t *el)
{
	hrtimer_cancel(&el->timer);
	kmem_cache_free(event_list_cache, el);
}
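
For context, a minimal usage sketch of the interface this file provides
(init_event_group(), event_list_alloc(), add_event(), cancel_event(),
event_list_free()). It is not part of the patch: it assumes the event_group_t
and rt_event_t layouts declared in litmus/event_group.h (the group, cpu, res,
prio, fire, data, and event_list fields used above), assumes event_list_cache
has already been created, uses the corrected three-argument
init_event_group() signature, and the demo_fire() callback and all demo_*
names are hypothetical.

#include <litmus/event_group.h>
#include <litmus/litmus.h>	/* litmus_clock(), lt_t */
#include <litmus/trace.h>
#include <linux/gfp.h>

static event_group_t demo_group;
static rt_event_t demo_event;

/* Hypothetical fire callback; events may re-add themselves when called. */
static void demo_fire(void *data)
{
	event_group_t *group = data;

	TRACE("demo event fired\n");
	/* requeue 10 ms into the future */
	add_event(group, &demo_event, litmus_clock() + 10000000ULL);
}

static void demo_setup(void)
{
	/* one event group, 1 ms resolution, owned by CPU 0 */
	init_event_group(&demo_group, 1000000ULL, 0);

	demo_event.prio = 0;	/* assumed: smaller value = higher priority */
	demo_event.fire = demo_fire;
	demo_event.data = &demo_group;
	demo_event.event_list = event_list_alloc(GFP_ATOMIC);

	/* first firing 10 ms from now */
	add_event(&demo_group, &demo_event, litmus_clock() + 10000000ULL);
}

static void demo_teardown(void)
{
	cancel_event(&demo_group, &demo_event);
	event_list_free(demo_event.event_list);
}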