path: root/litmus
author	Jonathan Herman <hermanjl@cs.unc.edu>	2011-10-08 21:07:30 -0400
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2011-10-08 21:07:30 -0400
commit	3d39c63ff7b9ec086fd8004951af439757a2fd49 (patch)
tree	fc26741925e249c0aebe5a7f02d3d53599f8bb3c /litmus
parent	5177128caca8a03d19c293de841ce05070b1c7e9 (diff)
Renamed list variables for clarity in event_list
Diffstat (limited to 'litmus')
-rw-r--r--	litmus/event_group.c	41
1 file changed, 20 insertions(+), 21 deletions(-)
diff --git a/litmus/event_group.c b/litmus/event_group.c
index 6811fa2cf9e9..f2ee9158294b 100644
--- a/litmus/event_group.c
+++ b/litmus/event_group.c
@@ -33,7 +33,7 @@ static enum hrtimer_restart on_timer(struct hrtimer *timer)
 	unsigned long num = 0;
 	struct event_list *el;
 	struct rt_event *e;
-	struct list_head *pos, *safe, list;
+	struct list_head *pos, *safe, events;
 
 	el = container_of(timer, struct event_list, timer);
 
@@ -42,17 +42,17 @@ static enum hrtimer_restart on_timer(struct hrtimer *timer)
 
 	raw_spin_lock_irqsave(&el->group->queue_lock, flags);
 	VTRACE("Removing event list 0x%p\n", el);
-	list_del_init(&el->list);
+	list_del_init(&el->queue_node);
 	raw_spin_unlock_irqrestore(&el->group->queue_lock, flags);
 
 	/* Empty event list so this event can be requeued */
 	VTRACE("Emptying event list 0x%p\n", el);
-	list_replace_init(&el->events, &list);
+	list_replace_init(&el->events, &events);
 
 	/* Fire events */
-	list_for_each_safe(pos, safe, &list) {
+	list_for_each_safe(pos, safe, &events) {
 		num++;
-		e = list_entry(pos, struct rt_event, list);
+		e = list_entry(pos, struct rt_event, events_node);
 		VTRACE("Dequeueing event 0x%p with prio %d from 0x%p\n",
 		       e, e->prio, el);
 		list_del_init(pos);
@@ -71,22 +71,21 @@ void insert_event(struct event_list *el, struct rt_event *e)
 	struct list_head *pos, *last = NULL;
 	struct rt_event *queued;
 	list_for_each(pos, &el->events) {
-		queued = list_entry(pos, struct rt_event, list);
+		queued = list_entry(pos, struct rt_event, events_node);
 		last = pos;
 		if (e->prio < queued->prio) {
 			VTRACE("Inserting priority %d event 0x%p before %d 0x%p "
 			       "in 0x%p, pos 0x%p\n", e->prio, e,
-			       queued->prio, &queued->list, el, pos);
-			BUG_ON(!list_empty(&e->list));
-			list_add_tail(&e->list, pos);
+			       queued->prio, &queued->events_node, el, pos);
+			BUG_ON(!list_empty(&e->events_node));
+			list_add_tail(&e->events_node, pos);
 			return;
 		}
 	}
 	VTRACE("Inserting priority %d event 0x%p at end of 0x%p, last 0x%p\n",
 	       e->prio, e, el, last);
-	BUG_ON(!list_empty(&e->list));
-	list_add(&e->list, (last) ? last : pos);
-	VTRACE("Singular? %d\n", list_is_singular(&el->events));
+	BUG_ON(!list_empty(&e->events_node));
+	list_add(&e->events_node, (last) ? last : pos);
 }
 
 /*
@@ -110,7 +109,7 @@ static struct event_list* get_event_list(struct event_group *group,
 	pos = group->event_queue[slot].next;
 	list_for_each(pos, &group->event_queue[slot]) {
 		BUG_ON(remaining-- < 0);
-		tmp = list_entry(pos, struct event_list, list);
+		tmp = list_entry(pos, struct event_list, queue_node);
 		if (lt_after_eq(fire, tmp->fire_time) &&
 		    lt_before(fire, tmp->fire_time + group->res)) {
 			VTRACE("Found match 0x%p at time %llu\n",
@@ -137,8 +136,8 @@ static struct event_list* get_event_list(struct event_group *group,
 		/* Add to queue */
 		VTRACE("Using list 0x%p for priority %d and time %llu\n",
 		       tmp, e->prio, fire);
-		BUG_ON(!list_empty(&tmp->list));
-		list_add(&tmp->list, pos->prev);
+		BUG_ON(!list_empty(&tmp->queue_node));
+		list_add(&tmp->queue_node, pos->prev);
 		el = tmp;
 	}
 	return el;
@@ -227,10 +226,10 @@ void cancel_event(struct rt_event *e)
 	 * trigger the queued events.
 	 */
 	if (!list_is_singular(&e->event_list->events)) {
-		swap = list_entry(e->list.next, struct rt_event, list);
+		swap = list_entry(e->events_node.next, struct rt_event,
+				  events_node);
 		VTRACE("Swapping with event 0x%p of priority %d\n",
 		       swap, swap->prio);
-
 		tmp = swap->event_list;
 		swap->event_list = e->event_list;
 		e->event_list = tmp;
@@ -239,11 +238,11 @@ void cancel_event(struct rt_event *e)
 		/* Disable the event_list */
 		atomic_set(&e->event_list->info.state, HRTIMER_START_ON_INACTIVE);
 		hrtimer_try_to_cancel(&e->event_list->timer);
-		list_del_init(&e->event_list->list);
+		list_del_init(&e->event_list->queue_node);
 	} else {
 		VTRACE("List 0x%p is empty\n", e->event_list);
 	}
-	list_del_init(&e->list);
+	list_del_init(&e->events_node);
 	raw_spin_unlock(&group->queue_lock);
 	e->_event_group = NULL;
 }
@@ -255,7 +254,7 @@ struct event_list* event_list_alloc(int gfp_flags)
 	struct event_list *el = kmem_cache_alloc(event_list_cache, gfp_flags);
 	if (el) {
 		hrtimer_init(&el->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-		INIT_LIST_HEAD(&el->list);
+		INIT_LIST_HEAD(&el->queue_node);
 		el->timer.function = on_timer;
 	} else {
 		VTRACE("Failed to allocate event list!\n");
@@ -272,7 +271,7 @@ void init_event(struct rt_event *e, int prio, fire_event_t function,
 	e->function = function;
 	e->event_list = el;
 	e->_event_group = NULL;
-	INIT_LIST_HEAD(&e->list);
+	INIT_LIST_HEAD(&e->events_node);
 }
 
 /**
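For reference, a minimal sketch of how the renamed list fields might be declared after this change. This is a reconstruction inferred only from the usages in the diff above, not the actual header in the tree; the struct layouts, the lt_t type of fire_time, and any omitted members are assumptions.

/*
 * Hypothetical reconstruction: only the field names used in this patch
 * are known, everything else here is assumed.
 */
struct event_list {
	struct list_head	events;		/* head of the rt_events that fire together */
	struct list_head	queue_node;	/* entry in group->event_queue[slot] (formerly 'list') */
	struct hrtimer		timer;
	lt_t			fire_time;
	struct event_group	*group;
	/* ... */
};

struct rt_event {
	struct list_head	events_node;	/* entry in event_list->events (formerly 'list') */
	int			prio;
	fire_event_t		function;
	struct event_list	*event_list;
	struct event_group	*_event_group;
};

With distinct names, list_del_init(&el->queue_node) visibly detaches a whole event_list from the group's queue, while list_del_init(&e->events_node) detaches a single event from its list, which is exactly the ambiguity the old 'list' name created.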