author     Jonathan Herman <hermanjl@cs.unc.edu>  2011-09-24 18:33:08 -0400
committer  Jonathan Herman <hermanjl@cs.unc.edu>  2011-09-24 18:33:08 -0400
commit     d1e50b511a6586da696ef5a61ed18818b8139b67 (patch)
tree       df641092f7eb044cdbfe1a8cd5969c33a0f820a4 /litmus
parent     c30d3e248cb6ece50cc9b06769ed794d600f0c20 (diff)
Checkpoint commit, initial timer merge design
Diffstat (limited to 'litmus')
-rw-r--r--   litmus/event_group.c | 198
-rw-r--r--   litmus/litmus.c      |   9
-rw-r--r--   litmus/sched_mc.c    |  27
3 files changed, 218 insertions(+), 16 deletions(-)
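
The header litmus/event_group.h that declares these types is not part of this diff. For orientation only, here is a sketch of the structures the new code appears to assume, reconstructed purely from how fields are used in litmus/event_group.c below; field order, and any members this diff never touches, are guesses:

	/* Sketch only -- reconstructed from usage, not the actual header. */
	typedef struct event_group {
		lt_t             res;   /* resolution: fire times within res share a timer */
		int              cpu;   /* CPU on which this group's timers are armed */
		struct list_head event_queue[EVENT_QUEUE_SLOTS]; /* hashed on fire time */
		raw_spinlock_t   queue_lock;
		struct list_head tobe_queued;
		raw_spinlock_t   tobe_lock;
	} event_group_t;

	typedef struct event_list {
		struct list_head   events;     /* rt_events firing at fire_time */
		lt_t               fire_time;
		struct hrtimer     timer;
		struct hrtimer_start_on_info info; /* LITMUS remote-arming support */
		struct list_head   list;       /* entry in event_queue[slot] */
		struct event_group *group;
	} event_list_t;

	typedef struct rt_event {
		int              prio;         /* lower value = higher priority */
		void             (*fire)(void *data);
		void             *data;
		struct list_head list;         /* entry in an event_list's events */
		event_list_t     *event_list;  /* pre-allocated; queued when owner */
	} rt_event_t;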
diff --git a/litmus/event_group.c b/litmus/event_group.c
new file mode 100644
index 000000000000..11a690d5b2cd
--- /dev/null
+++ b/litmus/event_group.c
@@ -0,0 +1,198 @@
#include <linux/sched.h>
#include <linux/slab.h>		/* kmem_cache_{alloc,free} */

#include <litmus/litmus.h>	/* TRACE(), lt_t comparisons, time2quanta() */
#include <litmus/event_group.h>
#include <litmus/trace.h>

/*
 * Return event_queue slot for the given time.
 */
static unsigned int time2slot(lt_t time)
{
	return (unsigned int) time2quanta(time, FLOOR) % EVENT_QUEUE_SLOTS;
}

/*
 * Executes events from an event_list in priority order.
 * Events can requeue themselves when they are called.
 */
static enum hrtimer_restart on_timer(struct hrtimer *timer)
{
	unsigned long flags;
	event_list_t *el;
	rt_event_t *e;
	struct list_head *pos, *safe, list;

	el = container_of(timer, event_list_t, timer);

	raw_spin_lock_irqsave(&el->group->queue_lock, flags);
	list_del_init(&el->list);
	raw_spin_unlock_irqrestore(&el->group->queue_lock, flags);

	/* Splice events onto a local list, emptying el->events so that
	 * fired events can requeue themselves onto this event_list.
	 */
	list_replace_init(&el->events, &list);

	/* Fire events */
	list_for_each_safe(pos, safe, &list) {
		e = list_entry(pos, rt_event_t, list);
		TRACE("Dequeueing event with prio %d\n", e->prio);
		list_del_init(pos);
		e->fire(e->data);
	}

	return HRTIMER_NORESTART;
}

/*
 * Insert event in event-list, respecting priority order.
 * The list is kept sorted by ascending prio value (lower value =
 * higher priority); ties are FIFO.
 */
void insert_event(event_list_t *el, rt_event_t *e)
{
	struct list_head *pos;
	rt_event_t *queued;
	/* walk backwards from the tail: insert after the last queued
	 * event that does not have lower priority than e
	 */
	list_for_each_prev(pos, &el->events) {
		queued = list_entry(pos, rt_event_t, list);
		if (queued->prio <= e->prio) {
			__list_add(&e->list, pos, pos->next);
			return;
		}
	}
	/* e has the highest priority seen so far */
	list_add(&e->list, &el->events);
}

/*
 * Return event_list for the given event and time. If no matching
 * event_list is queued yet and use_event_heap is 1, the event's
 * pre-allocated event_list is queued and returned. Otherwise NULL
 * is returned.
 */
static event_list_t* get_event_list(event_group_t *group,
				    rt_event_t *e,
				    lt_t fire,
				    int use_event_heap)
{
	struct list_head* pos;
	event_list_t *el = NULL, *tmp;
	unsigned int slot = time2slot(fire);

	/* initialize pos for the case that the list is empty */
	pos = group->event_queue[slot].next;
	list_for_each(pos, &group->event_queue[slot]) {
		tmp = list_entry(pos, event_list_t, list);
		if (lt_after_eq(fire, tmp->fire_time) &&
		    lt_before(fire, tmp->fire_time + group->res)) {
			/* match within the group's resolution -- merge
			 * with this event_list
			 */
			el = tmp;
			break;
		} else if (lt_before(fire, tmp->fire_time)) {
			/* we need to insert a new node since tmp is
			 * already in the future
			 */
			break;
		}
	}
	if (!el && use_event_heap) {
		/* use the event's pre-allocated event_list */
		tmp = e->event_list;
		tmp->fire_time = fire;
		tmp->group = group;
		/* add to queue */
		list_add(&tmp->list, pos->prev);
		el = tmp;
	}
	return el;
}

/*
 * Prepare an event_list for a new set of events.
 */
static void reinit_event_list(rt_event_t *e)
{
	event_list_t *el = e->event_list;
	BUG_ON(hrtimer_cancel(&el->timer));
	INIT_LIST_HEAD(&el->events);
	atomic_set(&el->info.state, HRTIMER_START_ON_INACTIVE);
}

/**
 * add_event() - Add timer to event group.
 */
void add_event(event_group_t *group, rt_event_t *e, lt_t fire)
{
	event_list_t *el;

	TRACE("Adding event with prio %d @ %llu\n", e->prio, fire);

	raw_spin_lock(&group->queue_lock);
	el = get_event_list(group, e, fire, 0);
	if (!el) {
		/* Use our own, but drop lock first */
		raw_spin_unlock(&group->queue_lock);
		reinit_event_list(e);
		raw_spin_lock(&group->queue_lock);
		el = get_event_list(group, e, fire, 1);
	}

	/* Add event to sorted list */
	insert_event(el, e);
	raw_spin_unlock(&group->queue_lock);

	/* Arm timer if we are the owner */
	if (el == e->event_list) {
		TRACE("Arming timer for %llu\n", fire);
		if (group->cpu == smp_processor_id()) {
			__hrtimer_start_range_ns(&el->timer,
						 ns_to_ktime(el->fire_time),
						 0, HRTIMER_MODE_ABS_PINNED, 0);
		} else {
			hrtimer_start_on(group->cpu, &el->info,
					 &el->timer, ns_to_ktime(el->fire_time),
					 HRTIMER_MODE_ABS_PINNED);
		}
	} else {
		TRACE("Not my timer @%llu\n", fire);
	}
}
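
/* Design note (editor's gloss, not in the original commit): all events
 * whose fire times fall within group->res of each other share a single
 * event_list and hence a single hrtimer.  Only the rt_event whose
 * pre-allocated event_list actually got queued -- the "owner", detected
 * above via el == e->event_list -- arms the timer, either directly when
 * the group's CPU is local or through hrtimer_start_on() for a remote
 * CPU.  Merged events therefore cost one timer interrupt instead of one
 * interrupt each.
 */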

/**
 * cancel_event() - Remove event from the group.
 */
void cancel_event(event_group_t *group, rt_event_t *e)
{
	raw_spin_lock(&group->queue_lock);
	list_del_init(&e->list);
	raw_spin_unlock(&group->queue_lock);
}

/**
 * init_event_group() - Prepare group for events.
 */
void init_event_group(event_group_t *group, lt_t res, int cpu)
{
	int i;
	group->res = res;
	group->cpu = cpu;

	INIT_LIST_HEAD(&group->tobe_queued);
	for (i = 0; i < EVENT_QUEUE_SLOTS; i++) {
		INIT_LIST_HEAD(&group->event_queue[i]);
	}

	raw_spin_lock_init(&group->queue_lock);
	raw_spin_lock_init(&group->tobe_lock);
}

struct kmem_cache *event_list_cache;
event_list_t* event_list_alloc(int gfp_flags)
{
	event_list_t *el = kmem_cache_alloc(event_list_cache, gfp_flags);
	if (el) {
		hrtimer_init(&el->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
		INIT_LIST_HEAD(&el->list);
		el->timer.function = on_timer;
	}
	return el;
}

void event_list_free(event_list_t *el)
{
	hrtimer_cancel(&el->timer);
	kmem_cache_free(event_list_cache, el);
}
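
A hedged usage sketch of this API: the callback and variable names (my_fire, my_group, my_event, example_setup) and the 1 ms / 10 ms values are illustrative, not from this commit, and init_event_group()'s three-argument form follows the signature fix above. litmus_clock() and NSEC_PER_MSEC are assumed available from litmus/litmus.h and the core kernel headers.

	static void my_fire(void *data)
	{
		TRACE("event fired: %s\n", (char *) data);
	}

	static event_group_t my_group;
	static rt_event_t my_event;

	static void example_setup(void)
	{
		/* one group per CPU; merge timers landing within 1 ms */
		init_event_group(&my_group, NSEC_PER_MSEC, smp_processor_id());

		my_event.prio = 0;	/* highest priority */
		my_event.fire = my_fire;
		my_event.data = "demo";
		INIT_LIST_HEAD(&my_event.list);
		my_event.event_list = event_list_alloc(GFP_ATOMIC);

		/* fire ~10 ms from now; other events whose fire times fall
		 * within my_group.res of this one share the same hrtimer */
		add_event(&my_group, &my_event,
			  litmus_clock() + 10 * NSEC_PER_MSEC);
	}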

diff --git a/litmus/litmus.c b/litmus/litmus.c
index 89fb4e9aff8c..0e7cc753d8f2
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -37,8 +37,9 @@ atomic_t __log_seq_no = ATOMIC_INIT(0);
 atomic_t release_master_cpu = ATOMIC_INIT(NO_CPU);
 #endif
 
-static struct kmem_cache * bheap_node_cache;
-extern struct kmem_cache * release_heap_cache;
+static struct kmem_cache *bheap_node_cache;
+extern struct kmem_cache *release_heap_cache;
+extern struct kmem_cache *event_list_cache;
 
 struct bheap_node* bheap_node_alloc(int gfp_flags)
 {
@@ -610,8 +611,9 @@ static int __init _init_litmus(void)
 
 	register_sched_plugin(&linux_sched_plugin);
 
 	bheap_node_cache = KMEM_CACHE(bheap_node, SLAB_PANIC);
 	release_heap_cache = KMEM_CACHE(release_heap, SLAB_PANIC);
+	event_list_cache = KMEM_CACHE(event_list, SLAB_PANIC);
 
 #ifdef CONFIG_MAGIC_SYSRQ
 	/* offer some debugging help */
@@ -631,6 +633,7 @@ static void _exit_litmus(void)
 	exit_litmus_proc();
 	kmem_cache_destroy(bheap_node_cache);
 	kmem_cache_destroy(release_heap_cache);
+	kmem_cache_destroy(event_list_cache);
 }
 
 module_init(_init_litmus);

diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 596742aa5583..9eae2b716a01
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -768,7 +768,6 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 			raw_spin_unlock(dom->lock);
 			update_crit_levels(entry);
 			raw_spin_lock(&entry->lock);
-			continue;
 		}
 	}
 	raw_spin_unlock(dom->lock);
@@ -804,18 +803,6 @@ static long mc_activate_plugin(void)
  * Initialization
  * ************************************************************************** */
 
-static struct sched_plugin mc_plugin __cacheline_aligned_in_smp = {
-	.plugin_name		= "MC",
-	.task_new		= mc_task_new,
-	.complete_job		= complete_job,
-	.task_exit		= mc_task_exit,
-	.schedule		= mc_schedule,
-	.task_wake_up		= mc_task_wake_up,
-	.task_block		= mc_task_block,
-	.admit_task		= mc_admit_task,
-	.activate_plugin	= mc_activate_plugin,
-};
-
 /* Initialize values here so that they are allocated with the module
  * and destroyed when the module is unloaded.
  */
@@ -832,6 +819,20 @@ static rt_domain_t _mc_crit_c_rt;
 struct bheap _mc_heap_c;
 struct bheap_node _mc_nodes_c[NR_CPUS];
 
+release_at)_
+
+static struct sched_plugin mc_plugin __cacheline_aligned_in_smp = {
+	.plugin_name		= "MC",
+	.task_new		= mc_task_new,
+	.complete_job		= complete_job,
+	.task_exit		= mc_task_exit,
+	.schedule		= mc_schedule,
+	.task_wake_up		= mc_task_wake_up,
+	.task_block		= mc_task_block,
+	.admit_task		= mc_admit_task,
+	.activate_plugin	= mc_activate_plugin,
+};
+
 static void init_crit_entry(crit_entry_t *ce, enum crit_level level,
 			    domain_data_t *dom_data,
 			    struct bheap_node *node)
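
The mc_plugin struct above only takes effect once it is registered with the LITMUS^RT core. The registration itself is outside this diff; below is a minimal sketch of the usual pattern, with the init-function name init_mc assumed (register_sched_plugin() is the same call litmus.c uses above for linux_sched_plugin):

	static int __init init_mc(void)
	{
		/* per-CPU entries, domains, and heaps are set up here
		 * (elided); the real initialization lives in sched_mc.c
		 * outside this hunk */
		return register_sched_plugin(&mc_plugin);
	}

	module_init(init_mc);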