path: root/litmus/rt_domain.c
author		Andrea Bastoni <bastoni@cs.unc.edu>	2009-12-17 21:31:46 -0500
committer	Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-29 17:14:41 -0400
commit		53696c1fe6a6ada66f2a47c078d62aee40ad8ebe (patch)
tree		01353124d90341322967c6b6e4b010fe4ed22026 /litmus/rt_domain.c
parent		4e593e7105dec02e62ea7a1812dccb35a0d56d01 (diff)
[ported from 2008.3] Add rt_domain_t support
Still to be merged:
- arm_release_timer() with no rq locking
Diffstat (limited to 'litmus/rt_domain.c')
-rw-r--r--	litmus/rt_domain.c	287
1 file changed, 287 insertions, 0 deletions
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
new file mode 100644
index 000000000000..4fa834018efa
--- /dev/null
+++ b/litmus/rt_domain.c
@@ -0,0 +1,287 @@
/*
 * litmus/rt_domain.c
 *
 * LITMUS real-time infrastructure. This file contains the
 * functions that manipulate RT domains. RT domains are an abstraction
 * of a ready queue and a release queue.
 */

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <litmus/litmus.h>
#include <litmus/sched_plugin.h>
#include <litmus/sched_trace.h>

#include <litmus/rt_domain.h>

#include <litmus/trace.h>

#include <litmus/heap.h>

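/*
 * Illustrative sketch, not part of this patch (kept under #if 0 so it is
 * never compiled): how a plugin might feed the two queues this file
 * implements. demo_domain and demo_job_arrival() are hypothetical names;
 * __add_ready() and __add_release() are defined at the end of this file,
 * and demo_domain is assumed to have been set up with rt_domain_init().
 */
#if 0
static rt_domain_t demo_domain;

static void demo_job_arrival(struct task_struct *t)
{
	unsigned long flags;

	if (lt_before(litmus_clock(), get_release(t))) {
		/* release lies in the future: park the job on the
		 * to-be-released list; see __add_release() below
		 */
		spin_lock_irqsave(&demo_domain.tobe_lock, flags);
		__add_release(&demo_domain, t);
		spin_unlock_irqrestore(&demo_domain.tobe_lock, flags);
	} else {
		/* already released: straight into the ready queue */
		spin_lock_irqsave(&demo_domain.ready_lock, flags);
		__add_ready(&demo_domain, t);
		spin_unlock_irqrestore(&demo_domain.ready_lock, flags);
	}
}
#endif
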
static int dummy_resched(rt_domain_t *rt)
{
	return 0;
}

static int dummy_order(struct heap_node* a, struct heap_node* b)
{
	return 0;
}

/* default implementation: use default lock */
static void default_release_jobs(rt_domain_t* rt, struct heap* tasks)
{
	merge_ready(rt, tasks);
}

static unsigned int time2slot(lt_t time)
{
	return (unsigned int) time2quanta(time, FLOOR) % RELEASE_QUEUE_SLOTS;
}

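/*
 * Illustrative sketch, not part of this patch (inert under #if 0): the
 * release queue is a hash table of RELEASE_QUEUE_SLOTS buckets (see
 * litmus/rt_domain.h), indexed by the quantized release time computed by
 * time2slot() above. demo_find_bucket() is a hypothetical helper.
 */
#if 0
static struct list_head* demo_find_bucket(rt_domain_t *rt,
					  struct task_struct *t)
{
	unsigned int slot = time2slot(get_release(t));

	/* get_release_heap() below walks exactly this bucket, which is
	 * kept sorted by increasing release time.
	 */
	return &rt->release_queue.slot[slot];
}
#endif
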
static enum hrtimer_restart on_release_timer(struct hrtimer *timer)
{
	unsigned long flags;
	struct release_heap* rh;

	TRACE("on_release_timer(0x%p) starts.\n", timer);

	TS_RELEASE_START;

	rh = container_of(timer, struct release_heap, timer);

	spin_lock_irqsave(&rh->dom->release_lock, flags);
	TRACE("CB has the release_lock 0x%p\n", &rh->dom->release_lock);
	/* remove from release queue */
	list_del(&rh->list);
	spin_unlock_irqrestore(&rh->dom->release_lock, flags);
	TRACE("CB returned release_lock 0x%p\n", &rh->dom->release_lock);

	/* call release callback */
	rh->dom->release_jobs(rh->dom, &rh->heap);
	/* WARNING: rh can be referenced from other CPUs from now on. */

	TS_RELEASE_END;

	TRACE("on_release_timer(0x%p) ends.\n", timer);

	return HRTIMER_NORESTART;
}

/* allocated in litmus.c */
struct kmem_cache * release_heap_cache;

struct release_heap* release_heap_alloc(int gfp_flags)
{
	struct release_heap* rh;

	rh = kmem_cache_alloc(release_heap_cache, gfp_flags);
	if (rh) {
		/* initialize timer */
		hrtimer_init(&rh->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
		rh->timer.function = on_release_timer;
	}
	return rh;
}

void release_heap_free(struct release_heap* rh)
{
	/* make sure timer is no longer in use */
	hrtimer_cancel(&rh->timer);
	kmem_cache_free(release_heap_cache, rh);
}

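/*
 * Illustrative sketch, not part of this patch (inert under #if 0): the
 * cache used above is, per the comment, allocated in litmus.c, roughly
 * as below. The helper name is hypothetical.
 */
#if 0
void __init demo_init_release_heap_cache(void)
{
	release_heap_cache = KMEM_CACHE(release_heap, SLAB_PANIC);
}
#endif
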
/* Caller must hold release lock.
 * Will return the heap for the given release time. If no such heap
 * exists prior to the invocation, it will be created, but only if
 * use_task_heap is set.
 */
static struct release_heap* get_release_heap(rt_domain_t *rt,
					     struct task_struct* t,
					     int use_task_heap)
{
	struct list_head* pos;
	struct release_heap* heap = NULL;
	struct release_heap* rh;
	lt_t release_time = get_release(t);
	unsigned int slot = time2slot(release_time);

	/* initialize pos for the case that the list is empty */
	pos = rt->release_queue.slot[slot].next;
	list_for_each(pos, &rt->release_queue.slot[slot]) {
		rh = list_entry(pos, struct release_heap, list);
		if (release_time == rh->release_time) {
			/* perfect match -- this happens on hyperperiod
			 * boundaries
			 */
			heap = rh;
			break;
		} else if (lt_before(release_time, rh->release_time)) {
			/* we need to insert a new node since rh is
			 * already in the future
			 */
			break;
		}
	}
	if (!heap && use_task_heap) {
		/* use pre-allocated release heap */
		rh = tsk_rt(t)->rel_heap;

		rh->dom = rt;
		rh->release_time = release_time;

		/* add to release queue */
		list_add(&rh->list, pos->prev);
		heap = rh;
	}
	return heap;
}

static void reinit_release_heap(struct task_struct* t)
{
	struct release_heap* rh;

	/* use pre-allocated release heap */
	rh = tsk_rt(t)->rel_heap;

	/* Make sure it is safe to use. The timer callback could still
	 * be executing on another CPU; hrtimer_cancel() will wait
	 * until the timer callback has completed. However, under no
	 * circumstances should the timer be active (= yet to be
	 * triggered).
	 *
	 * WARNING: If the CPU still holds the release_lock at this point,
	 * deadlock may occur!
	 */
	BUG_ON(hrtimer_cancel(&rh->timer));

	/* initialize */
	heap_init(&rh->heap);
}

static void arm_release_timer(unsigned long _rt)
{
	rt_domain_t *rt = (rt_domain_t*) _rt;
	unsigned long flags;
	struct list_head list;
	struct list_head *pos, *safe;
	struct task_struct* t;
	struct release_heap* rh;

	/* We only have to defend against the ISR since norq callbacks
	 * are serialized.
	 */
	TRACE("arm_release_timer() at %llu\n", litmus_clock());
	spin_lock_irqsave(&rt->tobe_lock, flags);
	list_replace_init(&rt->tobe_released, &list);
	spin_unlock_irqrestore(&rt->tobe_lock, flags);

	list_for_each_safe(pos, safe, &list) {
		/* pick task off the work list */
		t = list_entry(pos, struct task_struct, rt_param.list);
		sched_trace_task_release(t);
		list_del(pos);

		/* put into release heap while holding release_lock */
		spin_lock_irqsave(&rt->release_lock, flags);
		TRACE_TASK(t, "I have the release_lock 0x%p\n", &rt->release_lock);
		rh = get_release_heap(rt, t, 0);
		if (!rh) {
			/* need to use our own, but drop lock first */
			spin_unlock(&rt->release_lock);
			TRACE_TASK(t, "Dropped release_lock 0x%p\n",
				   &rt->release_lock);
			reinit_release_heap(t);
			TRACE_TASK(t, "release_heap ready\n");
			spin_lock(&rt->release_lock);
			TRACE_TASK(t, "Re-acquired release_lock 0x%p\n",
				   &rt->release_lock);
			rh = get_release_heap(rt, t, 1);
		}
		heap_insert(rt->order, &rh->heap, tsk_rt(t)->heap_node);
		TRACE_TASK(t, "arm_release_timer(): added to release heap\n");
		spin_unlock_irqrestore(&rt->release_lock, flags);
		TRACE_TASK(t, "Returned the release_lock 0x%p\n", &rt->release_lock);

		/* To avoid arming the timer multiple times, we only let the
		 * owner do the arming (which is the "first" task to reference
		 * this release_heap anyway).
		 */
		if (rh == tsk_rt(t)->rel_heap) {
			TRACE_TASK(t, "arming timer 0x%p\n", &rh->timer);
			hrtimer_start(&rh->timer,
				      ns_to_ktime(rh->release_time),
				      HRTIMER_MODE_ABS);
		} else
			TRACE_TASK(t, "0x%p is not my timer\n", &rh->timer);
	}
}

void rt_domain_init(rt_domain_t *rt,
		    heap_prio_t order,
		    check_resched_needed_t check,
		    release_jobs_t release)
{
	int i;

	BUG_ON(!rt);
	if (!check)
		check = dummy_resched;
	if (!release)
		release = default_release_jobs;
	if (!order)
		order = dummy_order;

	heap_init(&rt->ready_queue);
	INIT_LIST_HEAD(&rt->tobe_released);
	for (i = 0; i < RELEASE_QUEUE_SLOTS; i++)
		INIT_LIST_HEAD(&rt->release_queue.slot[i]);

	spin_lock_init(&rt->ready_lock);
	spin_lock_init(&rt->release_lock);
	spin_lock_init(&rt->tobe_lock);

	rt->check_resched = check;
	rt->release_jobs = release;
	rt->order = order;
}

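/*
 * Illustrative sketch, not part of this patch (inert under #if 0): any of
 * the three callbacks may be NULL, in which case the fallbacks defined at
 * the top of this file are used (dummy_order, dummy_resched,
 * default_release_jobs). demo_edf_domain, demo_edf_order and
 * demo_domain_setup are hypothetical names.
 */
#if 0
static rt_domain_t demo_edf_domain;
static int demo_edf_order(struct heap_node *a, struct heap_node *b);

static void demo_domain_setup(void)
{
	/* custom priority order, default resched check and release hook */
	rt_domain_init(&demo_edf_domain, demo_edf_order, NULL, NULL);
}
#endif
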
/* add_ready - add a real-time task to the rt ready queue. It must be runnable.
 * @new: the newly released task
 */
void __add_ready(rt_domain_t* rt, struct task_struct *new)
{
	TRACE("rt: adding %s/%d (%llu, %llu) rel=%llu to ready queue at %llu\n",
	      new->comm, new->pid, get_exec_cost(new), get_rt_period(new),
	      get_release(new), litmus_clock());

	BUG_ON(heap_node_in_heap(tsk_rt(new)->heap_node));

	heap_insert(rt->order, &rt->ready_queue, tsk_rt(new)->heap_node);
	rt->check_resched(rt);
}

/* merge_ready - add a sorted set of tasks to the rt ready queue. They must be runnable.
 * @tasks: the newly released tasks
 */
void __merge_ready(rt_domain_t* rt, struct heap* tasks)
{
	heap_union(rt->order, &rt->ready_queue, tasks);
	rt->check_resched(rt);
}

/* add_release - add a real-time task to the rt release queue.
 * @task: the sleeping task
 */
void __add_release(rt_domain_t* rt, struct task_struct *task)
{
	TRACE_TASK(task, "add_release(), rel=%llu\n", get_release(task));
	list_add(&tsk_rt(task)->list, &rt->tobe_released);
	task->rt_param.domain = rt;
	/* XXX arm_release_timer() used to be activated here
	 * such that it would be called with the runqueue lock dropped.
	 */
}

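/*
 * Illustrative sketch, not part of this patch (inert under #if 0): a
 * typical release path. demo_queue_release() is a hypothetical wrapper;
 * per the commit message and the XXX note above, how and where
 * arm_release_timer() gets invoked (without holding the runqueue lock)
 * is still being reworked.
 */
#if 0
static void demo_queue_release(rt_domain_t *rt, struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&rt->tobe_lock, flags);
	__add_release(rt, t);	/* puts t on rt->tobe_released */
	spin_unlock_irqrestore(&rt->tobe_lock, flags);

	/* arm_release_timer() drains tobe_released, hashes each job into
	 * the release queue, and arms the owner's hrtimer; when it fires,
	 * on_release_timer() hands the heap to release_jobs(), which by
	 * default merges it into the ready queue.
	 */
	arm_release_timer((unsigned long) rt);
}
#endif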