Diffstat (limited to 'litmus/rt_domain.c')
-rw-r--r--   litmus/rt_domain.c   310
1 file changed, 310 insertions(+), 0 deletions(-)
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
new file mode 100644
index 000000000000..609ff0f82abb
--- /dev/null
+++ b/litmus/rt_domain.c
@@ -0,0 +1,310 @@
/*
 * litmus/rt_domain.c
 *
 * LITMUS real-time infrastructure. This file contains the
 * functions that manipulate RT domains. RT domains are an abstraction
 * of a ready queue and a release queue.
 */

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <litmus/litmus.h>
#include <litmus/sched_plugin.h>
#include <litmus/sched_trace.h>

#include <litmus/rt_domain.h>

#include <litmus/trace.h>

#include <litmus/bheap.h>

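/* Default no-op callbacks, installed by rt_domain_init() when the plugin
 * does not supply its own resched check or priority order.
 */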
static int dummy_resched(rt_domain_t *rt)
{
        return 0;
}

static int dummy_order(struct bheap_node* a, struct bheap_node* b)
{
        return 0;
}

/* default implementation: use default lock */
static void default_release_jobs(rt_domain_t* rt, struct bheap* tasks)
{
        merge_ready(rt, tasks);
}

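/* The release queue is organized as RELEASE_QUEUE_SLOTS lists hashed by
 * quantized release time; time2slot() maps an absolute release time to its
 * slot index.
 */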
static unsigned int time2slot(lt_t time)
{
        return (unsigned int) time2quanta(time, FLOOR) % RELEASE_QUEUE_SLOTS;
}

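/* Timer callback: unlink this release heap from the release queue and hand
 * the now-released jobs to the domain's release_jobs() callback.
 */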
static enum hrtimer_restart on_release_timer(struct hrtimer *timer)
{
        unsigned long flags;
        struct release_heap* rh;

        TRACE("on_release_timer(0x%p) starts.\n", timer);

        TS_RELEASE_START;

        rh = container_of(timer, struct release_heap, timer);

        spin_lock_irqsave(&rh->dom->release_lock, flags);
        TRACE("CB has the release_lock 0x%p\n", &rh->dom->release_lock);
        /* remove from release queue */
        list_del(&rh->list);
        spin_unlock_irqrestore(&rh->dom->release_lock, flags);
        TRACE("CB returned release_lock 0x%p\n", &rh->dom->release_lock);

        /* call release callback */
        rh->dom->release_jobs(rh->dom, &rh->heap);
        /* WARNING: rh can be referenced from other CPUs from now on. */

        TS_RELEASE_END;

        TRACE("on_release_timer(0x%p) ends.\n", timer);

        return HRTIMER_NORESTART;
}

/* allocated in litmus.c */
struct kmem_cache *release_heap_cache;

struct release_heap* release_heap_alloc(int gfp_flags)
{
        struct release_heap* rh;
        rh = kmem_cache_alloc(release_heap_cache, gfp_flags);
        if (rh) {
                /* initialize timer */
                hrtimer_init(&rh->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
                rh->timer.function = on_release_timer;
        }
        return rh;
}

void release_heap_free(struct release_heap* rh)
{
        /* make sure the timer is no longer in use */
        hrtimer_cancel(&rh->timer);
        kmem_cache_free(release_heap_cache, rh);
}

/* Caller must hold the release lock.
 * Returns the heap for the given release time. If no such heap exists prior
 * to the invocation, it is created from the task's pre-allocated heap, but
 * only if use_task_heap is set.
 */
static struct release_heap* get_release_heap(rt_domain_t *rt,
                                             struct task_struct* t,
                                             int use_task_heap)
{
        struct list_head* pos;
        struct release_heap* heap = NULL;
        struct release_heap* rh;
        lt_t release_time = get_release(t);
        unsigned int slot = time2slot(release_time);

        /* initialize pos for the case that the list is empty */
        pos = rt->release_queue.slot[slot].next;
        list_for_each(pos, &rt->release_queue.slot[slot]) {
                rh = list_entry(pos, struct release_heap, list);
                if (release_time == rh->release_time) {
                        /* perfect match -- this happens on hyperperiod
                         * boundaries
                         */
                        heap = rh;
                        break;
                } else if (lt_before(release_time, rh->release_time)) {
                        /* we need to insert a new node since rh is
                         * already in the future
                         */
                        break;
                }
        }
        if (!heap && use_task_heap) {
                /* use pre-allocated release heap */
                rh = tsk_rt(t)->rel_heap;

                rh->dom = rt;
                rh->release_time = release_time;

                /* add to release queue */
                list_add(&rh->list, pos->prev);
                heap = rh;
        }
        return heap;
}

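/* Reset a task's pre-allocated release heap so that it can be linked into
 * the release queue again.
 */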
static void reinit_release_heap(struct task_struct* t)
{
        struct release_heap* rh;

        /* use pre-allocated release heap */
        rh = tsk_rt(t)->rel_heap;

        /* Make sure it is safe to use. The timer callback could still
         * be executing on another CPU; hrtimer_cancel() will wait
         * until the timer callback has completed. However, under no
         * circumstances should the timer be active (= yet to be
         * triggered).
         *
         * WARNING: If the CPU still holds the release_lock at this point,
         * deadlock may occur!
         */
        BUG_ON(hrtimer_cancel(&rh->timer));

        /* initialize */
        bheap_init(&rh->heap);
        atomic_set(&rh->info.state, HRTIMER_START_ON_INACTIVE);
}

/* arm_release_timer() - start local release timer or trigger
 *     remote timer (pull timer)
 *
 * Called by add_release() with:
 * - tobe_lock taken
 * - IRQ disabled
 */
static void arm_release_timer(rt_domain_t *_rt)
{
        rt_domain_t *rt = _rt;
        struct list_head list;
        struct list_head *pos, *safe;
        struct task_struct* t;
        struct release_heap* rh;

        TRACE("arm_release_timer() at %llu\n", litmus_clock());
        list_replace_init(&rt->tobe_released, &list);

        list_for_each_safe(pos, safe, &list) {
                /* pick task off the work list */
                t = list_entry(pos, struct task_struct, rt_param.list);
                sched_trace_task_release(t);
                list_del(pos);

                /* put into release heap while holding release_lock */
                spin_lock(&rt->release_lock);
                TRACE_TASK(t, "I have the release_lock 0x%p\n", &rt->release_lock);

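                /* Try to merge into an existing release heap for this
                 * release time first (use_task_heap == 0); the task's own
                 * heap may still need to be reinitialized, which must not
                 * happen while holding release_lock.
                 */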
                rh = get_release_heap(rt, t, 0);
                if (!rh) {
                        /* need to use our own, but drop lock first */
                        spin_unlock(&rt->release_lock);
                        TRACE_TASK(t, "Dropped release_lock 0x%p\n",
                                   &rt->release_lock);

                        reinit_release_heap(t);
                        TRACE_TASK(t, "release_heap ready\n");

                        spin_lock(&rt->release_lock);
                        TRACE_TASK(t, "Re-acquired release_lock 0x%p\n",
                                   &rt->release_lock);

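                        /* Retry with use_task_heap set: either another CPU
                         * inserted a matching heap while the lock was
                         * dropped, or our freshly reinitialized heap is
                         * linked in here.
                         */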
                        rh = get_release_heap(rt, t, 1);
                }
                bheap_insert(rt->order, &rh->heap, tsk_rt(t)->heap_node);
                TRACE_TASK(t, "arm_release_timer(): added to release heap\n");

                spin_unlock(&rt->release_lock);
                TRACE_TASK(t, "Returned the release_lock 0x%p\n", &rt->release_lock);

                /* To avoid arming the timer multiple times, we only let the
                 * owner do the arming (which is the "first" task to reference
                 * this release_heap anyway).
                 */
                if (rh == tsk_rt(t)->rel_heap) {
                        TRACE_TASK(t, "arming timer 0x%p\n", &rh->timer);
                        /* we cannot arm the timer using hrtimer_start()
                         * as it may deadlock on rq->lock
                         *
                         * PINNED mode is ok on both local and remote CPU
                         */
                        if (rt->release_master == NO_CPU)
                                __hrtimer_start_range_ns(&rh->timer,
                                                ns_to_ktime(rh->release_time),
                                                0, HRTIMER_MODE_ABS_PINNED, 0);
                        else
                                hrtimer_start_on(rt->release_master,
                                                &rh->info, &rh->timer,
                                                ns_to_ktime(rh->release_time),
                                                HRTIMER_MODE_ABS_PINNED);
                } else
                        TRACE_TASK(t, "0x%p is not my timer\n", &rh->timer);
        }
}

void rt_domain_init(rt_domain_t *rt,
                    bheap_prio_t order,
                    check_resched_needed_t check,
                    release_jobs_t release
                    )
{
        int i;

        BUG_ON(!rt);
        if (!check)
                check = dummy_resched;
        if (!release)
                release = default_release_jobs;
        if (!order)
                order = dummy_order;

        rt->release_master = NO_CPU;

        bheap_init(&rt->ready_queue);
        INIT_LIST_HEAD(&rt->tobe_released);
        for (i = 0; i < RELEASE_QUEUE_SLOTS; i++)
                INIT_LIST_HEAD(&rt->release_queue.slot[i]);

        spin_lock_init(&rt->ready_lock);
        spin_lock_init(&rt->release_lock);
        spin_lock_init(&rt->tobe_lock);

        rt->check_resched = check;
        rt->release_jobs = release;
        rt->order = order;
}
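
/* Usage sketch (illustrative only): a scheduler plugin typically embeds an
 * rt_domain_t and initializes it with its own callbacks, e.g.
 *
 *         rt_domain_init(&my_domain, my_prio_order, my_check_resched, NULL);
 *
 * where my_domain, my_prio_order, and my_check_resched are hypothetical
 * plugin-side names; passing NULL for the release callback selects
 * default_release_jobs() above.
 */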

/* add_ready - add a real-time task to the rt ready queue. It must be runnable.
 * @new:       the newly released task
 */
void __add_ready(rt_domain_t* rt, struct task_struct *new)
{
        TRACE("rt: adding %s/%d (%llu, %llu) rel=%llu to ready queue at %llu\n",
              new->comm, new->pid, get_exec_cost(new), get_rt_period(new),
              get_release(new), litmus_clock());

        BUG_ON(bheap_node_in_heap(tsk_rt(new)->heap_node));

        bheap_insert(rt->order, &rt->ready_queue, tsk_rt(new)->heap_node);
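        /* let the plugin decide whether a preemption/reschedule is needed */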
        rt->check_resched(rt);
}

/* merge_ready - Add a sorted set of tasks to the rt ready queue. They must be runnable.
 * @tasks      - the newly released tasks
 */
void __merge_ready(rt_domain_t* rt, struct bheap* tasks)
{
        bheap_union(rt->order, &rt->ready_queue, tasks);
        rt->check_resched(rt);
}

/* add_release - add a real-time task to the rt release queue.
 * @task:        the sleeping task
 */
void __add_release(rt_domain_t* rt, struct task_struct *task)
{
        TRACE_TASK(task, "add_release(), rel=%llu\n", get_release(task));
        list_add(&tsk_rt(task)->list, &rt->tobe_released);
        task->rt_param.domain = rt;

        /* start release timer */
        TS_SCHED2_START(task);

        arm_release_timer(rt);

        TS_SCHED2_END(task);
}