author    Andrea Bastoni <bastoni@cs.unc.edu>  2009-12-17 21:31:46 -0500
committer Andrea Bastoni <bastoni@cs.unc.edu>  2009-12-17 21:31:46 -0500
commit    b7ca066915c003a0138f80f04aec9a3c30c873f3
tree      d855eff2efead114df6cc88863f741875fa16d7b
parent    5789b583c78f06d046c3585c8def400f65ae2b68
Add rt_domain_t support
To be merged:
- arm_release_timer() with no rq locking
-rw-r--r--  include/litmus/rt_domain.h | 158
-rw-r--r--  litmus/Makefile            |   1
-rw-r--r--  litmus/litmus.c            |  45
-rw-r--r--  litmus/rt_domain.c         | 287
4 files changed, 478 insertions(+), 13 deletions(-)
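
A hypothetical usage sketch (not part of this patch) of how a scheduler plugin is expected to drive the rt_domain_t API introduced below. rt_domain_init(), take_ready(), __next_ready(), heap2task(), get_release() and lt_before() come from this patch and litmus.h; everything prefixed example_ is made up for illustration:

#include <litmus/litmus.h>
#include <litmus/rt_domain.h>

static rt_domain_t example_domain;      /* hypothetical plugin-local domain */

/* ordering callback: non-zero means 'a' has higher priority than 'b' */
static int example_order(struct heap_node* a, struct heap_node* b)
{
        /* here: earlier release time wins; a real plugin would compare
         * deadlines or whatever its policy requires */
        return lt_before(get_release(heap2task(a)), get_release(heap2task(b)));
}

/* resched hook: invoked after jobs are added to the ready queue */
static int example_check_resched(rt_domain_t *rt)
{
        /* a real plugin would compare __next_ready(rt) against the task
         * currently scheduled and trigger a preemption if necessary */
        return 0;
}

static void example_setup(void)
{
        /* passing NULL selects default_release_jobs() from rt_domain.c */
        rt_domain_init(&example_domain, example_order,
                       example_check_resched, NULL);
}

static struct task_struct* example_pick_next(void)
{
        /* take_ready() acquires ready_lock internally */
        return take_ready(&example_domain);
}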
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
new file mode 100644
index 000000000000..bde1e5a54812
--- /dev/null
+++ b/include/litmus/rt_domain.h
@@ -0,0 +1,158 @@
+/* CLEANUP: Add comments and make it less messy.
+ *
+ */
+
+#ifndef __UNC_RT_DOMAIN_H__
+#define __UNC_RT_DOMAIN_H__
+
+#include <litmus/heap.h>
+
+#define RELEASE_QUEUE_SLOTS 127 /* prime */
+
+struct _rt_domain;
+
+typedef int (*check_resched_needed_t)(struct _rt_domain *rt);
+typedef void (*release_jobs_t)(struct _rt_domain *rt, struct heap* tasks);
+
+struct release_queue {
+        /* each slot maintains a list of release heaps sorted
+         * by release time */
+        struct list_head slot[RELEASE_QUEUE_SLOTS];
+};
+
+typedef struct _rt_domain {
+        /* runnable rt tasks are in here */
+        spinlock_t ready_lock;
+        struct heap ready_queue;
+
+        /* real-time tasks waiting for release are in here */
+        spinlock_t release_lock;
+        struct release_queue release_queue;
+
+        /* for moving tasks to the release queue */
+        spinlock_t tobe_lock;
+        struct list_head tobe_released;
+
+        /* how do we check if we need to kick another CPU? */
+        check_resched_needed_t check_resched;
+
+        /* how do we release jobs? */
+        release_jobs_t release_jobs;
+
+        /* how are tasks ordered in the ready queue? */
+        heap_prio_t order;
+} rt_domain_t;
+
+struct release_heap {
+        /* list_head for per-time-slot list */
+        struct list_head list;
+        lt_t release_time;
+        /* all tasks to be released at release_time */
+        struct heap heap;
+        /* used to trigger the release */
+        struct hrtimer timer;
+        /* required for the timer callback */
+        rt_domain_t* dom;
+};
+
+
+static inline struct task_struct* __next_ready(rt_domain_t* rt)
+{
+        struct heap_node *hn = heap_peek(rt->order, &rt->ready_queue);
+        if (hn)
+                return heap2task(hn);
+        else
+                return NULL;
+}
+
+void rt_domain_init(rt_domain_t *rt, heap_prio_t order,
+                    check_resched_needed_t check,
+                    release_jobs_t release);
+
+void __add_ready(rt_domain_t* rt, struct task_struct *new);
+void __merge_ready(rt_domain_t* rt, struct heap *tasks);
+void __add_release(rt_domain_t* rt, struct task_struct *task);
+
+static inline struct task_struct* __take_ready(rt_domain_t* rt)
+{
+        struct heap_node* hn = heap_take(rt->order, &rt->ready_queue);
+        if (hn)
+                return heap2task(hn);
+        else
+                return NULL;
+}
+
+static inline struct task_struct* __peek_ready(rt_domain_t* rt)
+{
+        struct heap_node* hn = heap_peek(rt->order, &rt->ready_queue);
+        if (hn)
+                return heap2task(hn);
+        else
+                return NULL;
+}
+
+static inline int is_queued(struct task_struct *t)
+{
+        return heap_node_in_heap(tsk_rt(t)->heap_node);
+}
+
+static inline void remove(rt_domain_t* rt, struct task_struct *t)
+{
+        heap_delete(rt->order, &rt->ready_queue, tsk_rt(t)->heap_node);
+}
+
+static inline void add_ready(rt_domain_t* rt, struct task_struct *new)
+{
+        unsigned long flags;
+        /* first we need the write lock for rt_ready_queue */
+        spin_lock_irqsave(&rt->ready_lock, flags);
+        __add_ready(rt, new);
+        spin_unlock_irqrestore(&rt->ready_lock, flags);
+}
+
+static inline void merge_ready(rt_domain_t* rt, struct heap* tasks)
+{
+        unsigned long flags;
+        spin_lock_irqsave(&rt->ready_lock, flags);
+        __merge_ready(rt, tasks);
+        spin_unlock_irqrestore(&rt->ready_lock, flags);
+}
+
+static inline struct task_struct* take_ready(rt_domain_t* rt)
+{
+        unsigned long flags;
+        struct task_struct* ret;
+        /* first we need the write lock for rt_ready_queue */
+        spin_lock_irqsave(&rt->ready_lock, flags);
+        ret = __take_ready(rt);
+        spin_unlock_irqrestore(&rt->ready_lock, flags);
+        return ret;
+}
+
+
+static inline void add_release(rt_domain_t* rt, struct task_struct *task)
+{
+        unsigned long flags;
+        /* the tobe_lock protects the list of tasks to be released */
+        spin_lock_irqsave(&rt->tobe_lock, flags);
+        __add_release(rt, task);
+        spin_unlock_irqrestore(&rt->tobe_lock, flags);
+}
+
+static inline int __jobs_pending(rt_domain_t* rt)
+{
+        return !heap_empty(&rt->ready_queue);
+}
+
+static inline int jobs_pending(rt_domain_t* rt)
+{
+        unsigned long flags;
+        int ret;
+        /* first we need the write lock for rt_ready_queue */
+        spin_lock_irqsave(&rt->ready_lock, flags);
+        ret = !heap_empty(&rt->ready_queue);
+        spin_unlock_irqrestore(&rt->ready_lock, flags);
+        return ret;
+}
+
+#endif
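
The helpers above follow one locking convention: the underscore-prefixed functions assume the caller already holds the relevant lock, while the wrappers acquire it themselves. A hypothetical sketch (not part of this patch) of a plugin-side requeue that respects this convention; example_requeue is an invented name:

#include <litmus/litmus.h>
#include <litmus/rt_domain.h>

static void example_requeue(rt_domain_t *rt, struct task_struct *t)
{
        unsigned long flags;

        spin_lock_irqsave(&rt->ready_lock, flags);
        if (!is_queued(t))
                __add_ready(rt, t);     /* lock already held: use the __ variant;
                                         * calling add_ready() here would deadlock */
        spin_unlock_irqrestore(&rt->ready_lock, flags);

        /* A job whose release time still lies in the future would instead go
         * through add_release(), which takes tobe_lock, not ready_lock. */
}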
diff --git a/litmus/Makefile b/litmus/Makefile
index e93f19bb2016..576c1ded20e9 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -5,6 +5,7 @@
 obj-y     = sched_plugin.o litmus.o \
             jobs.o \
             sync.o \
+            rt_domain.o \
             heap.o
 
 obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 9254f1621af7..de751a14d77c 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -19,6 +19,8 @@
 
 #include <litmus/trace.h>
 
+#include <litmus/rt_domain.h>
+
 /* Number of RT tasks that exist in the system */
 atomic_t rt_task_count = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(task_transition_lock);
@@ -30,6 +32,7 @@ atomic_t __log_seq_no = ATOMIC_INIT(0);
 atomic_t release_master_cpu = ATOMIC_INIT(NO_CPU);
 
 static struct kmem_cache * heap_node_cache;
+extern struct kmem_cache * release_heap_cache;
 
 struct heap_node* heap_node_alloc(int gfp_flags)
 {
@@ -41,6 +44,9 @@ void heap_node_free(struct heap_node* hn)
         kmem_cache_free(heap_node_cache, hn);
 }
 
+struct release_heap* release_heap_alloc(int gfp_flags);
+void release_heap_free(struct release_heap* rh);
+
 /*
  * sys_set_task_rt_param
  * @pid: Pid of the task which scheduling parameters must be changed
@@ -299,15 +305,16 @@ long litmus_admit_task(struct task_struct* tsk)
             get_exec_cost(tsk) > get_rt_period(tsk)) {
                 TRACE_TASK(tsk, "litmus admit: invalid task parameters "
                            "(%lu, %lu)\n",
                            get_exec_cost(tsk), get_rt_period(tsk));
-                return -EINVAL;
+                retval = -EINVAL;
+                goto out;
         }
 
-        if (!cpu_online(get_partition(tsk)))
-        {
+        if (!cpu_online(get_partition(tsk))) {
                 TRACE_TASK(tsk, "litmus admit: cpu %d is not online\n",
                            get_partition(tsk));
-                return -EINVAL;
+                retval = -EINVAL;
+                goto out;
         }
 
         INIT_LIST_HEAD(&tsk_rt(tsk)->list);
@@ -316,17 +323,22 @@ long litmus_admit_task(struct task_struct* tsk)
         spin_lock_irqsave(&task_transition_lock, flags);
 
         /* allocate heap node for this task */
         tsk_rt(tsk)->heap_node = heap_node_alloc(GFP_ATOMIC);
-        if (!tsk_rt(tsk)->heap_node ||
-            !tsk_rt(tsk)->rel_heap) {
+        tsk_rt(tsk)->rel_heap = release_heap_alloc(GFP_ATOMIC);
+
+        if (!tsk_rt(tsk)->heap_node || !tsk_rt(tsk)->rel_heap) {
                 printk(KERN_WARNING "litmus: no more heap node memory!?\n");
-                retval = -ENOMEM;
+
                 heap_node_free(tsk_rt(tsk)->heap_node);
-        } else
+                release_heap_free(tsk_rt(tsk)->rel_heap);
+
+                retval = -ENOMEM;
+                goto out_unlock;
+        } else {
                 heap_node_init(&tsk_rt(tsk)->heap_node, tsk);
+        }
 
-        if (!retval)
-                retval = litmus->admit_task(tsk);
+        retval = litmus->admit_task(tsk);
 
         if (!retval) {
                 sched_trace_task_name(tsk);
@@ -334,8 +346,9 @@ long litmus_admit_task(struct task_struct* tsk)
                 atomic_inc(&rt_task_count);
         }
 
+out_unlock:
         spin_unlock_irqrestore(&task_transition_lock, flags);
-
+out:
         return retval;
 }
 
@@ -343,9 +356,13 @@ void litmus_exit_task(struct task_struct* tsk)
 {
         if (is_realtime(tsk)) {
                 sched_trace_task_completion(tsk, 1);
+
                 litmus->task_exit(tsk);
+
                 BUG_ON(heap_node_in_heap(tsk_rt(tsk)->heap_node));
                 heap_node_free(tsk_rt(tsk)->heap_node);
+                release_heap_free(tsk_rt(tsk)->rel_heap);
+
                 atomic_dec(&rt_task_count);
                 reinit_litmus_state(tsk, 1);
         }
@@ -632,6 +649,7 @@ static int __init _init_litmus(void)
         register_sched_plugin(&linux_sched_plugin);
 
         heap_node_cache = KMEM_CACHE(heap_node, SLAB_PANIC);
+        release_heap_cache = KMEM_CACHE(release_heap, SLAB_PANIC);
 
 #ifdef CONFIG_MAGIC_SYSRQ
         /* offer some debugging help */
@@ -650,6 +668,7 @@ static void _exit_litmus(void)
 {
         exit_litmus_proc();
         kmem_cache_destroy(heap_node_cache);
+        kmem_cache_destroy(release_heap_cache);
 }
 
 module_init(_init_litmus);
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
new file mode 100644
index 000000000000..4fa834018efa
--- /dev/null
+++ b/litmus/rt_domain.c
@@ -0,0 +1,287 @@
+/*
+ * litmus/rt_domain.c
+ *
+ * LITMUS real-time infrastructure. This file contains the
+ * functions that manipulate RT domains. RT domains are an abstraction
+ * of a ready queue and a release queue.
+ */
+
+#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <litmus/litmus.h>
+#include <litmus/sched_plugin.h>
+#include <litmus/sched_trace.h>
+
+#include <litmus/rt_domain.h>
+
+#include <litmus/trace.h>
+
+#include <litmus/heap.h>
+
+static int dummy_resched(rt_domain_t *rt)
+{
+        return 0;
+}
+
+static int dummy_order(struct heap_node* a, struct heap_node* b)
+{
+        return 0;
+}
+
+/* default implementation: merge the released jobs into the ready queue */
+static void default_release_jobs(rt_domain_t* rt, struct heap* tasks)
+{
+        merge_ready(rt, tasks);
+}
+
+static unsigned int time2slot(lt_t time)
+{
+        return (unsigned int) time2quanta(time, FLOOR) % RELEASE_QUEUE_SLOTS;
+}
+
+static enum hrtimer_restart on_release_timer(struct hrtimer *timer)
+{
+        unsigned long flags;
+        struct release_heap* rh;
+
+        TRACE("on_release_timer(0x%p) starts.\n", timer);
+
+        TS_RELEASE_START;
+
+        rh = container_of(timer, struct release_heap, timer);
+
+        spin_lock_irqsave(&rh->dom->release_lock, flags);
+        TRACE("CB has the release_lock 0x%p\n", &rh->dom->release_lock);
+        /* remove from release queue */
+        list_del(&rh->list);
+        spin_unlock_irqrestore(&rh->dom->release_lock, flags);
+        TRACE("CB returned release_lock 0x%p\n", &rh->dom->release_lock);
+
+        /* call release callback */
+        rh->dom->release_jobs(rh->dom, &rh->heap);
+        /* WARNING: rh can be referenced from other CPUs from now on. */
+
+        TS_RELEASE_END;
+
+        TRACE("on_release_timer(0x%p) ends.\n", timer);
+
+        return HRTIMER_NORESTART;
+}
+
+/* allocated in litmus.c */
+struct kmem_cache * release_heap_cache;
+
+struct release_heap* release_heap_alloc(int gfp_flags)
+{
+        struct release_heap* rh;
+        rh = kmem_cache_alloc(release_heap_cache, gfp_flags);
+        if (rh) {
+                /* initialize timer */
+                hrtimer_init(&rh->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+                rh->timer.function = on_release_timer;
+        }
+        return rh;
+}
+
+void release_heap_free(struct release_heap* rh)
+{
+        /* make sure timer is no longer in use */
+        hrtimer_cancel(&rh->timer);
+        kmem_cache_free(release_heap_cache, rh);
+}
+
+/* Caller must hold release lock.
+ * Will return heap for given time. If no such heap exists prior to
+ * the invocation it will be created.
+ */
+static struct release_heap* get_release_heap(rt_domain_t *rt,
+                                             struct task_struct* t,
+                                             int use_task_heap)
+{
+        struct list_head* pos;
+        struct release_heap* heap = NULL;
+        struct release_heap* rh;
+        lt_t release_time = get_release(t);
+        unsigned int slot = time2slot(release_time);
+
+        /* initialize pos for the case that the list is empty */
+        pos = rt->release_queue.slot[slot].next;
+        list_for_each(pos, &rt->release_queue.slot[slot]) {
+                rh = list_entry(pos, struct release_heap, list);
+                if (release_time == rh->release_time) {
+                        /* perfect match -- this happens on hyperperiod
+                         * boundaries
+                         */
+                        heap = rh;
+                        break;
+                } else if (lt_before(release_time, rh->release_time)) {
+                        /* we need to insert a new node since rh is
+                         * already in the future
+                         */
+                        break;
+                }
+        }
+        if (!heap && use_task_heap) {
+                /* use pre-allocated release heap */
+                rh = tsk_rt(t)->rel_heap;
+
+                rh->dom = rt;
+                rh->release_time = release_time;
+
+                /* add to release queue */
+                list_add(&rh->list, pos->prev);
+                heap = rh;
+        }
+        return heap;
+}
+
+static void reinit_release_heap(struct task_struct* t)
+{
+        struct release_heap* rh;
+
+        /* use pre-allocated release heap */
+        rh = tsk_rt(t)->rel_heap;
+
+        /* Make sure it is safe to use. The timer callback could still
+         * be executing on another CPU; hrtimer_cancel() will wait
+         * until the timer callback has completed. However, under no
+         * circumstances should the timer be active (= yet to be
+         * triggered).
+         *
+         * WARNING: If the CPU still holds the release_lock at this point,
+         * deadlock may occur!
+         */
+        BUG_ON(hrtimer_cancel(&rh->timer));
+
+        /* initialize */
+        heap_init(&rh->heap);
+}
+
+static void arm_release_timer(unsigned long _rt)
+{
+        rt_domain_t *rt = (rt_domain_t*) _rt;
+        unsigned long flags;
+        struct list_head list;
+        struct list_head *pos, *safe;
+        struct task_struct* t;
+        struct release_heap* rh;
+
+        /* We only have to defend against the ISR since norq callbacks
+         * are serialized.
+         */
+        TRACE("arm_release_timer() at %llu\n", litmus_clock());
+        spin_lock_irqsave(&rt->tobe_lock, flags);
+        list_replace_init(&rt->tobe_released, &list);
+        spin_unlock_irqrestore(&rt->tobe_lock, flags);
+
+        list_for_each_safe(pos, safe, &list) {
+                /* pick task off the work list */
+                t = list_entry(pos, struct task_struct, rt_param.list);
+                sched_trace_task_release(t);
+                list_del(pos);
+
+                /* put into release heap while holding release_lock */
+                spin_lock_irqsave(&rt->release_lock, flags);
+                TRACE_TASK(t, "I have the release_lock 0x%p\n", &rt->release_lock);
+                rh = get_release_heap(rt, t, 0);
+                if (!rh) {
+                        /* need to use our own, but drop lock first */
+                        spin_unlock(&rt->release_lock);
+                        TRACE_TASK(t, "Dropped release_lock 0x%p\n",
+                                   &rt->release_lock);
+                        reinit_release_heap(t);
+                        TRACE_TASK(t, "release_heap ready\n");
+                        spin_lock(&rt->release_lock);
+                        TRACE_TASK(t, "Re-acquired release_lock 0x%p\n",
+                                   &rt->release_lock);
+                        rh = get_release_heap(rt, t, 1);
+                }
+                heap_insert(rt->order, &rh->heap, tsk_rt(t)->heap_node);
+                TRACE_TASK(t, "arm_release_timer(): added to release heap\n");
+                spin_unlock_irqrestore(&rt->release_lock, flags);
+                TRACE_TASK(t, "Returned the release_lock 0x%p\n", &rt->release_lock);
+
+                /* To avoid arming the timer multiple times, we only let the
+                 * owner do the arming (which is the "first" task to reference
+                 * this release_heap anyway).
+                 */
+                if (rh == tsk_rt(t)->rel_heap) {
+                        TRACE_TASK(t, "arming timer 0x%p\n", &rh->timer);
+                        hrtimer_start(&rh->timer,
+                                      ns_to_ktime(rh->release_time),
+                                      HRTIMER_MODE_ABS);
+                } else
+                        TRACE_TASK(t, "0x%p is not my timer\n", &rh->timer);
+        }
+}
+
+void rt_domain_init(rt_domain_t *rt,
+                    heap_prio_t order,
+                    check_resched_needed_t check,
+                    release_jobs_t release
+                    )
+{
+        int i;
+
+        BUG_ON(!rt);
+        if (!check)
+                check = dummy_resched;
+        if (!release)
+                release = default_release_jobs;
+        if (!order)
+                order = dummy_order;
+
+        heap_init(&rt->ready_queue);
+        INIT_LIST_HEAD(&rt->tobe_released);
+        for (i = 0; i < RELEASE_QUEUE_SLOTS; i++)
+                INIT_LIST_HEAD(&rt->release_queue.slot[i]);
+
+        spin_lock_init(&rt->ready_lock);
+        spin_lock_init(&rt->release_lock);
+        spin_lock_init(&rt->tobe_lock);
+
+        rt->check_resched = check;
+        rt->release_jobs = release;
+        rt->order = order;
+}
+
+/* add_ready - add a real-time task to the rt ready queue. It must be runnable.
+ * @new:       the newly released task
+ */
+void __add_ready(rt_domain_t* rt, struct task_struct *new)
+{
+        TRACE("rt: adding %s/%d (%llu, %llu) rel=%llu to ready queue at %llu\n",
+              new->comm, new->pid, get_exec_cost(new), get_rt_period(new),
+              get_release(new), litmus_clock());
+
+        BUG_ON(heap_node_in_heap(tsk_rt(new)->heap_node));
+
+        heap_insert(rt->order, &rt->ready_queue, tsk_rt(new)->heap_node);
+        rt->check_resched(rt);
+}
+
+/* merge_ready - Add a sorted set of tasks to the rt ready queue. They must be runnable.
+ * @tasks      - the newly released tasks
+ */
+void __merge_ready(rt_domain_t* rt, struct heap* tasks)
+{
+        heap_union(rt->order, &rt->ready_queue, tasks);
+        rt->check_resched(rt);
+}
+
+/* add_release - add a real-time task to the rt release queue.
+ * @task:        the sleeping task
+ */
+void __add_release(rt_domain_t* rt, struct task_struct *task)
+{
+        TRACE_TASK(task, "add_release(), rel=%llu\n", get_release(task));
+        list_add(&tsk_rt(task)->list, &rt->tobe_released);
+        task->rt_param.domain = rt;
+        /* XXX arm_release_timer() used to be activated here
+         * such that it would be called with the runqueue lock dropped.
+         */
+}
+
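
A hypothetical sketch (not part of this patch) of where a plugin hands a finished job back to the domain. add_release() only parks the task on tobe_released under tobe_lock; per the commit message, the call to arm_release_timer(), which moves tasks into a release_heap and arms the hrtimer, is still to be merged "with no rq locking". prepare_for_next_period() and is_released() are assumed to come from litmus/jobs.h and litmus.h, respectively:

#include <litmus/litmus.h>
#include <litmus/jobs.h>
#include <litmus/rt_domain.h>

static void example_job_completion(rt_domain_t *rt, struct task_struct *t)
{
        prepare_for_next_period(t);             /* advance release/deadline */

        if (is_released(t, litmus_clock()))
                add_ready(rt, t);               /* already eligible to run */
        else
                add_release(rt, t);             /* wait for the release path */
}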
287