aboutsummaryrefslogtreecommitdiffstats
path: root/include/litmus
diff options
context:
space:
mode:
authorAndrea Bastoni <bastoni@cs.unc.edu>2009-12-17 21:31:46 -0500
committerAndrea Bastoni <bastoni@cs.unc.edu>2010-05-29 17:14:41 -0400
commit53696c1fe6a6ada66f2a47c078d62aee40ad8ebe (patch)
tree01353124d90341322967c6b6e4b010fe4ed22026 /include/litmus
parent4e593e7105dec02e62ea7a1812dccb35a0d56d01 (diff)
[ported from 2008.3] Add rt_domain_t support
Still to be merged: - arm_release_timer() with no rq locking
Diffstat (limited to 'include/litmus')
-rw-r--r--include/litmus/rt_domain.h158
1 files changed, 158 insertions, 0 deletions
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
new file mode 100644
index 000000000000..bde1e5a54812
--- /dev/null
+++ b/include/litmus/rt_domain.h
@@ -0,0 +1,158 @@
1/* CLEANUP: Add comments and make it less messy.
2 *
3 */
4
5#ifndef __UNC_RT_DOMAIN_H__
6#define __UNC_RT_DOMAIN_H__
7
8#include <litmus/heap.h>
9
10#define RELEASE_QUEUE_SLOTS 127 /* prime */
11
struct _rt_domain;

/* Plugin callback: decide whether another CPU needs to be rescheduled
 * after the ready queue of @rt changed. Returns nonzero if so. */
typedef int (*check_resched_needed_t)(struct _rt_domain *rt);

/* Plugin callback: hand back a heap of @tasks whose release time has
 * arrived so the plugin can move them to the ready queue. */
typedef void (*release_jobs_t)(struct _rt_domain *rt, struct heap *tasks);
17struct release_queue {
18 /* each slot maintains a list of release heaps sorted
19 * by release time */
20 struct list_head slot[RELEASE_QUEUE_SLOTS];
21};
22
23typedef struct _rt_domain {
24 /* runnable rt tasks are in here */
25 spinlock_t ready_lock;
26 struct heap ready_queue;
27
28 /* real-time tasks waiting for release are in here */
29 spinlock_t release_lock;
30 struct release_queue release_queue;
31
32 /* for moving tasks to the release queue */
33 spinlock_t tobe_lock;
34 struct list_head tobe_released;
35
36 /* how do we check if we need to kick another CPU? */
37 check_resched_needed_t check_resched;
38
39 /* how do we release jobs? */
40 release_jobs_t release_jobs;
41
42 /* how are tasks ordered in the ready queue? */
43 heap_prio_t order;
44} rt_domain_t;
45
46struct release_heap {
47 /* list_head for per-time-slot list */
48 struct list_head list;
49 lt_t release_time;
50 /* all tasks to be released at release_time */
51 struct heap heap;
52 /* used to trigger the release */
53 struct hrtimer timer;
54 /* required for the timer callback */
55 rt_domain_t* dom;
56};
57
58
59static inline struct task_struct* __next_ready(rt_domain_t* rt)
60{
61 struct heap_node *hn = heap_peek(rt->order, &rt->ready_queue);
62 if (hn)
63 return heap2task(hn);
64 else
65 return NULL;
66}
67
68void rt_domain_init(rt_domain_t *rt, heap_prio_t order,
69 check_resched_needed_t check,
70 release_jobs_t relase);
71
72void __add_ready(rt_domain_t* rt, struct task_struct *new);
73void __merge_ready(rt_domain_t* rt, struct heap *tasks);
74void __add_release(rt_domain_t* rt, struct task_struct *task);
75
76static inline struct task_struct* __take_ready(rt_domain_t* rt)
77{
78 struct heap_node* hn = heap_take(rt->order, &rt->ready_queue);
79 if (hn)
80 return heap2task(hn);
81 else
82 return NULL;
83}
84
85static inline struct task_struct* __peek_ready(rt_domain_t* rt)
86{
87 struct heap_node* hn = heap_peek(rt->order, &rt->ready_queue);
88 if (hn)
89 return heap2task(hn);
90 else
91 return NULL;
92}
93
94static inline int is_queued(struct task_struct *t)
95{
96 return heap_node_in_heap(tsk_rt(t)->heap_node);
97}
98
99static inline void remove(rt_domain_t* rt, struct task_struct *t)
100{
101 heap_delete(rt->order, &rt->ready_queue, tsk_rt(t)->heap_node);
102}
103
104static inline void add_ready(rt_domain_t* rt, struct task_struct *new)
105{
106 unsigned long flags;
107 /* first we need the write lock for rt_ready_queue */
108 spin_lock_irqsave(&rt->ready_lock, flags);
109 __add_ready(rt, new);
110 spin_unlock_irqrestore(&rt->ready_lock, flags);
111}
112
113static inline void merge_ready(rt_domain_t* rt, struct heap* tasks)
114{
115 unsigned long flags;
116 spin_lock_irqsave(&rt->ready_lock, flags);
117 __merge_ready(rt, tasks);
118 spin_unlock_irqrestore(&rt->ready_lock, flags);
119}
120
121static inline struct task_struct* take_ready(rt_domain_t* rt)
122{
123 unsigned long flags;
124 struct task_struct* ret;
125 /* first we need the write lock for rt_ready_queue */
126 spin_lock_irqsave(&rt->ready_lock, flags);
127 ret = __take_ready(rt);
128 spin_unlock_irqrestore(&rt->ready_lock, flags);
129 return ret;
130}
131
132
133static inline void add_release(rt_domain_t* rt, struct task_struct *task)
134{
135 unsigned long flags;
136 /* first we need the write lock for rt_ready_queue */
137 spin_lock_irqsave(&rt->tobe_lock, flags);
138 __add_release(rt, task);
139 spin_unlock_irqrestore(&rt->tobe_lock, flags);
140}
141
142static inline int __jobs_pending(rt_domain_t* rt)
143{
144 return !heap_empty(&rt->ready_queue);
145}
146
147static inline int jobs_pending(rt_domain_t* rt)
148{
149 unsigned long flags;
150 int ret;
151 /* first we need the write lock for rt_ready_queue */
152 spin_lock_irqsave(&rt->ready_lock, flags);
153 ret = !heap_empty(&rt->ready_queue);
154 spin_unlock_irqrestore(&rt->ready_lock, flags);
155 return ret;
156}
157
158#endif