about summary refs log tree commit diff stats
path: root/include/litmus/rt_domain.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/litmus/rt_domain.h')
-rw-r--r--include/litmus/rt_domain.h162
1 files changed, 162 insertions, 0 deletions
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
new file mode 100644
index 000000000000..b452be1d2256
--- /dev/null
+++ b/include/litmus/rt_domain.h
@@ -0,0 +1,162 @@
/* litmus/rt_domain.h - real-time task domain: ready queue, release queue,
 * and the plugin callbacks that operate on them.
 *
 * TODO(cleanup): add comments and tidy up remaining messy parts.
 */
4
5#ifndef __UNC_RT_DOMAIN_H__
6#define __UNC_RT_DOMAIN_H__
7
8#include <litmus/bheap.h>
9
10#define RELEASE_QUEUE_SLOTS 127 /* prime */
11
12struct _rt_domain;
13
14typedef int (*check_resched_needed_t)(struct _rt_domain *rt);
15typedef void (*release_jobs_t)(struct _rt_domain *rt, struct bheap* tasks);
16
17struct release_queue {
18 /* each slot maintains a list of release heaps sorted
19 * by release time */
20 struct list_head slot[RELEASE_QUEUE_SLOTS];
21};
22
23typedef struct _rt_domain {
24 /* runnable rt tasks are in here */
25 spinlock_t ready_lock;
26 struct bheap ready_queue;
27
28 /* real-time tasks waiting for release are in here */
29 spinlock_t release_lock;
30 struct release_queue release_queue;
31 int release_master;
32
33 /* for moving tasks to the release queue */
34 spinlock_t tobe_lock;
35 struct list_head tobe_released;
36
37 /* how do we check if we need to kick another CPU? */
38 check_resched_needed_t check_resched;
39
40 /* how do we release jobs? */
41 release_jobs_t release_jobs;
42
43 /* how are tasks ordered in the ready queue? */
44 bheap_prio_t order;
45} rt_domain_t;
46
47struct release_heap {
48 /* list_head for per-time-slot list */
49 struct list_head list;
50 lt_t release_time;
51 /* all tasks to be released at release_time */
52 struct bheap heap;
53 /* used to trigger the release */
54 struct hrtimer timer;
55 /* used to delegate releases */
56 struct hrtimer_start_on_info info;
57 /* required for the timer callback */
58 rt_domain_t* dom;
59};
60
61
62static inline struct task_struct* __next_ready(rt_domain_t* rt)
63{
64 struct bheap_node *hn = bheap_peek(rt->order, &rt->ready_queue);
65 if (hn)
66 return bheap2task(hn);
67 else
68 return NULL;
69}
70
71void rt_domain_init(rt_domain_t *rt, bheap_prio_t order,
72 check_resched_needed_t check,
73 release_jobs_t relase);
74
75void __add_ready(rt_domain_t* rt, struct task_struct *new);
76void __merge_ready(rt_domain_t* rt, struct bheap *tasks);
77void __add_release(rt_domain_t* rt, struct task_struct *task);
78
79static inline struct task_struct* __take_ready(rt_domain_t* rt)
80{
81 struct bheap_node* hn = bheap_take(rt->order, &rt->ready_queue);
82 if (hn)
83 return bheap2task(hn);
84 else
85 return NULL;
86}
87
88static inline struct task_struct* __peek_ready(rt_domain_t* rt)
89{
90 struct bheap_node* hn = bheap_peek(rt->order, &rt->ready_queue);
91 if (hn)
92 return bheap2task(hn);
93 else
94 return NULL;
95}
96
97static inline int is_queued(struct task_struct *t)
98{
99 BUG_ON(!tsk_rt(t)->heap_node);
100 return bheap_node_in_heap(tsk_rt(t)->heap_node);
101}
102
103static inline void remove(rt_domain_t* rt, struct task_struct *t)
104{
105 bheap_delete(rt->order, &rt->ready_queue, tsk_rt(t)->heap_node);
106}
107
108static inline void add_ready(rt_domain_t* rt, struct task_struct *new)
109{
110 unsigned long flags;
111 /* first we need the write lock for rt_ready_queue */
112 spin_lock_irqsave(&rt->ready_lock, flags);
113 __add_ready(rt, new);
114 spin_unlock_irqrestore(&rt->ready_lock, flags);
115}
116
117static inline void merge_ready(rt_domain_t* rt, struct bheap* tasks)
118{
119 unsigned long flags;
120 spin_lock_irqsave(&rt->ready_lock, flags);
121 __merge_ready(rt, tasks);
122 spin_unlock_irqrestore(&rt->ready_lock, flags);
123}
124
125static inline struct task_struct* take_ready(rt_domain_t* rt)
126{
127 unsigned long flags;
128 struct task_struct* ret;
129 /* first we need the write lock for rt_ready_queue */
130 spin_lock_irqsave(&rt->ready_lock, flags);
131 ret = __take_ready(rt);
132 spin_unlock_irqrestore(&rt->ready_lock, flags);
133 return ret;
134}
135
136
137static inline void add_release(rt_domain_t* rt, struct task_struct *task)
138{
139 unsigned long flags;
140 /* first we need the write lock for rt_ready_queue */
141 spin_lock_irqsave(&rt->tobe_lock, flags);
142 __add_release(rt, task);
143 spin_unlock_irqrestore(&rt->tobe_lock, flags);
144}
145
146static inline int __jobs_pending(rt_domain_t* rt)
147{
148 return !bheap_empty(&rt->ready_queue);
149}
150
151static inline int jobs_pending(rt_domain_t* rt)
152{
153 unsigned long flags;
154 int ret;
155 /* first we need the write lock for rt_ready_queue */
156 spin_lock_irqsave(&rt->ready_lock, flags);
157 ret = !bheap_empty(&rt->ready_queue);
158 spin_unlock_irqrestore(&rt->ready_lock, flags);
159 return ret;
160}
161
162#endif