path: root/include/litmus/rt_domain.h
Diffstat (limited to 'include/litmus/rt_domain.h')
 include/litmus/rt_domain.h | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
index c7c55bef3e42..c780fdfcccae 100644
--- a/include/litmus/rt_domain.h
+++ b/include/litmus/rt_domain.h
@@ -5,14 +5,14 @@
 #ifndef __UNC_RT_DOMAIN_H__
 #define __UNC_RT_DOMAIN_H__
 
-#include <litmus/heap.h>
+#include <litmus/bheap.h>
 
 #define RELEASE_QUEUE_SLOTS 127 /* prime */
 
 struct _rt_domain;
 
 typedef int (*check_resched_needed_t)(struct _rt_domain *rt);
-typedef void (*release_jobs_t)(struct _rt_domain *rt, struct heap* tasks);
+typedef void (*release_jobs_t)(struct _rt_domain *rt, struct bheap* tasks);
 
 struct release_queue {
 	/* each slot maintains a list of release heaps sorted
@@ -23,7 +23,7 @@ struct release_queue {
 typedef struct _rt_domain {
 	/* runnable rt tasks are in here */
 	spinlock_t ready_lock;
-	struct heap ready_queue;
+	struct bheap ready_queue;
 
 	/* real-time tasks waiting for release are in here */
 	spinlock_t release_lock;
@@ -41,7 +41,7 @@ typedef struct _rt_domain {
 	release_jobs_t release_jobs;
 
 	/* how are tasks ordered in the ready queue? */
-	heap_prio_t order;
+	bheap_prio_t order;
 } rt_domain_t;
 
 struct release_heap {
@@ -49,7 +49,7 @@ struct release_heap {
 	struct list_head list;
 	lt_t release_time;
 	/* all tasks to be released at release_time */
-	struct heap heap;
+	struct bheap heap;
 	/* used to trigger the release */
 	struct hrtimer timer;
 	/* used to delegate releases */
@@ -61,47 +61,47 @@ struct release_heap {
 
 static inline struct task_struct* __next_ready(rt_domain_t* rt)
 {
-	struct heap_node *hn = heap_peek(rt->order, &rt->ready_queue);
+	struct bheap_node *hn = bheap_peek(rt->order, &rt->ready_queue);
 	if (hn)
-		return heap2task(hn);
+		return bheap2task(hn);
 	else
 		return NULL;
 }
 
-void rt_domain_init(rt_domain_t *rt, heap_prio_t order,
+void rt_domain_init(rt_domain_t *rt, bheap_prio_t order,
 		    check_resched_needed_t check,
 		    release_jobs_t relase);
 
 void __add_ready(rt_domain_t* rt, struct task_struct *new);
-void __merge_ready(rt_domain_t* rt, struct heap *tasks);
+void __merge_ready(rt_domain_t* rt, struct bheap *tasks);
 void __add_release(rt_domain_t* rt, struct task_struct *task);
 
 static inline struct task_struct* __take_ready(rt_domain_t* rt)
 {
-	struct heap_node* hn = heap_take(rt->order, &rt->ready_queue);
+	struct bheap_node* hn = bheap_take(rt->order, &rt->ready_queue);
 	if (hn)
-		return heap2task(hn);
+		return bheap2task(hn);
 	else
 		return NULL;
 }
 
 static inline struct task_struct* __peek_ready(rt_domain_t* rt)
 {
-	struct heap_node* hn = heap_peek(rt->order, &rt->ready_queue);
+	struct bheap_node* hn = bheap_peek(rt->order, &rt->ready_queue);
 	if (hn)
-		return heap2task(hn);
+		return bheap2task(hn);
 	else
 		return NULL;
 }
 
 static inline int is_queued(struct task_struct *t)
 {
-	return heap_node_in_heap(tsk_rt(t)->heap_node);
+	return bheap_node_in_heap(tsk_rt(t)->heap_node);
 }
 
 static inline void remove(rt_domain_t* rt, struct task_struct *t)
 {
-	heap_delete(rt->order, &rt->ready_queue, tsk_rt(t)->heap_node);
+	bheap_delete(rt->order, &rt->ready_queue, tsk_rt(t)->heap_node);
 }
 
 static inline void add_ready(rt_domain_t* rt, struct task_struct *new)
@@ -113,7 +113,7 @@ static inline void add_ready(rt_domain_t* rt, struct task_struct *new)
 	spin_unlock_irqrestore(&rt->ready_lock, flags);
 }
 
-static inline void merge_ready(rt_domain_t* rt, struct heap* tasks)
+static inline void merge_ready(rt_domain_t* rt, struct bheap* tasks)
 {
 	unsigned long flags;
 	spin_lock_irqsave(&rt->ready_lock, flags);
@@ -144,7 +144,7 @@ static inline void add_release(rt_domain_t* rt, struct task_struct *task)
 
 static inline int __jobs_pending(rt_domain_t* rt)
 {
-	return !heap_empty(&rt->ready_queue);
+	return !bheap_empty(&rt->ready_queue);
 }
 
 static inline int jobs_pending(rt_domain_t* rt)
@@ -153,7 +153,7 @@ static inline int jobs_pending(rt_domain_t* rt)
 	int ret;
 	/* first we need the write lock for rt_ready_queue */
 	spin_lock_irqsave(&rt->ready_lock, flags);
-	ret = !heap_empty(&rt->ready_queue);
+	ret = !bheap_empty(&rt->ready_queue);
 	spin_unlock_irqrestore(&rt->ready_lock, flags);
 	return ret;
 }