aboutsummaryrefslogtreecommitdiffstats
path: root/include/litmus/rt_domain.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/litmus/rt_domain.h')
-rw-r--r--include/litmus/rt_domain.h26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
index b452be1d2256..9bf980713474 100644
--- a/include/litmus/rt_domain.h
+++ b/include/litmus/rt_domain.h
@@ -22,16 +22,16 @@ struct release_queue {
 
 typedef struct _rt_domain {
 	/* runnable rt tasks are in here */
-	spinlock_t			ready_lock;
+	raw_spinlock_t			ready_lock;
 	struct bheap			ready_queue;
 
 	/* real-time tasks waiting for release are in here */
-	spinlock_t			release_lock;
+	raw_spinlock_t			release_lock;
 	struct release_queue		release_queue;
 	int				release_master;
 
 	/* for moving tasks to the release queue */
-	spinlock_t			tobe_lock;
+	raw_spinlock_t			tobe_lock;
 	struct list_head		tobe_released;
 
 	/* how do we check if we need to kick another CPU? */
@@ -109,17 +109,17 @@ static inline void add_ready(rt_domain_t* rt, struct task_struct *new)
 {
 	unsigned long flags;
 	/* first we need the write lock for rt_ready_queue */
-	spin_lock_irqsave(&rt->ready_lock, flags);
+	raw_spin_lock_irqsave(&rt->ready_lock, flags);
 	__add_ready(rt, new);
-	spin_unlock_irqrestore(&rt->ready_lock, flags);
+	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
 }
 
 static inline void merge_ready(rt_domain_t* rt, struct bheap* tasks)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&rt->ready_lock, flags);
+	raw_spin_lock_irqsave(&rt->ready_lock, flags);
 	__merge_ready(rt, tasks);
-	spin_unlock_irqrestore(&rt->ready_lock, flags);
+	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
 }
 
 static inline struct task_struct* take_ready(rt_domain_t* rt)
@@ -127,9 +127,9 @@ static inline struct task_struct* take_ready(rt_domain_t* rt)
 	unsigned long flags;
 	struct task_struct* ret;
 	/* first we need the write lock for rt_ready_queue */
-	spin_lock_irqsave(&rt->ready_lock, flags);
+	raw_spin_lock_irqsave(&rt->ready_lock, flags);
 	ret = __take_ready(rt);
-	spin_unlock_irqrestore(&rt->ready_lock, flags);
+	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
 	return ret;
 }
 
@@ -138,9 +138,9 @@ static inline void add_release(rt_domain_t* rt, struct task_struct *task)
 {
 	unsigned long flags;
 	/* first we need the write lock for rt_ready_queue */
-	spin_lock_irqsave(&rt->tobe_lock, flags);
+	raw_spin_lock_irqsave(&rt->tobe_lock, flags);
 	__add_release(rt, task);
-	spin_unlock_irqrestore(&rt->tobe_lock, flags);
+	raw_spin_unlock_irqrestore(&rt->tobe_lock, flags);
 }
 
 static inline int __jobs_pending(rt_domain_t* rt)
@@ -153,9 +153,9 @@ static inline int jobs_pending(rt_domain_t* rt)
 	unsigned long flags;
 	int ret;
 	/* first we need the write lock for rt_ready_queue */
-	spin_lock_irqsave(&rt->ready_lock, flags);
+	raw_spin_lock_irqsave(&rt->ready_lock, flags);
 	ret = !bheap_empty(&rt->ready_queue);
-	spin_unlock_irqrestore(&rt->ready_lock, flags);
+	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
 	return ret;
 }
 