author	Zelin Tong <ztong@ludwig.cs.unc.edu>	2020-09-15 08:20:34 -0400
committer	Zelin Tong <ztong@ludwig.cs.unc.edu>	2020-09-15 08:20:34 -0400
commit	e21750e5fcfcd24ae828eab337acc04284aff887 (patch)
tree	c4b9b713556e090321c543d5ef8c09231abf9a1b
parent	ddad9de416639a19016f38cd3161d4840315a7a7 (diff)
Backup Commit
-rw-r--r--	include/litmus/litmus.h	  1
-rw-r--r--	include/litmus/reservations/reservation.h	 44
-rw-r--r--	include/litmus/rt_domain.h	127
-rw-r--r--	litmus/rt_domain.c	301
-rw-r--r--	litmus/sched_ext_res.c	 35
5 files changed, 419 insertions(+), 89 deletions(-)
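In brief: this backup commit moves release timing from per-release_heap hrtimers to a single per-domain hrtimer backed by a binheap of release heaps ordered by earliest release time, and adds reservation-based counterparts (the *_res functions) for the rt_domain ready- and release-queue operations. It also sketches hierarchical reservation structures in reservation.h and begins converting the P-RES plugin skeleton into an EXT-RES plugin driven by a global gedf_reservation_environment.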
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index f550367ddd4b..2081fab6c825 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -120,6 +120,7 @@ static inline lt_t litmus_clock(void)
 void preempt_if_preemptable(struct task_struct* t, int on_cpu);
 
 #define bheap2task(hn) ((struct task_struct*) hn->value)
+#define bheap2res(hn) ((struct reservation*) hn->value)
 
 static inline int is_present(struct task_struct* t)
 {
diff --git a/include/litmus/reservations/reservation.h b/include/litmus/reservations/reservation.h
index 1752dac4e698..3dce3a47a2fa 100644
--- a/include/litmus/reservations/reservation.h
+++ b/include/litmus/reservations/reservation.h
@@ -202,6 +202,8 @@ struct sup_reservation_environment {
 	lt_t next_scheduler_update;
 	/* set to true if a call to sup_dispatch() is imminent */
 	bool will_schedule;
+
+	struct res_env_ops ops;
 };
 
 /* Contract:
@@ -221,4 +223,46 @@ struct task_struct* sup_dispatch(struct sup_reservation_environment* sup_env);
 struct reservation* sup_find_by_id(struct sup_reservation_environment* sup_env,
 	unsigned int id);
 
+/* ************************************************************************* */
+/* cpu_entry struct */
+typedef struct {
+	int cpu;
+	struct task_struct* linked;
+	struct task_struct* scheduled;
+	struct bheap_node* hn;
+	struct list_node* ln;
+} cpu_entry_t;
+
+/* Hierarchical reservation environment */
+struct ext_reservation_environment {
+	struct reservation_environment env;
+
+	/* only used for global reservation environments */
+	raw_spinlock_t lock;
+
+	/* ordered by priority */
+	struct list_head active_reservations;
+
+	/* ordered by next_replenishment */
+	struct list_head depleted_reservations;
+
+	/* unordered */
+	struct list_head inactive_reservations;
+
+	/* list of all reservations */
+	struct list_head all_reservations;
+
+	lt_t next_scheduler_update;
+	bool will_schedule;
+};
+
+/* Hierarchical reservation */
+struct ext_reservation {
+	/* reservation data */
+	struct reservation res;
+
+	/* reservation environment for child reservations */
+	struct list_head child_envs;
+};
+
 #endif
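For orientation, a minimal sketch of how the new ext_reservation_environment might be set up before use. This is an assumption on our part: the patch declares the struct but no initializer, and ext_res_env_init() is a hypothetical name, not part of this commit.

/* Hypothetical initializer (not in this patch): prepares the new
 * hierarchical environment's lock and reservation lists before use. */
static void ext_res_env_init(struct ext_reservation_environment* env)
{
	raw_spin_lock_init(&env->lock);		/* global-env lock */
	INIT_LIST_HEAD(&env->active_reservations);
	INIT_LIST_HEAD(&env->depleted_reservations);
	INIT_LIST_HEAD(&env->inactive_reservations);
	INIT_LIST_HEAD(&env->all_reservations);
	env->next_scheduler_update = ULLONG_MAX;	/* no update pending */
	env->will_schedule = false;
}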
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
index ac249292e866..5a7fb59bd750 100644
--- a/include/litmus/rt_domain.h
+++ b/include/litmus/rt_domain.h
@@ -5,7 +5,11 @@
 #ifndef __UNC_RT_DOMAIN_H__
 #define __UNC_RT_DOMAIN_H__
 
+#include <linux/rbtree.h>
+
 #include <litmus/bheap.h>
+#include <litmus/binheap.h>
+#include <litmus/reservations/ext_reservation.h>
 
 #define RELEASE_QUEUE_SLOTS 127 /* prime */
 
@@ -15,9 +19,9 @@ typedef int (*check_resched_needed_t)(struct _rt_domain *rt);
 typedef void (*release_jobs_t)(struct _rt_domain *rt, struct bheap* tasks);
 
 struct release_queue {
-	/* each slot maintains a list of release heaps sorted
-	 * by release time */
 	struct list_head slot[RELEASE_QUEUE_SLOTS];
+	struct binheap queue;
+	volatile lt_t earliest_release;
 };
 
 typedef struct _rt_domain {
@@ -27,12 +31,19 @@ typedef struct _rt_domain {
 
 	/* real-time tasks waiting for release are in here */
 	raw_spinlock_t release_lock;
 	struct release_queue release_queue;
 
 #ifdef CONFIG_RELEASE_MASTER
 	int release_master;
+	/* used to delegate releases.
+	 * does not appear to be assigned anywhere in Litmus
+	 */
+	struct hrtimer_start_on_info info;
 #endif
 
+	/* release timer for the earliest heap in release_queue */
+	struct hrtimer timer;
+
 	/* for moving tasks to the release queue */
 	raw_spinlock_t tobe_lock;
 	struct list_head tobe_released;
@@ -48,22 +59,18 @@ typedef struct _rt_domain {
 } rt_domain_t;
 
 struct release_heap {
-	/* list_head for per-time-slot list */
+	/* for enqueueing into release_queue */
+	struct binheap_node node;
 	struct list_head list;
 	lt_t release_time;
 	/* all tasks to be released at release_time */
 	struct bheap heap;
-	/* used to trigger the release */
-	struct hrtimer timer;
-
-#ifdef CONFIG_RELEASE_MASTER
-	/* used to delegate releases */
-	struct hrtimer_start_on_info info;
-#endif
-	/* required for the timer callback */
-	rt_domain_t* dom;
+	/* list of all tasks, used for release callback in reservation */
+	struct list_head list_head;
 };
 
+void suspend_releases(rt_domain_t* rt);
+void resume_releases(rt_domain_t* rt);
 
 static inline struct task_struct* __next_ready(rt_domain_t* rt)
 {
@@ -74,6 +81,15 @@ static inline struct task_struct* __next_ready(rt_domain_t* rt)
 		return NULL;
 }
 
+static inline struct reservation* __next_ready_res(rt_domain_t* rt)
+{
+	struct bheap_node *hn = bheap_peek(rt->order, &rt->ready_queue);
+	if (hn)
+		return bheap2res(hn);
+	else
+		return NULL;
+}
+
 void rt_domain_init(rt_domain_t *rt, bheap_prio_t order,
 		    check_resched_needed_t check,
 		    release_jobs_t relase);
@@ -82,6 +98,10 @@ void __add_ready(rt_domain_t* rt, struct task_struct *new);
 void __merge_ready(rt_domain_t* rt, struct bheap *tasks);
 void __add_release(rt_domain_t* rt, struct task_struct *task);
 
+void __add_ready_res(rt_domain_t* rt, struct reservation* new);
+void __merge_ready_res(rt_domain_t* rt, struct bheap *res);
+void __add_release_res(rt_domain_t* rt, struct reservation* res);
+
 static inline struct task_struct* __take_ready(rt_domain_t* rt)
 {
 	struct bheap_node* hn = bheap_take(rt->order, &rt->ready_queue);
@@ -91,6 +111,15 @@ static inline struct task_struct* __take_ready(rt_domain_t* rt)
 		return NULL;
 }
 
+static inline struct reservation* __take_ready_res(rt_domain_t* rt)
+{
+	struct bheap_node* hn = bheap_take(rt->order, &rt->ready_queue);
+	if (hn)
+		return bheap2res(hn);
+	else
+		return NULL;
+}
+
 static inline struct task_struct* __peek_ready(rt_domain_t* rt)
 {
 	struct bheap_node* hn = bheap_peek(rt->order, &rt->ready_queue);
@@ -100,17 +129,37 @@ static inline struct task_struct* __peek_ready(rt_domain_t* rt)
 		return NULL;
 }
 
+static inline struct reservation* __peek_ready_res(rt_domain_t* rt)
+{
+	struct bheap_node* hn = bheap_peek(rt->order, &rt->ready_queue);
+	if (hn)
+		return bheap2res(hn);
+	else
+		return NULL;
+}
+
 static inline int is_queued(struct task_struct *t)
 {
 	BUG_ON(!tsk_rt(t)->heap_node);
 	return bheap_node_in_heap(tsk_rt(t)->heap_node);
 }
 
+static inline int is_queued_res(struct reservation* res)
+{
+	BUG_ON(!res->heap_node);
+	return bheap_node_in_heap(res->heap_node);
+}
+
 static inline void remove(rt_domain_t* rt, struct task_struct *t)
 {
 	bheap_delete(rt->order, &rt->ready_queue, tsk_rt(t)->heap_node);
 }
 
+static inline void remove_res(rt_domain_t* rt, struct reservation* res)
+{
+	bheap_delete(rt->order, &rt->ready_queue, res->heap_node);
+}
+
 static inline void add_ready(rt_domain_t* rt, struct task_struct *new)
 {
 	unsigned long flags;
@@ -120,6 +169,15 @@ static inline void add_ready(rt_domain_t* rt, struct task_struct *new)
 	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
 }
 
+static inline void add_ready_res(rt_domain_t* rt, struct reservation *new)
+{
+	unsigned long flags;
+	/* first we need the write lock for rt_ready_queue */
+	raw_spin_lock_irqsave(&rt->ready_lock, flags);
+	__add_ready_res(rt, new);
+	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
+}
+
 static inline void merge_ready(rt_domain_t* rt, struct bheap* tasks)
 {
 	unsigned long flags;
@@ -128,6 +186,14 @@ static inline void merge_ready(rt_domain_t* rt, struct bheap* tasks)
 	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
 }
 
+static inline void merge_ready_res(rt_domain_t* rt, struct bheap* res)
+{
+	unsigned long flags;
+	raw_spin_lock_irqsave(&rt->ready_lock, flags);
+	__merge_ready_res(rt, res);
+	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
+}
+
 static inline struct task_struct* take_ready(rt_domain_t* rt)
 {
 	unsigned long flags;
@@ -139,6 +205,16 @@ static inline struct task_struct* take_ready(rt_domain_t* rt)
 	return ret;
 }
 
+static inline struct reservation* take_ready_res(rt_domain_t* rt)
+{
+	unsigned long flags;
+	struct reservation* ret;
+	/* first we need the write lock for rt_ready_queue */
+	raw_spin_lock_irqsave(&rt->ready_lock, flags);
+	ret = __take_ready_res(rt);
+	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
+	return ret;
+}
 
 static inline void add_release(rt_domain_t* rt, struct task_struct *task)
 {
@@ -148,10 +224,21 @@ static inline void add_release(rt_domain_t* rt, struct task_struct *task)
 	raw_spin_unlock_irqrestore(&rt->tobe_lock, flags);
 }
 
+static inline void add_release_res(rt_domain_t* rt, struct reservation* res)
+{
+	unsigned long flags;
+	raw_spin_lock_irqsave(&rt->tobe_lock, flags);
+	__add_release_res(rt, res);
+	raw_spin_unlock_irqrestore(&rt->tobe_lock, flags);
+}
+
 #ifdef CONFIG_RELEASE_MASTER
 void __add_release_on(rt_domain_t* rt, struct task_struct *task,
 		      int target_cpu);
 
+void __add_release_res_on(rt_domain_t* rt, struct reservation* res,
+		      int target_cpu);
+
 static inline void add_release_on(rt_domain_t* rt,
 				  struct task_struct *task,
 				  int target_cpu)
@@ -161,6 +248,16 @@ static inline void add_release_on(rt_domain_t* rt,
 	__add_release_on(rt, task, target_cpu);
 	raw_spin_unlock_irqrestore(&rt->tobe_lock, flags);
 }
+
+static inline void add_release_res_on(rt_domain_t* rt,
+				  struct reservation* res,
+				  int target_cpu)
+{
+	unsigned long flags;
+	raw_spin_lock_irqsave(&rt->tobe_lock, flags);
+	__add_release_res_on(rt, res, target_cpu);
+	raw_spin_unlock_irqrestore(&rt->tobe_lock, flags);
+}
 #endif
 
 static inline int __jobs_pending(rt_domain_t* rt)
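Taken together, the new *_res entry points mirror the task-based API one for one. A rough usage sketch follows; it assumes the reservation's heap_node, rel_heap, and ln fields are wired up by an allocation path that this patch does not show.

/* Sketch: requeue a reservation in its domain.  Assumes heap_node,
 * rel_heap, and ln were set up by the (unshown) allocation path. */
static void requeue_res(rt_domain_t* dom, struct reservation* res)
{
	if (lt_before_eq(res->next_replenishment, litmus_clock()))
		/* ready now: straight into the ready bheap */
		add_ready_res(dom, res);
	else
		/* future release: hash slot + domain binheap; this also
		 * arms the domain timer if res becomes the earliest release */
		add_release_res(dom, res);
}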
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index 733a483e3084..f90f5718c0ad 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -10,6 +10,7 @@
 #include <linux/sched.h>
 #include <linux/list.h>
 #include <linux/slab.h>
+#include <linux/rbtree.h>
 
 #include <litmus/litmus.h>
 #include <litmus/sched_plugin.h>
@@ -17,10 +18,12 @@
 #include <litmus/debug_trace.h>
 
 #include <litmus/rt_domain.h>
+#include <litmus/reservations/ext_reservation.h>
 
 #include <litmus/trace.h>
 
 #include <litmus/bheap.h>
+#include <litmus/binheap.h>
 
 /* Uncomment when debugging timer races... */
 #if 0
@@ -49,38 +52,81 @@ static void default_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 
 static unsigned int time2slot(lt_t time)
 {
 	return (unsigned int) time2quanta(time, FLOOR) % RELEASE_QUEUE_SLOTS;
 }
 
-static enum hrtimer_restart on_release_timer(struct hrtimer *timer)
-{
-	unsigned long flags;
-	struct release_heap* rh;
-	rh = container_of(timer, struct release_heap, timer);
-
-	TS_RELEASE_LATENCY(rh->release_time);
-
-	VTRACE("on_release_timer(0x%p) starts.\n", timer);
-
-	TS_RELEASE_START;
-
-	raw_spin_lock_irqsave(&rh->dom->release_lock, flags);
-	VTRACE("CB has the release_lock 0x%p\n", &rh->dom->release_lock);
-	/* remove from release queue */
-	list_del(&rh->list);
-	raw_spin_unlock_irqrestore(&rh->dom->release_lock, flags);
-	VTRACE("CB returned release_lock 0x%p\n", &rh->dom->release_lock);
-
-	/* call release callback */
-	rh->dom->release_jobs(rh->dom, &rh->heap);
-	/* WARNING: rh can be referenced from other CPUs from now on. */
-
-	TS_RELEASE_END;
-
-	VTRACE("on_release_timer(0x%p) ends.\n", timer);
-
-	return HRTIMER_NORESTART;
-}
+int release_order(struct binheap_node *a, struct binheap_node *b)
+{
+	return lt_before(binheap_entry(a, struct release_heap, node)->release_time,
+		binheap_entry(b, struct release_heap, node)->release_time);
+}
+
+static void release_jobs_before_now(rt_domain_t* rt)
+{
+	unsigned long flags;
+	struct release_heap* rh;
+
+	/* remove all heaps with a release time no later than now
+	 * from the release queue and call the release callback
+	 */
+	while (!binheap_empty(&rt->release_queue.queue) &&
+	       lt_before_eq(rt->release_queue.earliest_release, litmus_clock())) {
+		raw_spin_lock_irqsave(&rt->release_lock, flags);
+		VTRACE("CB has the release_lock 0x%p\n", &rt->release_lock);
+
+		/* O(1) operation */
+		rh = binheap_top_entry(&rt->release_queue.queue,
+				struct release_heap, node);
+		list_del_init(&rh->list);
+
+		TS_RELEASE_LATENCY(rh->release_time);
+		TS_RELEASE_START;
+
+		binheap_delete_root(&rt->release_queue.queue,
+				struct release_heap, node);
+		if (!binheap_empty(&rt->release_queue.queue))
+			rt->release_queue.earliest_release =
+				binheap_top_entry(&rt->release_queue.queue,
+					struct release_heap, node)->release_time;
+
+		raw_spin_unlock_irqrestore(&rt->release_lock, flags);
+		VTRACE("CB returned release_lock 0x%p\n", &rt->release_lock);
+
+		rt->release_jobs(rt, &rh->heap);
+
+		TS_RELEASE_END;
+	}
+}
+
+static enum hrtimer_restart on_release_timer(struct hrtimer *timer)
+{
+	rt_domain_t* rt;
+
+	rt = container_of(timer, rt_domain_t, timer);
+
+	release_jobs_before_now(rt);
+
+	VTRACE("on_release_timer(0x%p) ends.\n", timer);
+
+	/* when there are no more jobs to release */
+	if (binheap_empty(&rt->release_queue.queue))
+		return HRTIMER_NORESTART;
+	hrtimer_set_expires(timer, ns_to_ktime(rt->release_queue.earliest_release));
+
+	return HRTIMER_RESTART;
+}
+
+void suspend_releases(rt_domain_t* rt)
+{
+	hrtimer_try_to_cancel(&rt->timer);
+}
+
+void resume_releases(rt_domain_t* rt)
+{
+	release_jobs_before_now(rt);
+	if (!binheap_empty(&rt->release_queue.queue)) {
+		hrtimer_start(&rt->timer,
+			ns_to_ktime(rt->release_queue.earliest_release),
+			HRTIMER_MODE_ABS_PINNED);
+	}
+}
 
 /* allocated in litmus.c */
@@ -90,35 +136,31 @@ struct release_heap* release_heap_alloc(int gfp_flags)
 {
 	struct release_heap* rh;
 	rh = kmem_cache_alloc(release_heap_cache, gfp_flags);
-	if (rh) {
-		/* initialize timer */
-		hrtimer_init(&rh->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-		rh->timer.function = on_release_timer;
-	}
 	return rh;
 }
 
 void release_heap_free(struct release_heap* rh)
 {
-	/* make sure timer is no longer in use */
-	hrtimer_cancel(&rh->timer);
 	kmem_cache_free(release_heap_cache, rh);
 }
 
-/* Caller must hold release lock.
+/* For all variants of get_release_heap:
+ * Caller must hold release lock.
  * Will return heap for given time. If no such heap exists prior to
  * the invocation it will be created.
  */
-static struct release_heap* get_release_heap(rt_domain_t *rt,
-					     struct task_struct* t,
+static struct release_heap* __get_release_heap(rt_domain_t *rt,
+					     lt_t release_time,
+					     struct release_heap* th,
 					     int use_task_heap)
 {
 	struct list_head* pos;
 	struct release_heap* heap = NULL;
 	struct release_heap* rh;
-	lt_t release_time = get_release(t);
 	unsigned int slot = time2slot(release_time);
 
+	/* loop is for hash collisions; expected O(1) */
 	/* initialize pos for the case that the list is empty */
 	pos = rt->release_queue.slot[slot].next;
 	list_for_each(pos, &rt->release_queue.slot[slot]) {
@@ -138,39 +180,59 @@ static struct release_heap* get_release_heap(rt_domain_t *rt,
 	}
 	if (!heap && use_task_heap) {
 		/* use pre-allocated release heap */
-		rh = tsk_rt(t)->rel_heap;
+		rh = th;
 
-		rh->dom = rt;
 		rh->release_time = release_time;
 
 		/* add to release queue */
 		list_add(&rh->list, pos->prev);
+		/* binheap_add is O(lg n) time complexity. It can't be helped
+		 * if we want to be able to have 1 domain timer that we can
+		 * disable easily upon domain preemption
+		 */
+		binheap_add(&rh->node, &rt->release_queue.queue,
+				struct release_heap, node);
+
 		heap = rh;
 	}
 	return heap;
 }
 
+static struct release_heap* get_release_heap_res(rt_domain_t *rt,
+					     struct reservation* res,
+					     int use_task_heap)
+{
+	return __get_release_heap(rt, res->next_replenishment,
+			res->rel_heap, use_task_heap);
+}
+
+static struct release_heap* get_release_heap(rt_domain_t *rt,
+					     struct task_struct *t,
+					     int use_task_heap)
+{
+	return __get_release_heap(rt, get_release(t),
+			tsk_rt(t)->rel_heap, use_task_heap);
+}
+
 static void reinit_release_heap(struct task_struct* t)
 {
 	struct release_heap* rh;
 
 	/* use pre-allocated release heap */
 	rh = tsk_rt(t)->rel_heap;
+	INIT_LIST_HEAD(&rh->list_head);
-
-	/* Make sure it is safe to use. The timer callback could still
-	 * be executing on another CPU; hrtimer_cancel() will wait
-	 * until the timer callback has completed. However, under no
-	 * circumstances should the timer be active (= yet to be
-	 * triggered).
-	 *
-	 * WARNING: If the CPU still holds the release_lock at this point,
-	 * deadlock may occur!
-	 */
-	BUG_ON(hrtimer_cancel(&rh->timer));
-
 	/* initialize */
 	bheap_init(&rh->heap);
 }
+
+static void reinit_release_heap_res(struct reservation* res)
+{
+	struct release_heap* rh;
+
+	/* use pre-allocated release heap */
+	rh = res->rel_heap;
+	INIT_LIST_HEAD(&rh->list_head);
+	/* initialize */
+	bheap_init(&rh->heap);
+}
 
 /* arm_release_timer() - start local release timer or trigger
  * remote timer (pull timer)
  *
@@ -197,44 +259,46 @@ static void arm_release_timer(rt_domain_t *_rt)
 	list_for_each_safe(pos, safe, &list) {
 		/* pick task of work list */
 		t = list_entry(pos, struct task_struct, rt_param.list);
-		sched_trace_task_release(t);
+		//sched_trace_task_release(t);
 		list_del(pos);
 
 		/* put into release heap while holding release_lock */
 		raw_spin_lock(&rt->release_lock);
-		VTRACE_TASK(t, "I have the release_lock 0x%p\n", &rt->release_lock);
+		VTRACE("acquired the release_lock 0x%p\n", &rt->release_lock);
 
 		rh = get_release_heap(rt, t, 0);
 		if (!rh) {
 			/* need to use our own, but drop lock first */
 			raw_spin_unlock(&rt->release_lock);
-			VTRACE_TASK(t, "Dropped release_lock 0x%p\n",
+			VTRACE("dropped release_lock 0x%p\n",
 				    &rt->release_lock);
 
 			reinit_release_heap(t);
-			VTRACE_TASK(t, "release_heap ready\n");
+			VTRACE("release_heap ready\n");
 
 			raw_spin_lock(&rt->release_lock);
-			VTRACE_TASK(t, "Re-acquired release_lock 0x%p\n",
+			VTRACE("re-acquired release_lock 0x%p\n",
 				    &rt->release_lock);
 
 			rh = get_release_heap(rt, t, 1);
 		}
 		bheap_insert(rt->order, &rh->heap, tsk_rt(t)->heap_node);
-		VTRACE_TASK(t, "arm_release_timer(): added to release heap\n");
+		VTRACE("arm_release_timer(): added to release heap\n");
+
+		rh = binheap_top_entry(&rt->release_queue.queue,
+				struct release_heap, node);
+		rt->release_queue.earliest_release = rh->release_time;
 
 		raw_spin_unlock(&rt->release_lock);
-		VTRACE_TASK(t, "Returned the release_lock 0x%p\n", &rt->release_lock);
+		VTRACE("dropped the release_lock 0x%p\n", &rt->release_lock);
 
 		/* To avoid arming the timer multiple times, we only let the
-		 * owner do the arming (which is the "first" task to reference
-		 * this release_heap anyway).
+		 * owner of the new earliest release heap do the arming.
 		 */
 		if (rh == tsk_rt(t)->rel_heap) {
-			VTRACE_TASK(t, "arming timer 0x%p\n", &rh->timer);
+			VTRACE("arming timer 0x%p\n", &rt->timer);
 
-			if (!hrtimer_is_hres_active(&rh->timer)) {
-				TRACE_TASK(t, "WARNING: no hires timer!!!\n");
+			if (!hrtimer_is_hres_active(&rt->timer)) {
+				VTRACE("WARNING: no hires timer!!!\n");
 			}
 
 			/* we cannot arm the timer using hrtimer_start()
@@ -246,7 +310,7 @@ static void arm_release_timer(rt_domain_t *_rt)
 			if (rt->release_master == NO_CPU &&
 			    target_cpu == NO_CPU)
 #endif
-				hrtimer_start(&rh->timer,
+				hrtimer_start(&rt->timer,
 					ns_to_ktime(rh->release_time),
 					HRTIMER_MODE_ABS_PINNED);
 #ifdef CONFIG_RELEASE_MASTER
@@ -255,15 +319,112 @@ static void arm_release_timer(rt_domain_t *_rt)
 				/* target_cpu overrides release master */
 				(target_cpu != NO_CPU ?
 				 target_cpu : rt->release_master),
-				&rh->info, &rh->timer,
+				&rt->info, &rt->timer,
 				ns_to_ktime(rh->release_time),
 				HRTIMER_MODE_ABS_PINNED);
 #endif
 		} else
-			VTRACE_TASK(t, "0x%p is not my timer\n", &rh->timer);
+			VTRACE("timer 0x%p has been armed for an earlier time\n",
+				&rt->timer);
 	}
 }
 
+/* arm_release_timer_res() - start local release timer or trigger
+ * remote timer (pull timer)
+ *
+ * Called by add_release_res() with:
+ * - tobe_lock taken
+ * - IRQ disabled
+ *
+ * TODO: find some way to combine this with the task version of this function
+ */
+#ifdef CONFIG_RELEASE_MASTER
+#define arm_release_timer_res(t) arm_release_timer_res_on((t), NO_CPU)
+static void arm_release_timer_res_on(rt_domain_t *_rt, int target_cpu)
+#else
+static void arm_release_timer_res(rt_domain_t *_rt)
+#endif
+{
+	rt_domain_t *rt = _rt;
+	struct list_head list;
+	struct list_head *pos, *safe;
+	struct reservation* res;
+	struct release_heap* rh;
+
+	VTRACE("arm_release_timer_res() at %llu\n", litmus_clock());
+	list_replace_init(&rt->tobe_released, &list);
+
+	list_for_each_safe(pos, safe, &list) {
+		/* pick reservation off the work list */
+		res = list_entry(pos, struct reservation, ln);
+		list_del(pos);
+
+		/* put into release heap while holding release_lock */
+		raw_spin_lock(&rt->release_lock);
+		VTRACE("acquired the release_lock 0x%p\n", &rt->release_lock);
+
+		rh = get_release_heap_res(rt, res, 0);
+		if (!rh) {
+			/* need to use our own, but drop lock first */
+			raw_spin_unlock(&rt->release_lock);
+			VTRACE("dropped release_lock 0x%p\n",
+				    &rt->release_lock);
+
+			reinit_release_heap_res(res);
+			VTRACE("release_heap ready\n");
+
+			raw_spin_lock(&rt->release_lock);
+			VTRACE("re-acquired release_lock 0x%p\n",
+				    &rt->release_lock);
+
+			rh = get_release_heap_res(rt, res, 1);
+		}
+		bheap_insert(rt->order, &rh->heap, res->heap_node);
+		list_add_tail(&res->ln, &rh->list_head);
+		VTRACE("arm_release_timer_res(): added to release heap\n");
+
+		rh = binheap_top_entry(&rt->release_queue.queue,
+				struct release_heap, node);
+		rt->release_queue.earliest_release = rh->release_time;
+
+		raw_spin_unlock(&rt->release_lock);
+		VTRACE("dropped the release_lock 0x%p\n", &rt->release_lock);
+
+		/* To avoid arming the timer multiple times, we only let the
+		 * owner of the new earliest release heap do the arming.
+		 */
+		if (rh == res->rel_heap) {
+			VTRACE("arming timer 0x%p\n", &rt->timer);
+
+			if (!hrtimer_is_hres_active(&rt->timer)) {
+				VTRACE("WARNING: no hires timer!!!\n");
+			}
+
+			/* we cannot arm the timer using hrtimer_start()
+			 * as it may deadlock on rq->lock
+			 *
+			 * PINNED mode is ok on both local and remote CPU
+			 */
+#ifdef CONFIG_RELEASE_MASTER
+			if (rt->release_master == NO_CPU &&
+			    target_cpu == NO_CPU)
+#endif
+				hrtimer_start(&rt->timer,
+					ns_to_ktime(rh->release_time),
+					HRTIMER_MODE_ABS_PINNED);
+#ifdef CONFIG_RELEASE_MASTER
+			else
+				hrtimer_start_on(
+					/* target_cpu overrides release master */
+					(target_cpu != NO_CPU ?
+					 target_cpu : rt->release_master),
+					&rt->info, &rt->timer,
+					ns_to_ktime(rh->release_time),
+					HRTIMER_MODE_ABS_PINNED);
+#endif
+		} else
+			VTRACE("timer 0x%p has been armed for an earlier time\n",
+				&rt->timer);
+	}
+}
+
 void rt_domain_init(rt_domain_t *rt,
 		    bheap_prio_t order,
 		    check_resched_needed_t check,
@@ -288,11 +449,16 @@ void rt_domain_init(rt_domain_t *rt,
 	INIT_LIST_HEAD(&rt->tobe_released);
 	for (i = 0; i < RELEASE_QUEUE_SLOTS; i++)
 		INIT_LIST_HEAD(&rt->release_queue.slot[i]);
+	INIT_BINHEAP_HANDLE(&rt->release_queue.queue, release_order);
+	rt->release_queue.earliest_release = 0;
 
 	raw_spin_lock_init(&rt->ready_lock);
 	raw_spin_lock_init(&rt->release_lock);
 	raw_spin_lock_init(&rt->tobe_lock);
 
+	hrtimer_init(&rt->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	rt->timer.function = on_release_timer;
+
 	rt->check_resched = check;
 	rt->release_jobs = release;
 	rt->order = order;
@@ -315,6 +481,13 @@ void __add_ready(rt_domain_t* rt, struct task_struct *new)
 	rt->check_resched(rt);
 }
 
+void __add_ready_res(rt_domain_t* rt, struct reservation* new)
+{
+	BUG_ON(bheap_node_in_heap(new->heap_node));
+
+	bheap_insert(rt->order, &rt->ready_queue, new->heap_node);
+}
+
 /* merge_ready - Add a sorted set of tasks to the rt ready queue. They must be runnable.
  * @tasks - the newly released tasks
  */
@@ -324,10 +497,15 @@ void __merge_ready(rt_domain_t* rt, struct bheap* tasks)
 	rt->check_resched(rt);
 }
 
+void __merge_ready_res(rt_domain_t* rt, struct bheap* res)
+{
+	bheap_union(rt->order, &rt->ready_queue, res);
+	rt->check_resched(rt);
+}
 
 #ifdef CONFIG_RELEASE_MASTER
 void __add_release_on(rt_domain_t* rt, struct task_struct *task,
 		      int target_cpu)
 {
 	TRACE_TASK(task, "add_release_on(), rel=%llu, target=%d\n",
 		   get_release(task), target_cpu);
@@ -336,6 +514,14 @@ void __add_release_on(rt_domain_t* rt, struct task_struct *task,
 
 	arm_release_timer_on(rt, target_cpu);
 }
+
+void __add_release_res_on(rt_domain_t* rt, struct reservation *res,
+		      int target_cpu)
+{
+	list_add(&res->ln, &rt->tobe_released);
+
+	arm_release_timer_res_on(rt, target_cpu);
+}
 #endif
 
 /* add_release - add a real-time task to the rt release queue.
@@ -349,3 +535,10 @@ void __add_release(rt_domain_t* rt, struct task_struct *task)
 
 	arm_release_timer(rt);
 }
+
+void __add_release_res(rt_domain_t* rt, struct reservation *res)
+{
+	list_add(&res->ln, &rt->tobe_released);
+
+	arm_release_timer_res(rt);
+}
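The release queue now maintains two views of the same heaps: the hash table (slot[time2slot(t)]) locates an existing heap for an exact release time in expected O(1), while the binheap keeps heaps ordered so the single domain timer always knows the earliest release. A toy userspace model of the slot hashing follows; time2quanta() is simplified to a fixed 1 ms quantum here, whereas the real function rounds by the configured quantum.

/* Toy model of the slot hashing: release times that fall in the same
 * quantum hash to the same slot, so all tasks/reservations sharing a
 * release time share one release_heap. */
#include <stdio.h>

#define RELEASE_QUEUE_SLOTS 127 /* prime, as in rt_domain.h */

static unsigned int toy_time2slot(unsigned long long time_ns)
{
	unsigned long long quanta = time_ns / 1000000ULL; /* FLOOR, 1 ms quantum */
	return (unsigned int)(quanta % RELEASE_QUEUE_SLOTS);
}

int main(void)
{
	/* two releases in the same quantum share a slot (and a heap)... */
	printf("%u %u\n", toy_time2slot(5000000), toy_time2slot(5400000));
	/* ...while a later release usually lands in a different slot */
	printf("%u\n", toy_time2slot(42000000));
	return 0;
}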
diff --git a/litmus/sched_ext_res.c b/litmus/sched_ext_res.c
index 0a3270346656..bf4dc151a4d7 100644
--- a/litmus/sched_ext_res.c
+++ b/litmus/sched_ext_res.c
@@ -13,7 +13,7 @@
 #include <litmus/litmus_proc.h>
 #include <litmus/sched_trace.h>
 
-#include <litmus/reservations/reservation.h>
+#include <litmus/reservations/ext_reservation.h>
 #include <litmus/reservations/alloc.h>
 
 struct pres_task_state {
@@ -32,6 +32,8 @@ struct pres_cpu_state {
 	struct task_struct* scheduled;
 };
 
+struct gedf_reservation_environment gedf_env;
+
 static DEFINE_PER_CPU(struct pres_cpu_state, pres_cpu_state);
 
 #define cpu_state_for(cpu_id) (&per_cpu(pres_cpu_state, cpu_id))
@@ -529,25 +531,18 @@ static void pres_setup_domain_proc(void)
 	}
 }
 
-static long pres_activate_plugin(void)
+static long ext_res_activate_plugin(void)
 {
 	int cpu;
-	struct pres_cpu_state *state;
+
+	gedf_reservation_environment_init(&gedf_env);
 
 	for_each_online_cpu(cpu) {
 		TRACE("Initializing CPU%d...\n", cpu);
-
-		state = cpu_state_for(cpu);
-
-		raw_spin_lock_init(&state->lock);
-		state->cpu = cpu;
-		state->scheduled = NULL;
-
-		sup_init(&state->sup_env);
-
-		hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
-		state->timer.function = on_scheduling_timer;
+		gedf_env.cpu_mapping[cpu] = cpu;
+		gedf_env.cpu_entries[cpu].id = cpu;
 	}
+	gedf_env.num_cpus = num_online_cpus();
 
 	pres_setup_domain_proc();
@@ -586,8 +581,8 @@ static long pres_deactivate_plugin(void)
 	return 0;
 }
 
-static struct sched_plugin pres_plugin = {
-	.plugin_name		= "P-RES",
+static struct sched_plugin ext_res_plugin = {
+	.plugin_name		= "EXT-RES",
 	.schedule		= pres_schedule,
 	.task_block		= pres_task_block,
 	.task_wake_up		= pres_task_resume,
@@ -597,16 +592,16 @@ static struct sched_plugin pres_plugin = {
 	.task_exit		= pres_task_exit,
 	.complete_job		= complete_job_oneshot,
 	.get_domain_proc_info	= pres_get_domain_proc_info,
-	.activate_plugin	= pres_activate_plugin,
+	.activate_plugin	= ext_res_activate_plugin,
 	.deactivate_plugin	= pres_deactivate_plugin,
 	.reservation_create	= pres_reservation_create,
 	.current_budget		= pres_current_budget,
 };
 
-static int __init init_pres(void)
+static int __init init_ext_res(void)
 {
-	return register_sched_plugin(&pres_plugin);
+	return register_sched_plugin(&ext_res_plugin);
 }
 
-module_init(init_pres);
+module_init(init_ext_res);
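Note that apart from activation, the renamed EXT-RES plugin still points every callback (schedule, task_block, task_wake_up, task_exit, reservation_create, and so on) at the existing pres_* implementations; only the plugin identity and the activation path have been switched over so far, consistent with this being a backup of work in progress.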