-rw-r--r-- | include/litmus/rt_domain.h     |   23
-rw-r--r-- | include/litmus/rt_param.h      |    7
-rw-r--r-- | include/litmus/sched_trace.h   |    4
-rw-r--r-- | include/litmus/servers.h       |  221
-rw-r--r-- | kernel/hrtimer.c               |    7
-rw-r--r-- | litmus/Kconfig                 |   12
-rw-r--r-- | litmus/Makefile                |    4
-rw-r--r-- | litmus/bheap.c                 |    3
-rw-r--r-- | litmus/edf_common.c            |    4
-rw-r--r-- | litmus/litmus.c                |   13
-rw-r--r-- | litmus/rt_domain.c             |   59
-rw-r--r-- | litmus/sched_edf_hsb.c         | 2556
-rw-r--r-- | litmus/sched_edf_hsb_noslack.c | 2556
-rw-r--r-- | litmus/servers.c               |  857
14 files changed, 6288 insertions, 38 deletions
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
index ac249292e866..0756f30f1387 100644
--- a/include/litmus/rt_domain.h
+++ b/include/litmus/rt_domain.h
@@ -80,7 +80,7 @@ void rt_domain_init(rt_domain_t *rt, bheap_prio_t order, | |||
80 | 80 | ||
81 | void __add_ready(rt_domain_t* rt, struct task_struct *new); | 81 | void __add_ready(rt_domain_t* rt, struct task_struct *new); |
82 | void __merge_ready(rt_domain_t* rt, struct bheap *tasks); | 82 | void __merge_ready(rt_domain_t* rt, struct bheap *tasks); |
83 | void __add_release(rt_domain_t* rt, struct task_struct *task); | 83 | int __add_release(rt_domain_t* rt, struct task_struct *task); |
84 | 84 | ||
85 | static inline struct task_struct* __take_ready(rt_domain_t* rt) | 85 | static inline struct task_struct* __take_ready(rt_domain_t* rt) |
86 | { | 86 | { |
@@ -140,26 +140,31 @@ static inline struct task_struct* take_ready(rt_domain_t* rt) | |||
140 | } | 140 | } |
141 | 141 | ||
142 | 142 | ||
143 | static inline void add_release(rt_domain_t* rt, struct task_struct *task) | 143 | static inline int add_release(rt_domain_t* rt, struct task_struct *task) |
144 | { | 144 | { |
145 | int rv; | ||
145 | unsigned long flags; | 146 | unsigned long flags; |
146 | raw_spin_lock_irqsave(&rt->tobe_lock, flags); | 147 | raw_spin_lock_irqsave(&rt->tobe_lock, flags); |
147 | __add_release(rt, task); | 148 | rv = __add_release(rt, task); |
148 | raw_spin_unlock_irqrestore(&rt->tobe_lock, flags); | 149 | raw_spin_unlock_irqrestore(&rt->tobe_lock, flags); |
150 | return rv; | ||
149 | } | 151 | } |
150 | 152 | ||
151 | #ifdef CONFIG_RELEASE_MASTER | 153 | #ifdef CONFIG_RELEASE_MASTER |
152 | void __add_release_on(rt_domain_t* rt, struct task_struct *task, | 154 | int __add_release_on(rt_domain_t* rt, struct task_struct *task, |
153 | int target_cpu); | 155 | int target_cpu); |
154 | 156 | ||
155 | static inline void add_release_on(rt_domain_t* rt, | 157 | static inline int add_release_on(rt_domain_t* rt, |
156 | struct task_struct *task, | 158 | struct task_struct *task, |
157 | int target_cpu) | 159 | int target_cpu) |
158 | { | 160 | { |
161 | int rv; | ||
159 | unsigned long flags; | 162 | unsigned long flags; |
160 | raw_spin_lock_irqsave(&rt->tobe_lock, flags); | 163 | raw_spin_lock_irqsave(&rt->tobe_lock, flags); |
161 | __add_release_on(rt, task, target_cpu); | 164 | rv = __add_release_on(rt, task, target_cpu); |
162 | raw_spin_unlock_irqrestore(&rt->tobe_lock, flags); | 165 | raw_spin_unlock_irqrestore(&rt->tobe_lock, flags); |
166 | |||
167 | return rv; | ||
163 | } | 168 | } |
164 | #endif | 169 | #endif |
165 | 170 | ||
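The hunk above changes __add_release()/add_release() (and the release-master variants) to report whether the release timer was actually armed. A hypothetical caller sketch, not part of the patch (plugin_requeue is an invented name): a plugin can use the return value to catch the race where the pre-allocated release heap fired just before the task was added, and treat the job as released instead.

    static void plugin_requeue(rt_domain_t *rt, struct task_struct *t)
    {
            /* add_release() now returns 0 when the release heap's timer
             * was already firing, so the job must be handled as released.
             */
            if (!add_release(rt, t))
                    add_ready(rt, t);
    }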
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 5de422c742f6..d40fdadba62c 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -5,6 +5,7 @@ | |||
5 | #ifndef _LINUX_RT_PARAM_H_ | 5 | #ifndef _LINUX_RT_PARAM_H_ |
6 | #define _LINUX_RT_PARAM_H_ | 6 | #define _LINUX_RT_PARAM_H_ |
7 | 7 | ||
8 | struct server; | ||
8 | /* Litmus time type. */ | 9 | /* Litmus time type. */ |
9 | typedef unsigned long long lt_t; | 10 | typedef unsigned long long lt_t; |
10 | 11 | ||
@@ -22,7 +23,7 @@ static inline int lt_after_eq(lt_t a, lt_t b) | |||
22 | 23 | ||
23 | /* different types of clients */ | 24 | /* different types of clients */ |
24 | typedef enum { | 25 | typedef enum { |
25 | RT_CLASS_HARD, | 26 | RT_CLASS_HARD = 0, |
26 | RT_CLASS_SOFT, | 27 | RT_CLASS_SOFT, |
27 | RT_CLASS_BEST_EFFORT | 28 | RT_CLASS_BEST_EFFORT |
28 | } task_class_t; | 29 | } task_class_t; |
@@ -191,12 +192,16 @@ struct rt_param { | |||
191 | 192 | ||
192 | /* Pointer to the page shared between userspace and kernel. */ | 193 | /* Pointer to the page shared between userspace and kernel. */ |
193 | struct control_page * ctrl_page; | 194 | struct control_page * ctrl_page; |
195 | |||
196 | /* Used for plugin-specific information */ | ||
197 | void* plugin_data; | ||
194 | }; | 198 | }; |
195 | 199 | ||
196 | /* Possible RT flags */ | 200 | /* Possible RT flags */ |
197 | #define RT_F_RUNNING 0x00000000 | 201 | #define RT_F_RUNNING 0x00000000 |
198 | #define RT_F_SLEEP 0x00000001 | 202 | #define RT_F_SLEEP 0x00000001 |
199 | #define RT_F_EXIT_SEM 0x00000008 | 203 | #define RT_F_EXIT_SEM 0x00000008 |
204 | #define RT_F_BLOCK 0x00000010 | ||
200 | 205 | ||
201 | #endif | 206 | #endif |
202 | 207 | ||
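A short sketch of how a plugin might use the new plugin_data pointer; struct my_task_data, my_task_new, and the field name are placeholders, not code from this patch (sched_edf_hsb.c below does exactly this with its task_data_t):

    struct my_task_data {
            int example_field;      /* per-task plugin state */
    };

    static void my_task_new(struct task_struct *t)
    {
            struct my_task_data *data = kmalloc(sizeof(*data), GFP_ATOMIC);

            data->example_field = 0;
            tsk_rt(t)->plugin_data = data;  /* freed again at task exit */
    }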
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index 7ca34cb13881..e12a993299b5 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -71,8 +71,8 @@ struct st_resume_data { /* A task resumes. */ | |||
71 | 71 | ||
72 | struct st_action_data { | 72 | struct st_action_data { |
73 | u64 when; | 73 | u64 when; |
74 | u8 action; | 74 | u16 action; |
75 | u8 __unused[7]; | 75 | u8 __unused[6]; |
76 | }; | 76 | }; |
77 | 77 | ||
78 | struct st_sys_release_data { | 78 | struct st_sys_release_data { |
diff --git a/include/litmus/servers.h b/include/litmus/servers.h
new file mode 100644
index 000000000000..b907feca50bd
--- /dev/null
+++ b/include/litmus/servers.h
@@ -0,0 +1,221 @@ | |||
1 | #ifndef _LINUX_SERVERS_H_ | ||
2 | #define _LINUX_SERVERS_H_ | ||
3 | |||
4 | struct server; | ||
5 | struct periodic_server; | ||
6 | struct proc_dir_entry; | ||
7 | struct server_domain; | ||
8 | struct server_release_heap; | ||
9 | struct completion_timer; | ||
10 | struct server_proc; | ||
11 | |||
12 | #define SERVER_RELEASE_QUEUE_SLOTS 127 | ||
13 | |||
14 | #define SERVER_FMT "{server/%d:%d}" | ||
15 | #define SERVER_ARGS(s) (s)->id, (s)->job_no | ||
16 | #define TASK_FMT "(%s/%d:%d)" | ||
17 | #define TASK_ARGS(t) (t)->comm, (t)->pid, (t)->rt_param.job_params.job_no | ||
18 | |||
19 | #define server_task(s) (((s)->cpu != NO_CPU)?s->domain->linked_tasks[(s)->cpu]:NULL) | ||
20 | #define is_server_linked(s) ((s)->cpu != NO_CPU) | ||
21 | |||
22 | /* | ||
23 | * A single schedulable server. | ||
24 | */ | ||
25 | typedef struct server { | ||
26 | /* Specified by the user */ | ||
27 | int id; | ||
28 | lt_t wcet; | ||
29 | lt_t period; | ||
30 | |||
31 | /* Optional */ | ||
32 | int type; | ||
33 | void* data; | ||
34 | |||
35 | /* Managed internally */ | ||
36 | lt_t deadline; | ||
37 | lt_t release; | ||
38 | lt_t budget; /* The remaining budget for current period */ | ||
39 | int job_no; /* Current job of server */ | ||
40 | int cpu; /* CPU the server is running on or NO_CPU */ | ||
41 | |||
42 | struct server_domain *domain; | ||
43 | |||
44 | /* For membership in collections */ | ||
45 | struct bheap_node *hn; | ||
46 | struct list_head list; | ||
47 | |||
48 | /* Used for grouped releases */ | ||
49 | struct server_release_heap *release_heap; | ||
50 | struct list_head release_list; | ||
51 | } server_t; | ||
52 | |||
53 | /* | ||
54 | * Called when a server exhausts its budget. | ||
55 | */ | ||
56 | typedef void (*server_completed_t)(struct server *server, | ||
57 | struct task_struct *was_running); | ||
58 | /* | ||
59 | * Called when a group of servers release | ||
60 | */ | ||
61 | typedef void (*servers_released_t)(struct list_head *servers); | ||
62 | /* | ||
63 | * Used to read server entries. | ||
64 | */ | ||
65 | typedef int (*admit_server_t)(unsigned long long wcet, | ||
66 | unsigned long long period, int cpu); | ||
67 | /* | ||
68 | * Lists all servers for a proc entry by calling list_server on each. | ||
69 | */ | ||
70 | typedef void (*list_servers_t)(struct server_proc *proc); | ||
71 | /* | ||
72 | * Stop all servers. Used to destroy servers on a proc entry rewrite. | ||
73 | */ | ||
74 | typedef void (*stop_servers_t)(void); | ||
75 | |||
76 | /* | ||
77 | * Useful tools for scheduling servers. | ||
78 | */ | ||
79 | typedef struct server_domain { | ||
80 | /* Collection of grouped releases */ | ||
81 | raw_spinlock_t release_lock; | ||
82 | struct list_head release_queue[SERVER_RELEASE_QUEUE_SLOTS]; | ||
83 | |||
84 | /* List of tasks to be added to the grouped releases */ | ||
85 | raw_spinlock_t tobe_lock; | ||
86 | struct list_head tobe_released; | ||
87 | |||
88 | /* CPU on which to release servers */ | ||
89 | int release_master; | ||
90 | |||
91 | /* Per CPU information for running servers */ | ||
92 | struct completion_timer* completion_timers; | ||
93 | server_t** linked_servers; | ||
94 | struct task_struct** linked_tasks; | ||
95 | lt_t* start_times; | ||
96 | |||
97 | /* Used to lock firing of the completion timer. | ||
98 | * This is needed here and not for the release timer because | ||
99 | * the completion timer actually modifies the state of the | ||
100 | * server itself. | ||
101 | */ | ||
102 | raw_spinlock_t* completion_lock; | ||
103 | |||
104 | /* Event callbacks */ | ||
105 | server_completed_t server_completed; | ||
106 | servers_released_t servers_released; | ||
107 | |||
108 | /* Proc entries for controlling groups of servers */ | ||
109 | struct list_head server_procs; | ||
110 | } server_domain_t; | ||
111 | |||
112 | /* | ||
113 | * A group of servers releasing simultaneously. | ||
114 | */ | ||
115 | typedef struct server_release_heap { | ||
116 | /* Servers to be released */ | ||
117 | struct list_head servers; | ||
118 | lt_t release_time; | ||
119 | |||
120 | /* For membership in the domain */ | ||
121 | struct list_head list; | ||
122 | |||
123 | /* For callbacks */ | ||
124 | server_domain_t *domain; | ||
125 | |||
126 | struct hrtimer timer; | ||
127 | struct hrtimer_start_on_info info; | ||
128 | } server_release_heap_t; | ||
129 | |||
130 | /* | ||
131 | * A timer for managing server completions. Can be managed concurrently. | ||
132 | */ | ||
133 | typedef struct completion_timer { | ||
134 | int armed; /* Is the timer armed or not? Seperate from the timer | ||
135 | * so that it can be used to disarm a timer which | ||
136 | * is already firing. | ||
137 | */ | ||
138 | int cpu; /* CPU where the server is running. This is not the | ||
139 | * cpu on which the timer will fire. | ||
140 | */ | ||
141 | struct hrtimer timer; | ||
142 | struct hrtimer_start_on_info info; | ||
143 | struct server_domain *domain; /* For callbacks */ | ||
144 | } completion_timer_t; | ||
145 | |||
146 | /* | ||
147 | * A proc directory entry which controls a group of servers. | ||
148 | */ | ||
149 | typedef struct server_proc { | ||
150 | struct proc_dir_entry *entry; | ||
151 | struct list_head list; | ||
152 | admit_server_t admit_server; /* Add a server from the entry */ | ||
153 | list_servers_t list_servers; /* List each server in the entry */ | ||
154 | stop_servers_t stop_servers; /* Disables all servers in the entry */ | ||
155 | char* page; /* Used internally by proc */ | ||
156 | int length; /* Used internally by proc */ | ||
157 | } server_proc_t; | ||
158 | |||
159 | /* | ||
160 | * Initialize and exit servers | ||
161 | */ | ||
162 | void server_init(server_t *server, server_domain_t *domain, int id, | ||
163 | lt_t wcet, lt_t period, int grouped); | ||
164 | void server_destroy(server_t *server); | ||
165 | |||
166 | /* | ||
167 | * Memory manage servers on the module slabs. | ||
168 | */ | ||
169 | server_t* server_alloc(int gfp_flags); | ||
170 | void server_free(server_t *server); | ||
171 | |||
172 | /* | ||
173 | * Initialize and exit the server domain. | ||
174 | */ | ||
175 | void server_domain_init(server_domain_t *domain, | ||
176 | servers_released_t servers_released, | ||
177 | server_completed_t server_completed, | ||
178 | int release_master, raw_spinlock_t* completion_lock); | ||
179 | void server_domain_destroy(server_domain_t *domain); | ||
180 | |||
181 | /* | ||
182 | * Adds the next release of the server to the domain's timer. | ||
183 | */ | ||
184 | int add_server_release(server_t *server, server_domain_t *server_domain); | ||
185 | |||
186 | /* | ||
187 | * Runs a task on the server. | ||
188 | */ | ||
189 | void server_run(server_t *server, struct task_struct *task); | ||
190 | |||
191 | /* | ||
192 | * Stops server execution. | ||
193 | */ | ||
194 | void server_stop(server_t *server); | ||
195 | |||
196 | /* | ||
197 | * Begins a server's next period. | ||
198 | */ | ||
199 | void server_release(server_t *server); | ||
200 | |||
201 | /* | ||
202 | * Set the next period to begin at the given time. | ||
203 | */ | ||
204 | void server_release_at(server_t *server, lt_t time); | ||
205 | |||
206 | /* | ||
207 | * Call once for every server which should be printed by list_servers. | ||
208 | */ | ||
209 | void list_server(server_t *server, int cpu, server_proc_t *proc); | ||
210 | |||
211 | /* | ||
212 | * Create and destroy a proc dir entry with the given file name. | ||
213 | */ | ||
214 | server_proc_t* server_proc_init(server_domain_t *domain, | ||
215 | struct proc_dir_entry *proc_dir, char *file, | ||
216 | admit_server_t admit_server, | ||
217 | list_servers_t list_servers, | ||
218 | stop_servers_t stop_servers); | ||
219 | void server_proc_exit(server_proc_t *proc); | ||
220 | |||
221 | #endif | ||
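A minimal usage sketch of the API declared above, assuming a plugin that owns the domain and supplies both callbacks; my_domain, my_completion_lock, my_setup, and the callback names are placeholders, not code from this patch:

    static server_domain_t my_domain;
    static raw_spinlock_t my_completion_lock;

    static void my_server_completed(server_t *server,
                                    struct task_struct *was_running);
    static void my_servers_released(struct list_head *servers);

    static void my_setup(void)
    {
            server_t *server;

            server_domain_init(&my_domain, my_servers_released,
                               my_server_completed,
                               NO_CPU /* no release master */,
                               &my_completion_lock);

            server = server_alloc(GFP_ATOMIC);
            server_init(server, &my_domain, 1 /* id */,
                        5 * NSEC_PER_MSEC /* wcet */,
                        20 * NSEC_PER_MSEC /* period */,
                        1 /* grouped releases */);

            server_release_at(server, litmus_clock()); /* begin first period */
    }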
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index cb49883b64e5..43931ad2395c 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -52,6 +52,9 @@ | |||
52 | 52 | ||
53 | #include <trace/events/timer.h> | 53 | #include <trace/events/timer.h> |
54 | 54 | ||
55 | #undef TRACE | ||
56 | #define TRACE(x, args...) do { } while(0) | ||
57 | |||
55 | /* | 58 | /* |
56 | * The timer bases: | 59 | * The timer bases: |
57 | * | 60 | * |
@@ -1062,7 +1065,7 @@ void hrtimer_pull(void) | |||
1062 | { | 1065 | { |
1063 | struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases); | 1066 | struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases); |
1064 | struct hrtimer_start_on_info *info; | 1067 | struct hrtimer_start_on_info *info; |
1065 | struct list_head *pos, *safe, list; | 1068 | struct list_head *pos, *safe, *prev, list; |
1066 | 1069 | ||
1067 | raw_spin_lock(&base->lock); | 1070 | raw_spin_lock(&base->lock); |
1068 | list_replace_init(&base->to_pull, &list); | 1071 | list_replace_init(&base->to_pull, &list); |
@@ -1073,6 +1076,8 @@ void hrtimer_pull(void) | |||
1073 | TRACE("pulled timer 0x%x\n", info->timer); | 1076 | TRACE("pulled timer 0x%x\n", info->timer); |
1074 | list_del(pos); | 1077 | list_del(pos); |
1075 | hrtimer_start(info->timer, info->time, info->mode); | 1078 | hrtimer_start(info->timer, info->time, info->mode); |
1079 | atomic_set(&info->state, HRTIMER_START_ON_INACTIVE); | ||
1080 | prev = pos; | ||
1076 | } | 1081 | } |
1077 | } | 1082 | } |
1078 | 1083 | ||
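hrtimer_pull() above relies on a standard kernel idiom: swap the shared list for an empty one while holding the lock, then walk the detached copy without it. A generic sketch of the idiom (drain() is illustrative, not part of the patch):

    static void drain(struct list_head *shared, raw_spinlock_t *lock)
    {
            struct list_head *pos, *safe;
            LIST_HEAD(detached);

            raw_spin_lock(lock);
            list_replace_init(shared, &detached);   /* shared is now empty */
            raw_spin_unlock(lock);

            list_for_each_safe(pos, safe, &detached) {
                    list_del(pos);
                    /* process the entry without contending on the lock */
            }
    }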
diff --git a/litmus/Kconfig b/litmus/Kconfig
index ad8dc8308cf0..08f4d5f62a71 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -34,6 +34,14 @@ config RELEASE_MASTER | |||
34 | (http://www.cs.unc.edu/~anderson/papers.html). | 34 | (http://www.cs.unc.edu/~anderson/papers.html). |
35 | Currently only supported by GSN-EDF. | 35 | Currently only supported by GSN-EDF. |
36 | 36 | ||
37 | config EDF_HSB_SLACK_STEALING | ||
38 | bool "EDF-HSB Slack Stealing" | ||
39 | depends on ARCH_HAS_SEND_PULL_TIMERS | ||
40 | default y | ||
41 | help | ||
42 | Allow servers in EDF-HSB to donate slack when they have no real-time | ||
43 | tasks to execute. | ||
44 | |||
37 | endmenu | 45 | endmenu |
38 | 46 | ||
39 | menu "Real-Time Synchronization" | 47 | menu "Real-Time Synchronization" |
@@ -102,8 +110,8 @@ config SCHED_TASK_TRACE | |||
102 | config SCHED_TASK_TRACE_SHIFT | 110 | config SCHED_TASK_TRACE_SHIFT |
103 | int "Buffer size for sched_trace_xxx() events" | 111 | int "Buffer size for sched_trace_xxx() events" |
104 | depends on SCHED_TASK_TRACE | 112 | depends on SCHED_TASK_TRACE |
105 | range 8 13 | 113 | range 8 24 |
106 | default 9 | 114 | default 24 |
107 | help | 115 | help |
108 | 116 | ||
109 | Select the buffer size of sched_trace_xxx() events as a power of two. | 117 | Select the buffer size of sched_trace_xxx() events as a power of two. |
diff --git a/litmus/Makefile b/litmus/Makefile
index ad9936e07b83..9468312b39e4 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -16,8 +16,10 @@ obj-y = sched_plugin.o litmus.o \ | |||
16 | srp.o \ | 16 | srp.o \ |
17 | bheap.o \ | 17 | bheap.o \ |
18 | ctrldev.o \ | 18 | ctrldev.o \ |
19 | servers.o \ | ||
19 | sched_gsn_edf.o \ | 20 | sched_gsn_edf.o \ |
20 | sched_psn_edf.o | 21 | sched_edf_hsb.o \ |
22 | sched_edf_hsb_noslack.o | ||
21 | 23 | ||
22 | obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o | 24 | obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o |
23 | obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o | 25 | obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o |
diff --git a/litmus/bheap.c b/litmus/bheap.c
index 528af97f18a6..cf9a0f842c17 100644
--- a/litmus/bheap.c
+++ b/litmus/bheap.c
@@ -276,6 +276,9 @@ void bheap_delete(bheap_prio_t higher_prio, struct bheap* heap, | |||
276 | pos = heap->head; | 276 | pos = heap->head; |
277 | while (pos != node) { | 277 | while (pos != node) { |
278 | prev = pos; | 278 | prev = pos; |
279 | /* a dereferencing error here means that | ||
280 | * the node was not in this heap | ||
281 | */ | ||
279 | pos = pos->next; | 282 | pos = pos->next; |
280 | } | 283 | } |
281 | /* we have prev, now remove node */ | 284 | /* we have prev, now remove node */ |
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index 9b44dc2d8d1e..1bb3452c5437 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -114,5 +114,7 @@ int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t) | |||
114 | */ | 114 | */ |
115 | 115 | ||
116 | /* make sure to get non-rt stuff out of the way */ | 116 | /* make sure to get non-rt stuff out of the way */ |
117 | return !is_realtime(t) || edf_higher_prio(__next_ready(rt), t); | 117 | return !is_realtime(t) || |
118 | (get_deadline(__next_ready(rt)) != get_deadline(t) && | ||
119 | edf_higher_prio(__next_ready(rt), t)); | ||
118 | } | 120 | } |
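Note on the hunk above: edf_higher_prio() breaks exact deadline ties by PID, so the old code could demand a preemption between two tasks with equal deadlines. The added comparison suppresses that case; an equivalent reading as a standalone sketch (the original function's early-outs for an empty ready queue, which precede this return, are omitted):

    static int preemption_needed(rt_domain_t *rt, struct task_struct *t)
    {
            struct task_struct *next = __next_ready(rt);

            /* Preempt only for non-RT tasks or a strictly earlier deadline;
             * an equal deadline never forces a switch, even though
             * edf_higher_prio() might win its PID tie-break.
             */
            return !is_realtime(t) ||
                    (get_deadline(next) != get_deadline(t) &&
                     edf_higher_prio(next, t));
    }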
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 26938acacafc..64f82aa0e246 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -16,6 +16,7 @@ | |||
16 | #include <litmus/rt_domain.h> | 16 | #include <litmus/rt_domain.h> |
17 | #include <litmus/litmus_proc.h> | 17 | #include <litmus/litmus_proc.h> |
18 | #include <litmus/sched_trace.h> | 18 | #include <litmus/sched_trace.h> |
19 | #include <litmus/servers.h> | ||
19 | 20 | ||
20 | /* Number of RT tasks that exist in the system */ | 21 | /* Number of RT tasks that exist in the system */ |
21 | atomic_t rt_task_count = ATOMIC_INIT(0); | 22 | atomic_t rt_task_count = ATOMIC_INIT(0); |
@@ -375,12 +376,12 @@ void litmus_exit_task(struct task_struct* tsk) | |||
375 | 376 | ||
376 | litmus->task_exit(tsk); | 377 | litmus->task_exit(tsk); |
377 | 378 | ||
378 | BUG_ON(bheap_node_in_heap(tsk_rt(tsk)->heap_node)); | 379 | if (!bheap_node_in_heap(tsk_rt(tsk)->heap_node)) { |
379 | bheap_node_free(tsk_rt(tsk)->heap_node); | 380 | bheap_node_free(tsk_rt(tsk)->heap_node); |
380 | release_heap_free(tsk_rt(tsk)->rel_heap); | 381 | release_heap_free(tsk_rt(tsk)->rel_heap); |
381 | 382 | reinit_litmus_state(tsk, 1); | |
383 | } | ||
382 | atomic_dec(&rt_task_count); | 384 | atomic_dec(&rt_task_count); |
383 | reinit_litmus_state(tsk, 1); | ||
384 | } | 385 | } |
385 | } | 386 | } |
386 | 387 | ||
@@ -527,7 +528,7 @@ static int __init _init_litmus(void) | |||
527 | 528 | ||
528 | register_sched_plugin(&linux_sched_plugin); | 529 | register_sched_plugin(&linux_sched_plugin); |
529 | 530 | ||
530 | bheap_node_cache = KMEM_CACHE(bheap_node, SLAB_PANIC); | 531 | bheap_node_cache = KMEM_CACHE(bheap_node, SLAB_PANIC); |
531 | release_heap_cache = KMEM_CACHE(release_heap, SLAB_PANIC); | 532 | release_heap_cache = KMEM_CACHE(release_heap, SLAB_PANIC); |
532 | 533 | ||
533 | #ifdef CONFIG_MAGIC_SYSRQ | 534 | #ifdef CONFIG_MAGIC_SYSRQ |
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index 81a5ac16f164..011a38159491 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -147,29 +147,41 @@ static struct release_heap* get_release_heap(rt_domain_t *rt, | |||
147 | return heap; | 147 | return heap; |
148 | } | 148 | } |
149 | 149 | ||
150 | static void reinit_release_heap(struct task_struct* t) | 150 | static int reinit_release_heap(struct task_struct* t) |
151 | { | 151 | { |
152 | int rv = 0; | ||
152 | struct release_heap* rh; | 153 | struct release_heap* rh; |
153 | 154 | ||
154 | /* use pre-allocated release heap */ | 155 | /* use pre-allocated release heap */ |
155 | rh = tsk_rt(t)->rel_heap; | 156 | rh = tsk_rt(t)->rel_heap; |
156 | 157 | ||
157 | /* Make sure it is safe to use. The timer callback could still | 158 | /* |
158 | * be executing on another CPU; hrtimer_cancel() will wait | ||
159 | * until the timer callback has completed. However, under no | ||
160 | * circumstances should the timer be active (= yet to be | ||
161 | * triggered). | ||
162 | * | ||
163 | * WARNING: If the CPU still holds the release_lock at this point, | 159 | * WARNING: If the CPU still holds the release_lock at this point, |
164 | * deadlock may occur! | 160 | * deadlock may occur! |
165 | */ | 161 | */ |
166 | BUG_ON(hrtimer_cancel(&rh->timer)); | 162 | rv = hrtimer_try_to_cancel(&rh->timer); |
163 | |||
164 | /* The timer callback is running; it is useless to add | ||
165 | * to the release heap now. | ||
166 | */ | ||
167 | if (rv == -1) { | ||
168 | rv = 0; | ||
169 | goto out; | ||
170 | } | ||
171 | |||
172 | /* Under no circumstances should the timer have been active | ||
173 | * but not running. | ||
174 | */ | ||
175 | BUG_ON(rv == 1); | ||
176 | rv = 1; | ||
167 | 177 | ||
168 | /* initialize */ | 178 | /* initialize */ |
169 | bheap_init(&rh->heap); | 179 | bheap_init(&rh->heap); |
170 | #ifdef CONFIG_RELEASE_MASTER | 180 | #ifdef CONFIG_RELEASE_MASTER |
171 | atomic_set(&rh->info.state, HRTIMER_START_ON_INACTIVE); | 181 | atomic_set(&rh->info.state, HRTIMER_START_ON_INACTIVE); |
172 | #endif | 182 | #endif |
183 | out: | ||
184 | return rv; | ||
173 | } | 185 | } |
174 | /* arm_release_timer() - start local release timer or trigger | 186 | /* arm_release_timer() - start local release timer or trigger |
175 | * remote timer (pull timer) | 187 | * remote timer (pull timer) |
@@ -180,11 +192,12 @@ static void reinit_release_heap(struct task_struct* t) | |||
180 | */ | 192 | */ |
181 | #ifdef CONFIG_RELEASE_MASTER | 193 | #ifdef CONFIG_RELEASE_MASTER |
182 | #define arm_release_timer(t) arm_release_timer_on((t), NO_CPU) | 194 | #define arm_release_timer(t) arm_release_timer_on((t), NO_CPU) |
183 | static void arm_release_timer_on(rt_domain_t *_rt , int target_cpu) | 195 | static int arm_release_timer_on(rt_domain_t *_rt , int target_cpu) |
184 | #else | 196 | #else |
185 | static void arm_release_timer(rt_domain_t *_rt) | 197 | static int arm_release_timer(rt_domain_t *_rt) |
186 | #endif | 198 | #endif |
187 | { | 199 | { |
200 | int rv = 1; | ||
188 | rt_domain_t *rt = _rt; | 201 | rt_domain_t *rt = _rt; |
189 | struct list_head list; | 202 | struct list_head list; |
190 | struct list_head *pos, *safe; | 203 | struct list_head *pos, *safe; |
@@ -211,9 +224,15 @@ static void arm_release_timer(rt_domain_t *_rt) | |||
211 | VTRACE_TASK(t, "Dropped release_lock 0x%p\n", | 224 | VTRACE_TASK(t, "Dropped release_lock 0x%p\n", |
212 | &rt->release_lock); | 225 | &rt->release_lock); |
213 | 226 | ||
214 | reinit_release_heap(t); | 227 | rv = reinit_release_heap(t); |
215 | VTRACE_TASK(t, "release_heap ready\n"); | 228 | VTRACE_TASK(t, "release_heap ready\n"); |
216 | 229 | ||
230 | /* Bail! The heap we should be using just released right | ||
231 | * before we added ourselves to it. | ||
232 | */ | ||
233 | if (!rv) | ||
234 | goto out; | ||
235 | |||
217 | raw_spin_lock(&rt->release_lock); | 236 | raw_spin_lock(&rt->release_lock); |
218 | VTRACE_TASK(t, "Re-acquired release_lock 0x%p\n", | 237 | VTRACE_TASK(t, "Re-acquired release_lock 0x%p\n", |
219 | &rt->release_lock); | 238 | &rt->release_lock); |
@@ -257,6 +276,9 @@ static void arm_release_timer(rt_domain_t *_rt) | |||
257 | } else | 276 | } else |
258 | VTRACE_TASK(t, "0x%p is not my timer\n", &rh->timer); | 277 | VTRACE_TASK(t, "0x%p is not my timer\n", &rh->timer); |
259 | } | 278 | } |
279 | |||
280 | out: | ||
281 | return rv; | ||
260 | } | 282 | } |
261 | 283 | ||
262 | void rt_domain_init(rt_domain_t *rt, | 284 | void rt_domain_init(rt_domain_t *rt, |
@@ -319,9 +341,11 @@ void __merge_ready(rt_domain_t* rt, struct bheap* tasks) | |||
319 | 341 | ||
320 | 342 | ||
321 | #ifdef CONFIG_RELEASE_MASTER | 343 | #ifdef CONFIG_RELEASE_MASTER |
322 | void __add_release_on(rt_domain_t* rt, struct task_struct *task, | 344 | int __add_release_on(rt_domain_t* rt, struct task_struct *task, |
323 | int target_cpu) | 345 | int target_cpu) |
324 | { | 346 | { |
347 | int rv; | ||
348 | |||
325 | TRACE_TASK(task, "add_release_on(), rel=%llu, target=%d\n", | 349 | TRACE_TASK(task, "add_release_on(), rel=%llu, target=%d\n", |
326 | get_release(task), target_cpu); | 350 | get_release(task), target_cpu); |
327 | list_add(&tsk_rt(task)->list, &rt->tobe_released); | 351 | list_add(&tsk_rt(task)->list, &rt->tobe_released); |
@@ -330,17 +354,20 @@ void __add_release_on(rt_domain_t* rt, struct task_struct *task, | |||
330 | /* start release timer */ | 354 | /* start release timer */ |
331 | TS_SCHED2_START(task); | 355 | TS_SCHED2_START(task); |
332 | 356 | ||
333 | arm_release_timer_on(rt, target_cpu); | 357 | rv = arm_release_timer_on(rt, target_cpu); |
334 | 358 | ||
335 | TS_SCHED2_END(task); | 359 | TS_SCHED2_END(task); |
360 | |||
361 | return rv; | ||
336 | } | 362 | } |
337 | #endif | 363 | #endif |
338 | 364 | ||
339 | /* add_release - add a real-time task to the rt release queue. | 365 | /* add_release - add a real-time task to the rt release queue. |
340 | * @task: the sleeping task | 366 | * @task: the sleeping task |
341 | */ | 367 | */ |
342 | void __add_release(rt_domain_t* rt, struct task_struct *task) | 368 | int __add_release(rt_domain_t* rt, struct task_struct *task) |
343 | { | 369 | { |
370 | int rv; | ||
344 | TRACE_TASK(task, "add_release(), rel=%llu\n", get_release(task)); | 371 | TRACE_TASK(task, "add_release(), rel=%llu\n", get_release(task)); |
345 | list_add(&tsk_rt(task)->list, &rt->tobe_released); | 372 | list_add(&tsk_rt(task)->list, &rt->tobe_released); |
346 | task->rt_param.domain = rt; | 373 | task->rt_param.domain = rt; |
@@ -348,8 +375,10 @@ void __add_release(rt_domain_t* rt, struct task_struct *task) | |||
348 | /* start release timer */ | 375 | /* start release timer */ |
349 | TS_SCHED2_START(task); | 376 | TS_SCHED2_START(task); |
350 | 377 | ||
351 | arm_release_timer(rt); | 378 | rv = arm_release_timer(rt); |
352 | 379 | ||
353 | TS_SCHED2_END(task); | 380 | TS_SCHED2_END(task); |
381 | |||
382 | return rv; | ||
354 | } | 383 | } |
355 | 384 | ||
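For reference, the hrtimer_try_to_cancel() contract that the rewritten reinit_release_heap() depends on, as a standalone sketch (safe_to_reuse() is illustrative only):

    static int safe_to_reuse(struct hrtimer *timer)
    {
            int ret = hrtimer_try_to_cancel(timer);

            /* -1: the callback is running right now -> caller must bail
             *  0: the timer was inactive            -> safe to reinitialize
             *  1: an armed timer was cancelled      -> illegal state here
             */
            BUG_ON(ret == 1);
            return ret == 0;
    }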
diff --git a/litmus/sched_edf_hsb.c b/litmus/sched_edf_hsb.c
new file mode 100644
index 000000000000..d74f4337ac2b
--- /dev/null
+++ b/litmus/sched_edf_hsb.c
@@ -0,0 +1,2556 @@ | |||
1 | /* | ||
2 | * litmus/sched_edf_hsb.c | ||
3 | * | ||
4 | * Implementation of the EDF-HSB scheduling algorithm. | ||
5 | * | ||
6 | * The following 6 events are fired by timers and not handled by | ||
7 | * the plugin infrastructure itself: | ||
8 | * | ||
9 | * release_[hrt|srt|be]_jobs | ||
10 | * [hrt|be]_server_released | ||
11 | * server_completed (for HRT, SRT, and BE) | ||
12 | * | ||
13 | * The following 4 events are caused by a write to the proc entry | ||
14 | * and should never be run when the plugin is already running: | ||
15 | * stop_[hrt|be]_servers | ||
16 | * admit_[hrt|be]_server | ||
17 | * | ||
18 | * TODO system for removing tasks from their release queues | ||
19 | * TODO clean up link_to_cpu and check_slack args | ||
20 | * TODO move slack completion into release | ||
21 | * TODO fix concurrent arms | ||
22 | * TODO slack and BE servers, include slack higher prio | ||
23 | * TODO start servers should no longer be necessary | ||
24 | * TODO harmonize order of method arguments | ||
25 | * TODO test crazy task_new hack | ||
26 | * TODO remove bheap_node_in_heap check in litmus_exit_task | ||
27 | */ | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/uaccess.h> | ||
30 | #include <linux/percpu.h> | ||
31 | #include <linux/spinlock.h> | ||
32 | #include <linux/ctype.h> | ||
33 | #include <linux/sched.h> | ||
34 | #include <linux/hrtimer.h> | ||
35 | |||
36 | #include <litmus/litmus.h> | ||
37 | #include <litmus/bheap.h> | ||
38 | #include <litmus/jobs.h> | ||
39 | #include <litmus/litmus_proc.h> | ||
40 | #include <litmus/sched_plugin.h> | ||
41 | #include <litmus/edf_common.h> | ||
42 | #include <litmus/sched_trace.h> | ||
43 | #include <litmus/servers.h> | ||
44 | #define DEBUG_EDF_HSB | ||
45 | |||
46 | /* DOES NOT WORK */ | ||
47 | //#define SLACK_ON_MASTER | ||
48 | |||
49 | #define BE_PROC_NAME "be_servers" | ||
50 | #define HRT_PROC_NAME "hrt_servers" | ||
51 | #define BE_SERVER_BASE 100 | ||
52 | #define IDLE_SLACK_BASE 1000 | ||
53 | #define SLACK_MIN NSEC_PER_MSEC | ||
54 | |||
55 | /* SCHED_TRACE action events */ | ||
56 | #define SERVER_COMPLETED_ACTION 1 | ||
57 | #define SERVER_RELEASED_ACTION 2 | ||
58 | #define NO_SLACK_ACTION 3 | ||
59 | #define SLACK_RUN_ACTION 4 | ||
60 | #define SLACK_STOP_ACTION 5 | ||
61 | #define SLACK_RECLAIM_ACTION 6 | ||
62 | #define SLACK_EXPIRED_ACTION 7 | ||
63 | #define SLACK_DONATED_ACTION 8 | ||
64 | #define CANDIDATE_ADDED_ACTION 9 | ||
65 | |||
66 | /* Uncomment for human readable time */ | ||
67 | #define TIME(x) \ | ||
68 | (x) | ||
69 | /* ({lt_t y = x; \ */ | ||
70 | /* do_div(y, NSEC_PER_MSEC); \ */ | ||
71 | /* y;}) */ | ||
72 | #define TRACE_TIMER(fmt, args...) \ | ||
73 | sched_trace_log_message("%d P%d*[%s@%s:%d]: " fmt " at %llu\n", \ | ||
74 | TRACE_ARGS, ## args, TIME(litmus_clock())) | ||
75 | #define TRACE_TASK_TIMER(t, fmt, args...) \ | ||
76 | TRACE_TIMER("(%s/%d:%d) " fmt, (t)->comm, (t)->pid, \ | ||
77 | (t)->rt_param.job_params.job_no, ## args) | ||
78 | |||
79 | /* | ||
80 | * Useful debugging macros. Remove for actual use as they cause | ||
81 | * a lot of lock contention. | ||
82 | */ | ||
83 | #ifdef DEBUG_EDF_HSB | ||
84 | |||
85 | #define TRACE_SUB(fmt, args...) \ | ||
86 | sched_trace_log_message("%d P%d [%s@%s:%d]: " fmt "\n", \ | ||
87 | TRACE_ARGS, ## args) | ||
88 | #define TRACE_TASK_SUB(t, fmt, args...) \ | ||
89 | TRACE_SUB(TASK_FMT " " fmt, TASK_ARGS(t), ##args) | ||
90 | #define TRACE_SERVER_SUB(s, fmt, args...) \ | ||
91 | TRACE_SUB(SERVER_FMT " " fmt, SERVER_ARGS(s), ##args) | ||
92 | #define TRACE_TASK_SERVER_SUB(t, s, fmt, args...) \ | ||
93 | TRACE_TASK_SUB(t, SERVER_FMT " " fmt, SERVER_ARGS(s), ##args) | ||
94 | #else | ||
95 | #define TRACE_SUB(fmt, args...) | ||
96 | #define TRACE_TASK_SUB(t, fmt, args...) | ||
97 | #define TRACE_SERVER_SUB(s, fmt, args...) | ||
98 | #define TRACE_TASK_SERVER_SUB(t, s, fmt, args...) | ||
99 | #endif | ||
100 | |||
101 | /* | ||
102 | * Different types of servers | ||
103 | */ | ||
104 | typedef enum { | ||
105 | S_HRT, | ||
106 | S_SRT, | ||
107 | S_BE, | ||
108 | S_SLACK | ||
109 | } server_type_t; | ||
110 | |||
111 | /* | ||
112 | * A server running HRT tasks | ||
113 | */ | ||
114 | typedef struct { | ||
115 | server_t server; | ||
116 | rt_domain_t hrt_domain; /* EDF for HRT tasks assigned here */ | ||
117 | int ready; /* False if waiting for next release */ | ||
118 | int no_slack; | ||
119 | struct hrtimer slack_timer; /* Server has no slack when: | ||
120 | * (deadline - budget) <= current_time. | ||
121 | */ | ||
122 | struct hrtimer_start_on_info slack_timer_info; | ||
123 | } hrt_server_t; | ||
124 | |||
125 | /* | ||
126 | * State of a single CPU | ||
127 | */ | ||
128 | typedef struct { | ||
129 | int cpu; | ||
130 | struct task_struct* scheduled; /* Task that should be running */ | ||
131 | struct task_struct* linked; /* Task that actually is running */ | ||
132 | server_t *scheduled_server; | ||
133 | server_t *linked_server; /* The server running on this cpu. | ||
134 | * Note that what it is 'running' is | ||
135 | * linked, not scheduled. | ||
136 | */ | ||
137 | hrt_server_t hrt_server; /* One HRT server per CPU */ | ||
138 | struct bheap_node* hn; /* For the cpu_heap */ | ||
139 | } cpu_entry_t; | ||
140 | |||
141 | /* | ||
142 | * Data assigned to each task | ||
143 | */ | ||
144 | typedef struct task_data { | ||
145 | server_t *srt_server; /* If the task is SRT, its server */ | ||
146 | struct list_head candidate_list; /* List of slack candidates */ | ||
147 | struct task_struct *owner; | ||
148 | } task_data_t; | ||
149 | |||
150 | /* CPU state */ | ||
151 | DEFINE_PER_CPU_SHARED_ALIGNED(cpu_entry_t, cpu_entries); | ||
152 | static struct bheap cpu_heap; | ||
153 | static struct bheap_node cpu_heap_node[NR_CPUS]; | ||
154 | /* Task domains */ | ||
155 | static rt_domain_t srt_domain; | ||
156 | static rt_domain_t be_domain; | ||
157 | /* Useful tools for server scheduling */ | ||
158 | static server_domain_t server_domain; | ||
159 | /* BE server support */ | ||
160 | static struct list_head be_servers; | ||
161 | static struct bheap be_ready_servers; | ||
162 | /* Slack support */ | ||
163 | static struct list_head slack_queue; | ||
164 | static struct list_head slack_candidates; | ||
165 | /* CPU which will release tasks and global servers */ | ||
166 | static int edf_hsb_release_master; | ||
167 | /* Cache to store task_data structs */ | ||
168 | static struct kmem_cache *task_data_cache; | ||
169 | |||
170 | static struct proc_dir_entry *edf_hsb_proc_dir = NULL; | ||
171 | static struct sched_plugin edf_hsb_plugin __cacheline_aligned_in_smp; | ||
172 | |||
173 | #define task_sched_entry(task) (&per_cpu(cpu_entries, task_cpu(task))) | ||
174 | #define task_linked_entry(task) (&per_cpu(cpu_entries, task->rt_param.linked_on)) | ||
175 | #define task_job_no(task) (tsk_rt(task)->job_params.job_no) | ||
176 | #define task_data(task) ((task_data_t*)tsk_rt(task)->plugin_data) | ||
177 | #define task_srt_server(task) ((server_t*)task_data(task)->srt_server) | ||
178 | #define server_slack(s) ((server_t*)(s)->data) | ||
179 | #define server_has_slack(s) (server_slack(s)->deadline != 0) | ||
180 | #define local_cpu_entry (&__get_cpu_var(cpu_entries)) | ||
181 | #define global_lock (&srt_domain.ready_lock) | ||
182 | #define is_active_plugin (litmus == &edf_hsb_plugin) | ||
183 | |||
184 | /* | ||
185 | * This only works if items are deleted with list_del_init. | ||
186 | */ | ||
187 | static inline int head_in_list(struct list_head *head) | ||
188 | { | ||
189 | BUG_ON(!head); | ||
190 | return !(head->next == head->prev && head->prev == head); | ||
191 | } | ||
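/*
 * Illustrative companion to head_in_list() (not part of the patch): the
 * test above is only sound when entries leave lists via list_del_init(),
 * which re-points the node at itself -- exactly the state reported as
 * "not in a list". Plain list_del() would poison the pointers instead.
 */
static void untrack(struct list_head *entry)
{
        if (head_in_list(entry))
                list_del_init(entry);
}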
192 | |||
193 | /* | ||
194 | * Returns slack server running the task or NULL if N/A. | ||
195 | */ | ||
196 | static inline server_t* task_slack_server(struct task_struct *task) | ||
197 | { | ||
198 | server_t *slack_server = NULL; | ||
199 | if (task->rt_param.linked_on != NO_CPU) { | ||
200 | slack_server = task_linked_entry(task)->linked_server; | ||
201 | if (slack_server->type != S_SLACK) | ||
202 | slack_server = NULL; | ||
203 | } | ||
204 | return slack_server; | ||
205 | } | ||
206 | |||
207 | static task_data_t* task_data_alloc(int gfp_flags) | ||
208 | { | ||
209 | return kmem_cache_alloc(task_data_cache, gfp_flags); | ||
210 | } | ||
211 | |||
212 | static void task_data_free(task_data_t* data) | ||
213 | { | ||
214 | kmem_cache_free(task_data_cache, data); | ||
215 | } | ||
216 | |||
217 | /* | ||
218 | * Donating servers pre-allocate a server for slack to avoid runtime | ||
219 | * calls to kmalloc. | ||
220 | */ | ||
221 | static void server_slack_create(server_t *donator) | ||
222 | { | ||
223 | server_t *slack = server_alloc(GFP_ATOMIC); | ||
224 | |||
225 | server_init(slack, &server_domain, -donator->id, 0, 0, 1); | ||
226 | slack->type = S_SLACK; | ||
227 | slack->data = donator; | ||
228 | donator->data = slack; | ||
229 | } | ||
230 | |||
231 | |||
232 | static void server_slack_destroy(server_t *donator) | ||
233 | { | ||
234 | server_t *slack = (server_t*)donator->data; | ||
235 | |||
236 | donator->data = NULL; | ||
237 | server_destroy(slack); | ||
238 | server_free(slack); | ||
239 | } | ||
240 | |||
241 | static void remove_slack(server_t *slack) | ||
242 | { | ||
243 | if (!slack) | ||
244 | return; | ||
245 | TRACE_SERVER_SUB(slack, "slack removed"); | ||
246 | //////sched_trace_action(NULL, SLACK_EXPIRED_ACTION); | ||
247 | |||
248 | if (head_in_list(&slack->list)) | ||
249 | list_del_init(&slack->list); | ||
250 | slack->deadline = 0; | ||
251 | slack->budget = 0; | ||
252 | slack->wcet = 0; | ||
253 | } | ||
254 | |||
255 | /* | ||
256 | * Slack queue is EDF. | ||
257 | */ | ||
258 | static void add_slack(server_t *slack) | ||
259 | { | ||
260 | struct list_head *pos; | ||
261 | server_t *queued; | ||
262 | |||
263 | TRACE_SERVER_SUB(slack, "slack added"); | ||
264 | |||
265 | if (head_in_list(&slack->list)) { | ||
266 | TRACE_SERVER_SUB(slack, "already in list"); | ||
267 | return; | ||
268 | } | ||
269 | |||
270 | list_for_each_prev(pos, &slack_queue) { | ||
271 | queued = list_entry(pos, server_t, list); | ||
272 | if (lt_before_eq(queued->deadline, slack->deadline)) { | ||
273 | __list_add(&slack->list, pos, pos->next); | ||
274 | return; | ||
275 | } | ||
276 | } | ||
277 | list_add(&slack->list, &slack_queue); | ||
278 | } | ||
279 | |||
280 | static inline struct task_struct* get_candidate(struct list_head *pos) | ||
281 | { | ||
282 | struct task_struct *task = NULL; | ||
283 | task_data_t *data; | ||
284 | if (!list_empty(pos)) { | ||
285 | data = list_entry(pos, task_data_t, candidate_list); | ||
286 | task = data->owner; | ||
287 | } | ||
288 | return task; | ||
289 | } | ||
290 | |||
291 | static inline lt_t real_deadline(struct task_struct *task) | ||
292 | { | ||
293 | server_t *server = task_srt_server(task); | ||
294 | int job_diff = server->job_no - task_job_no(task); | ||
295 | return get_deadline(task) - job_diff * get_rt_period(task); | ||
296 | } | ||
297 | |||
298 | /* | ||
299 | * Candidate queue is EDF. | ||
300 | */ | ||
301 | static void add_slack_candidate(struct task_struct *task) | ||
302 | { | ||
303 | struct list_head *pos; | ||
304 | struct task_struct *queued; | ||
305 | |||
306 | TRACE_TASK_SUB(task, "candidate added"); | ||
307 | |||
308 | list_for_each_prev(pos, &slack_candidates) { | ||
309 | queued = get_candidate(pos); | ||
310 | if (lt_before_eq(real_deadline(queued), real_deadline(task))) { | ||
311 | __list_add(&task_data(task)->candidate_list, | ||
312 | pos, pos->next); | ||
313 | return; | ||
314 | } | ||
315 | } | ||
316 | list_add(&task_data(task)->candidate_list, &slack_candidates); | ||
317 | } | ||
318 | |||
319 | static void donate_slack(server_t *donator) | ||
320 | { | ||
321 | server_t *slack = (server_t*)donator->data; | ||
322 | hrt_server_t *hrt_server; | ||
323 | |||
324 | TRACE_SERVER_SUB(donator, "%llu slack donated", TIME(donator->budget)); | ||
325 | |||
326 | if (donator->type == S_HRT) { | ||
327 | hrt_server = container_of(donator, hrt_server_t, server); | ||
328 | BUG_ON(!hrt_server->ready); | ||
329 | } | ||
330 | |||
331 | slack->wcet = donator->budget; | ||
332 | slack->budget = donator->budget; | ||
333 | slack->deadline = donator->deadline; | ||
334 | |||
335 | add_slack(slack); | ||
336 | } | ||
337 | |||
338 | #ifdef CONFIG_EDF_HSB_SLACK_STEALING | ||
339 | /* | ||
340 | * Donate any available slack from a server. | ||
341 | */ | ||
342 | static noinline void check_donate_slack(server_t *donator, struct task_struct *was_scheduled) | ||
343 | { | ||
344 | server_t *slack = server_slack(donator); | ||
345 | hrt_server_t *hrt_server; | ||
346 | int donate = 0; | ||
347 | |||
348 | TRACE_SERVER_SUB(donator, "checking donation"); | ||
349 | |||
350 | if (!slack) | ||
351 | return; | ||
352 | |||
353 | /* Donating small amounts of slack will result in excess migrations */ | ||
354 | if (donator->budget < SLACK_MIN) | ||
355 | return; | ||
356 | |||
357 | if (server_has_slack(donator)) { | ||
358 | TRACE_SERVER_SUB(donator, "dead: %llu, rel: %llu, job: %d already donated", | ||
359 | slack->deadline, slack->release, slack->job_no); | ||
360 | return; | ||
361 | } | ||
362 | |||
363 | if (donator->type == S_HRT) { | ||
364 | hrt_server = container_of(donator, hrt_server_t, server); | ||
365 | } | ||
366 | |||
367 | /* Donate if the server is waiting for a task release */ | ||
368 | if ((donator->type == S_SRT && | ||
369 | donator->job_no <= task_job_no(was_scheduled)) || | ||
370 | (donator->type == S_HRT && | ||
371 | hrt_server->no_slack && hrt_server->ready && | ||
372 | !__jobs_pending(&hrt_server->hrt_domain)) || | ||
373 | (donator->type == S_BE && | ||
374 | !__jobs_pending(&be_domain))) { | ||
375 | donate = 1; | ||
376 | } | ||
377 | |||
378 | if (!donate) | ||
379 | return; | ||
380 | |||
381 | ////sched_trace_action(was_scheduled, SLACK_DONATED_ACTION); | ||
382 | |||
383 | donate_slack(donator); | ||
384 | } | ||
385 | |||
386 | #else | ||
387 | #define check_donate_slack(a, b) | ||
388 | #endif | ||
389 | |||
390 | /* | ||
391 | * Adds the task to the candidate queue if it is eligible for slack stealing. | ||
392 | */ | ||
393 | static void check_slack_candidate(struct task_struct *task) | ||
394 | { | ||
395 | TRACE_TASK_SUB(task, "checking for candidate"); | ||
396 | if (is_srt(task) && | ||
397 | /* The task has been synchronously released */ | ||
398 | task_job_no(task) > 2 && | ||
399 | /* The SRT task is behind its server */ | ||
400 | task_srt_server(task)->job_no > task_job_no(task) && | ||
401 | /* The task hasn't already been added to the list */ | ||
402 | !head_in_list(&task_data(task)->candidate_list)) { | ||
403 | |||
404 | add_slack_candidate(task); | ||
405 | } else if (is_srt(task) && | ||
406 | is_released(task, litmus_clock()) && | ||
407 | !is_queued(task)) { | ||
408 | TRACE_TASK_SUB(task, "candidate has been released!"); | ||
409 | __add_ready(&srt_domain, task); | ||
410 | } | ||
411 | } | ||
412 | |||
413 | /* | ||
414 | * Returns the next eligible slack server. This will remove any expired | ||
415 | * slack servers still present in the list. | ||
416 | */ | ||
417 | static noinline server_t* next_eligible_slack_server(void) | ||
418 | { | ||
419 | server_t *next_slack = NULL; | ||
420 | lt_t now = litmus_clock(); | ||
421 | |||
422 | while (!list_empty(&slack_queue)) { | ||
423 | next_slack = list_entry(slack_queue.next, server_t, list); | ||
424 | BUG_ON(!next_slack); | ||
425 | |||
426 | if (lt_after(next_slack->deadline, now) && | ||
427 | lt_after(next_slack->budget, SLACK_MIN) && | ||
428 | !is_server_linked(next_slack)) { | ||
429 | break; | ||
430 | } else { | ||
431 | /* Slack has expired or has too little time */ | ||
432 | BUG_ON(next_slack->id == 1001); | ||
433 | remove_slack(next_slack); | ||
434 | next_slack = NULL; | ||
435 | } | ||
436 | } | ||
437 | |||
438 | return next_slack; | ||
439 | } | ||
440 | |||
441 | /* | ||
442 | * Returns the next SRT task that is tardy or will be tardy. If none | ||
443 | * are available, will return a tardy BE task if present. | ||
444 | */ | ||
445 | static noinline struct task_struct* next_eligible_slack(void) | ||
446 | { | ||
447 | struct task_struct *next = get_candidate(slack_candidates.next); | ||
448 | |||
449 | while (next && task_srt_server(next)->job_no <= task_job_no(next)) { | ||
450 | list_del_init(&task_data(next)->candidate_list); | ||
451 | next = get_candidate(slack_candidates.next); | ||
452 | } | ||
453 | |||
454 | /* We couldn't find an SRT to schedule. Find a BE which is | ||
455 | * either tardy or cannot run due to a lack of servers. | ||
456 | */ | ||
457 | if (!next) { | ||
458 | next = __peek_ready(&be_domain); | ||
459 | } | ||
460 | |||
461 | return next; | ||
462 | } | ||
463 | |||
464 | /* | ||
465 | * Order BE tasks FIFO. | ||
466 | */ | ||
467 | static inline int be_higher_prio(struct task_struct *first, struct task_struct *second) | ||
468 | { | ||
469 | return lt_before(get_release(first), get_release(second)) || | ||
470 | |||
471 | /* Break by PID */ | ||
472 | (get_release(first) == get_release(second) && | ||
473 | (first->pid < second->pid)); | ||
474 | } | ||
475 | |||
476 | static int be_ready_order(struct bheap_node *a, struct bheap_node *b) | ||
477 | { | ||
478 | struct task_struct *first, *second; | ||
479 | first = bheap2task(a); | ||
480 | second = bheap2task(b); | ||
481 | if (!first || !second) | ||
482 | return first && !second; | ||
483 | return be_higher_prio(first, second); | ||
484 | } | ||
485 | |||
486 | /* | ||
487 | * Order servers by EDF. | ||
488 | */ | ||
489 | static inline int server_higher_prio(server_t *first, server_t *second) | ||
490 | { | ||
491 | return lt_before(first->deadline, second->deadline) || | ||
492 | /* Break by id */ | ||
493 | (first->deadline == second->deadline && | ||
494 | first->id < second->id); | ||
495 | } | ||
496 | |||
497 | static int server_order(struct bheap_node *a, struct bheap_node *b) | ||
498 | { | ||
499 | server_t *first, *second; | ||
500 | first = a->value; | ||
501 | second = b->value; | ||
502 | return server_higher_prio(first, second); | ||
503 | } | ||
504 | |||
505 | /* | ||
506 | * Order CPU's by deadlines of their servers. | ||
507 | */ | ||
508 | static int cpu_lower_prio(struct bheap_node *a, struct bheap_node *b) | ||
509 | { | ||
510 | cpu_entry_t *first, *second; | ||
511 | first = a->value; | ||
512 | second = b->value; | ||
513 | if (first->linked && second->linked) { | ||
514 | return !server_higher_prio(first->linked_server, | ||
515 | second->linked_server); | ||
516 | } | ||
517 | return second->linked && !first->linked; | ||
518 | } | ||
519 | |||
520 | /* | ||
521 | * Move the CPU entry to the correct position in the queue. | ||
522 | */ | ||
523 | static inline void update_cpu_position(cpu_entry_t *entry) | ||
524 | { | ||
525 | if (likely(bheap_node_in_heap(entry->hn))) | ||
526 | bheap_delete(cpu_lower_prio, &cpu_heap, entry->hn); | ||
527 | /* Don't leave HRT CPUs in the heap as their order only matters | ||
528 | * for global preempts. | ||
529 | */ | ||
530 | if (!entry->linked || !is_hrt(entry->linked)) | ||
531 | bheap_insert(cpu_lower_prio, &cpu_heap, entry->hn); | ||
532 | } | ||
533 | |||
534 | static inline cpu_entry_t* lowest_prio_cpu(void) | ||
535 | { | ||
536 | struct bheap_node *hn = bheap_peek(cpu_lower_prio, &cpu_heap); | ||
537 | return (hn) ? hn->value : NULL; | ||
538 | } | ||
539 | |||
540 | static inline int check_hrt_server_initialized(hrt_server_t *hrt_server) | ||
541 | { | ||
542 | return hrt_server->server.wcet && hrt_server->server.period; | ||
543 | } | ||
544 | |||
545 | /* | ||
546 | * Arms the slack timer for the server, if necessary. | ||
547 | */ | ||
548 | static void slack_timer_arm(hrt_server_t *hrt_server) | ||
549 | { | ||
550 | int cpu, err; | ||
551 | cpu_entry_t *entry; | ||
552 | struct hrtimer *timer; | ||
553 | lt_t now = litmus_clock(), when_to_fire; | ||
554 | |||
555 | if (!check_hrt_server_initialized(hrt_server)) { | ||
556 | TRACE_SERVER_SUB(&hrt_server->server, "not initialized"); | ||
557 | return; | ||
558 | } | ||
559 | |||
560 | timer = &hrt_server->slack_timer; | ||
561 | entry = container_of(hrt_server, cpu_entry_t, hrt_server); | ||
562 | |||
563 | #ifdef SLACK_ON_MASTER | ||
564 | if (edf_hsb_release_master != NO_CPU) | ||
565 | cpu = edf_hsb_release_master; | ||
566 | else | ||
567 | #endif | ||
568 | cpu = entry->cpu; | ||
569 | |||
570 | when_to_fire = hrt_server->server.deadline - hrt_server->server.budget; | ||
571 | |||
572 | /* Ensure the timer is needed */ | ||
573 | if (hrtimer_active(timer) || hrt_server->server.deadline == 0 || | ||
574 | hrt_server->no_slack || hrt_server->server.budget == 0 || | ||
575 | !hrt_server->ready) { | ||
576 | TRACE_SERVER_SUB(&hrt_server->server, | ||
577 | "not arming slack timer on P%d, %d %d %d %d %d", | ||
578 | entry->cpu, | ||
579 | hrtimer_active(timer), hrt_server->server.deadline == 0, | ||
580 | hrt_server->no_slack, hrt_server->server.budget == 0, | ||
581 | !hrt_server->ready); | ||
582 | return; | ||
583 | } | ||
584 | |||
585 | if (when_to_fire >= hrt_server->server.deadline) { | ||
586 | TRACE_SUB("wtf: %llu, dead: %llu, bud: %llu", | ||
587 | when_to_fire, hrt_server->server.deadline, | ||
588 | hrt_server->server.budget); | ||
589 | BUG_ON(1); | ||
590 | } | ||
591 | |||
592 | /* Arm timer */ | ||
593 | if (lt_after_eq(now, when_to_fire)) { | ||
594 | /* 'Fire' immediately */ | ||
595 | TRACE_SERVER_SUB(&hrt_server->server, | ||
596 | "immediate: %llu", when_to_fire); | ||
597 | hrt_server->no_slack = 1; | ||
598 | } else if (cpu != smp_processor_id()) { | ||
599 | err = hrtimer_start_on(cpu, | ||
600 | &hrt_server->slack_timer_info, | ||
601 | &hrt_server->slack_timer, | ||
602 | ns_to_ktime(when_to_fire), | ||
603 | HRTIMER_MODE_ABS_PINNED); | ||
604 | if (err) | ||
605 | TRACE_SERVER_SUB(&hrt_server->server, "failed to arm slack"); | ||
606 | } else { | ||
607 | __hrtimer_start_range_ns(timer, ns_to_ktime(when_to_fire), | ||
608 | 0, HRTIMER_MODE_ABS_PINNED, 0); | ||
609 | } | ||
610 | |||
611 | TRACE_SUB("slack timer 0x%x armed to fire at %llu on P%d", | ||
612 | timer, TIME(when_to_fire), entry->cpu); | ||
613 | } | ||
614 | |||
615 | /* | ||
616 | * Does nothing if the slack timer is not armed. | ||
617 | */ | ||
618 | static inline void slack_timer_cancel(hrt_server_t *hrt_server) | ||
619 | { | ||
620 | int ret; | ||
621 | if (hrtimer_active(&hrt_server->slack_timer)) { | ||
622 | ret = hrtimer_try_to_cancel(&hrt_server->slack_timer); | ||
623 | if (ret == -1) { | ||
624 | TRACE_SERVER_SUB(&hrt_server->server, | ||
625 | "slack timer was running concurrently"); | ||
626 | } else { | ||
627 | TRACE_SERVER_SUB(&hrt_server->server, | ||
628 | "slack timer cancelled"); | ||
629 | } | ||
630 | } else { | ||
631 | TRACE_SERVER_SUB(&hrt_server->server, "slack not active"); | ||
632 | } | ||
633 | } | ||
634 | |||
635 | /* | ||
636 | * Handles subtraction of lt_t without underflows. | ||
637 | */ | ||
638 | static inline lt_t lt_subtract(lt_t a, lt_t b) | ||
639 | { | ||
640 | long long sub = (long long)a - (long long)b; | ||
641 | if (sub >= 0) | ||
642 | return sub; | ||
643 | else | ||
644 | return 0; | ||
645 | } | ||
646 | |||
647 | static void requeue_server(server_t *server, lt_t now) | ||
648 | { | ||
649 | int added = 0; | ||
650 | hrt_server_t *hrt_server; | ||
651 | |||
652 | if (server->type == S_SRT) | ||
653 | return; | ||
654 | |||
655 | if (server->type == S_SLACK) { | ||
656 | add_slack(server); | ||
657 | return; | ||
658 | } | ||
659 | |||
660 | if (lt_before(now, server->release)) { | ||
661 | added = add_server_release(server, &server_domain); | ||
662 | } | ||
663 | |||
664 | if (!added) { | ||
665 | /* Mark servers as released */ | ||
666 | if (server->type == S_HRT) { | ||
667 | TRACE_SERVER_SUB(server, "now ready at %llu", now); | ||
668 | hrt_server = container_of(server, hrt_server_t, server); | ||
669 | hrt_server->ready = 1; | ||
670 | remove_slack(server_slack(server)); | ||
671 | hrt_server->no_slack = 0; | ||
672 | ////sched_trace_action(NULL, SERVER_RELEASED_ACTION); | ||
673 | } else if (server->type == S_BE) { | ||
674 | TRACE_SERVER_SUB(server, "BE added to ready"); | ||
675 | bheap_insert(server_order, &be_ready_servers, server->hn); | ||
676 | } | ||
677 | } else { | ||
678 | BUG_ON(bheap_node_in_heap(server->hn)); | ||
679 | } | ||
680 | } | ||
681 | |||
682 | /* | ||
683 | * Absorbs a task's execution time into its donator. | ||
684 | */ | ||
685 | static void reclaim_slack(server_t *slack) | ||
686 | { | ||
687 | lt_t exec; | ||
688 | server_t *donator = server_slack(slack); | ||
689 | |||
690 | if (!donator || lt_before_eq(slack->deadline, litmus_clock())) | ||
691 | return; | ||
692 | |||
693 | /* SRT servers do not ever reclaim slack */ | ||
694 | ////sched_trace_action(NULL, SLACK_RECLAIM_ACTION); | ||
695 | |||
696 | exec = slack->wcet - slack->budget; | ||
697 | TRACE_SERVER_SUB(donator, "reclaiming %llu slack", TIME(exec)); | ||
698 | |||
699 | BUG_ON(is_server_linked(donator)); | ||
700 | BUG_ON(!slack->wcet); | ||
701 | BUG_ON(!donator->budget); | ||
702 | |||
703 | donator->budget = lt_subtract(donator->budget, exec); | ||
704 | slack->wcet = slack->budget; | ||
705 | |||
706 | /* If budget exhausted, server needs to wait for next release */ | ||
707 | if (!donator->budget) { | ||
708 | TRACE_SERVER_SUB(donator, "exhausted by slack"); | ||
709 | } | ||
710 | } | ||
711 | |||
712 | /* | ||
713 | * Begins server execution and arms any timers necessary. | ||
714 | */ | ||
715 | static noinline void link_server(cpu_entry_t *entry, | ||
716 | server_t *next_server) | ||
717 | { | ||
718 | |||
719 | if (entry->linked) { | ||
720 | /* Massive state check */ | ||
721 | if (next_server->type == S_SRT) { | ||
722 | /* SRT task cannot get ahead of its server */ | ||
723 | BUG_ON(next_server->job_no + 1 < task_job_no(entry->linked)); | ||
724 | BUG_ON(lt_after(get_deadline(entry->linked), | ||
725 | next_server->deadline)); | ||
726 | } else if (next_server->type == S_HRT) { | ||
727 | /* HRT servers should never, ever migrate */ | ||
728 | BUG_ON(entry->cpu != task_cpu(entry->linked)); | ||
729 | BUG_ON(!entry->hrt_server.ready); | ||
730 | } else if (next_server->type == S_SLACK) { | ||
731 | /* Should have already been removed from slack list */ | ||
732 | BUG_ON(head_in_list(&task_data(entry->linked)->candidate_list)); | ||
733 | BUG_ON(is_be(entry->linked) && is_queued(entry->linked)); | ||
734 | ////sched_trace_action(entry->linked, SLACK_RUN_ACTION); | ||
735 | BUG_ON(is_srt(entry->linked) && | ||
736 | task_srt_server(entry->linked)->job_no <= | ||
737 | task_job_no(entry->linked)); | ||
738 | } else { /* BE */ | ||
739 | /* Should have already been removed from ready heap */ | ||
740 | BUG_ON(bheap_node_in_heap(next_server->hn)); | ||
741 | BUG_ON(is_queued(entry->linked)); | ||
742 | ////sched_trace_action(entry->linked, next_server->id); | ||
743 | } | ||
744 | |||
745 | if (next_server->type != S_SLACK && | ||
746 | (head_in_list(&server_slack(next_server)->list))) { | ||
747 | remove_slack(server_slack(next_server)); | ||
748 | } | ||
749 | |||
750 | entry->linked_server = next_server; | ||
751 | server_run(entry->linked_server, entry->linked); | ||
752 | } | ||
753 | |||
754 | /* Timer necessary whenever an HRT is not running */ | ||
755 | if (!entry->linked || !is_hrt(entry->linked)) | ||
756 | slack_timer_arm(&entry->hrt_server); | ||
757 | else | ||
758 | slack_timer_cancel(&entry->hrt_server); | ||
759 | } | ||
760 | |||
761 | /* | ||
762 | * Stops server execution and timers. This will also re-add servers | ||
763 | * to any collections they should be members of. | ||
764 | */ | ||
765 | static noinline void unlink_server(cpu_entry_t *entry, int requeue) | ||
766 | { | ||
767 | server_t *server = entry->linked_server; | ||
768 | hrt_server_t *hrt_server = &entry->hrt_server; | ||
769 | |||
770 | BUG_ON(!entry->linked_server); | ||
771 | |||
772 | server_stop(entry->linked_server); | ||
774 | entry->linked_server = NULL; | ||
775 | |||
776 | if (!requeue) | ||
777 | return; | ||
778 | |||
779 | if (server->type == S_SLACK && server->deadline) { | ||
780 | add_slack(server); | ||
781 | ////sched_trace_action(entry->linked, SLACK_STOP_ACTION); | ||
782 | |||
783 | /* Donator needs to absorb slack execution time */ | ||
784 | reclaim_slack(server); | ||
785 | } else if (server->type != S_SRT) { | ||
786 | requeue_server(server, litmus_clock()); | ||
787 | } | ||
788 | |||
789 | if (server->type == S_HRT && hrt_server->ready) | ||
790 | BUG_ON(head_in_list(&server_slack(server)->list)); | ||
791 | } | ||
792 | |||
793 | static void requeue(struct task_struct *task, rt_domain_t *domain); | ||
794 | static inline rt_domain_t* get_rt_domain(cpu_entry_t *entry, struct task_struct *task); | ||
795 | |||
796 | /* Update the link of a CPU. | ||
797 | * Handles the case where the to-be-linked task is already | ||
798 | * scheduled on a different CPU. The last argument is only needed | ||
799 | * for BE tasks as their servers can't be determined here. | ||
800 | */ | ||
801 | static noinline void link_to_cpu(cpu_entry_t *entry, | ||
802 | struct task_struct* linked, | ||
803 | server_t* next_server) | ||
804 | { | ||
805 | cpu_entry_t *sched; | ||
806 | server_t *tmp_server; | ||
807 | struct task_struct *tmp_task; | ||
808 | int on_cpu; | ||
809 | |||
810 | BUG_ON(linked && !is_realtime(linked)); | ||
811 | BUG_ON(linked && is_hrt(linked) && entry->cpu != task_cpu(linked)); | ||
812 | BUG_ON(entry->cpu == edf_hsb_release_master); | ||
813 | |||
814 | if (linked) | ||
815 | TRACE_TASK_SERVER_SUB(linked, next_server, "linking to P%d", | ||
816 | entry->cpu); | ||
817 | |||
818 | /* Currently linked task is set to be unlinked. */ | ||
819 | if (entry->linked) { | ||
820 | unlink_server(entry, 1); | ||
821 | entry->linked->rt_param.linked_on = NO_CPU; | ||
822 | entry->linked = NULL; | ||
823 | } | ||
824 | |||
825 | /* Link new task to CPU. */ | ||
826 | if (linked) { | ||
827 | set_rt_flags(linked, RT_F_RUNNING); | ||
828 | /* Handle the case where the task is already scheduled somewhere! */ | ||
829 | on_cpu = linked->rt_param.scheduled_on; | ||
830 | if (on_cpu != NO_CPU) { | ||
831 | sched = &per_cpu(cpu_entries, on_cpu); | ||
832 | /* This can only happen if the task is not already linked */ | ||
833 | BUG_ON(sched->linked == linked); | ||
834 | |||
835 | if (entry != sched && | ||
836 | sched->linked && is_hrt(sched->linked)) { | ||
837 | /* We are already scheduled on a CPU with an HRT */ | ||
838 | TRACE_TASK_SUB(linked, | ||
839 | "cannot move to scheduled CPU P%d", | ||
840 | sched->cpu); | ||
841 | |||
842 | requeue_server(next_server, litmus_clock()); | ||
843 | requeue(linked, get_rt_domain(entry, linked)); | ||
844 | |||
845 | linked = NULL; | ||
846 | next_server = NULL; | ||
847 | } else if (entry != sched) { | ||
848 | /* Link to the CPU we are scheduled on by swapping | ||
849 | * with that CPU's linked task. | ||
850 | */ | ||
851 | BUG_ON(is_hrt(linked)); | ||
852 | |||
853 | TRACE_TASK_SUB(linked,"already scheduled on P%d", | ||
854 | sched->cpu); | ||
855 | |||
856 | tmp_task = sched->linked; | ||
857 | tmp_server = sched->linked_server; | ||
858 | |||
859 | if (tmp_task) | ||
860 | unlink_server(sched, 0); | ||
861 | |||
862 | linked->rt_param.linked_on = sched->cpu; | ||
863 | sched->linked = linked; | ||
864 | link_server(sched, next_server); | ||
865 | |||
866 | update_cpu_position(sched); | ||
867 | |||
868 | linked = tmp_task; | ||
869 | next_server = tmp_server; | ||
870 | } | ||
871 | } | ||
872 | if (linked) /* Might be NULL due to swap */ | ||
873 | linked->rt_param.linked_on = entry->cpu; | ||
874 | } | ||
875 | entry->linked = linked; | ||
876 | link_server(entry, next_server); | ||
877 | update_cpu_position(entry); | ||
878 | |||
879 | BUG_ON(!entry->linked && entry->linked_server); | ||
880 | |||
881 | if (linked) | ||
882 | TRACE_TASK_SERVER_SUB(linked, next_server, | ||
883 | "linked to %d", entry->cpu); | ||
884 | else | ||
885 | TRACE_SUB("NULL linked to %d", entry->cpu); | ||
886 | } | ||
887 | |||
888 | /* | ||
889 | * Grab the local HRT or global SRT or BE domain for the task. | ||
890 | */ | ||
891 | static inline rt_domain_t* get_rt_domain(cpu_entry_t *entry, | ||
892 | struct task_struct *task) | ||
893 | { | ||
894 | if (is_hrt(task)) | ||
895 | return &entry->hrt_server.hrt_domain; | ||
896 | else if (is_srt(task)) | ||
897 | return &srt_domain; | ||
898 | else /* BE */ | ||
899 | return &be_domain; | ||
900 | } | ||
901 | |||
902 | /* | ||
903 | * Ensures the task is not linked anywhere nor present in any ready queues. | ||
904 | */ | ||
905 | static noinline void unlink(struct task_struct* t) | ||
906 | { | ||
907 | cpu_entry_t *entry; | ||
908 | |||
909 | BUG_ON(!t); | ||
910 | |||
911 | if (t->rt_param.linked_on != NO_CPU) { | ||
912 | /* Unlink */ | ||
913 | entry = task_linked_entry(t); | ||
914 | link_to_cpu(entry, NULL, NULL); | ||
915 | } else if (is_queued(t)) { | ||
916 | entry = task_sched_entry(t); | ||
917 | |||
918 | /* A task that is unlinked due to a slack server must be treated | ||
919 | * differently. It is probably queued in a release_queue, but | ||
920 | * a race condition could allow is_released() to return true | ||
921 | * even when the task has not yet been released. Attempting | ||
922 | * to remove the task in this case would be disastrous. | ||
923 | */ | ||
924 | if (entry->scheduled == t && | ||
925 | entry->scheduled_server && /* Can be NULL on task_new */ | ||
926 | entry->scheduled_server->type == S_SLACK) { | ||
927 | |||
928 | TRACE_TASK_SUB(t, "unlinked on slack server"); | ||
929 | |||
930 | } else if (is_released(t, litmus_clock())) { | ||
931 | /* This is an interesting situation: t is scheduled, | ||
932 | * but has already been unlinked. It was re-added to | ||
933 | * a ready queue of some sort but now needs to | ||
934 | * be removed. This usually happens when a job has | ||
935 | * been preempted but completes before it is | ||
936 | * descheduled. | ||
937 | */ | ||
938 | TRACE_TASK_SUB(t, "removing from domain"); | ||
939 | remove(get_rt_domain(entry, t), t); | ||
940 | BUG_ON(is_queued(t)); | ||
941 | } | ||
942 | } | ||
943 | |||
944 | if (head_in_list(&task_data(t)->candidate_list)) { | ||
945 | list_del_init(&task_data(t)->candidate_list); | ||
946 | } | ||
947 | |||
948 | } | ||
949 | |||
950 | /* | ||
951 | * A job generated by an HRT task is eligible if either the job's deadline | ||
952 | * is earlier than the server's next deadline, or the server has zero slack | ||
953 | * time in its current period. | ||
954 | */ | ||
955 | static inline int is_eligible(struct task_struct *task, | ||
956 | hrt_server_t *hrt_server) | ||
957 | { | ||
958 | TRACE_TASK_SUB(task, "%d %d %llu %llu", | ||
959 | hrt_server->ready, hrt_server->no_slack, | ||
960 | hrt_server->server.deadline, | ||
961 | get_deadline(task)); | ||
962 | return hrt_server->ready && !is_server_linked(&hrt_server->server) && | ||
963 | (hrt_server->no_slack || | ||
964 | lt_after_eq(hrt_server->server.deadline, get_deadline(task))); | ||
965 | } | ||
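| /* | ||
|  * For instance (hypothetical numbers): with a ready server whose | ||
|  * deadline is 50ms, a job with deadline 40ms is eligible because | ||
|  * 50 >= 40, while a job with deadline 60ms only becomes eligible | ||
|  * once the server enters its no_slack phase. | ||
|  */ | ||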
966 | |||
967 | /* | ||
968 | * Set the server to release at the closest preceding deadline to time. | ||
969 | */ | ||
970 | static inline void catchup_server(server_t *server, lt_t time) | ||
971 | { | ||
972 | lt_t diff, sub; | ||
973 | |||
974 | diff = time - server->deadline; | ||
975 | sub = diff % server->period; | ||
976 | |||
977 | server_release_at(server, time - sub); | ||
978 | TRACE_SERVER_SUB(server, "catching up to %llu", time); | ||
979 | } | ||
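| /* | ||
|  * For instance (hypothetical numbers): with deadline = 20, period = 10 | ||
|  * and time = 47, diff = 27 and sub = 27 % 10 = 7, so the server is | ||
|  * re-released at 47 - 7 = 40, the latest period boundary at or before | ||
|  * the requested time. | ||
|  */ | ||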
980 | |||
981 | static noinline int catchup_srt_server(struct task_struct *task) | ||
982 | { | ||
983 | int jobs, rv = 0; | ||
984 | lt_t release; | ||
985 | lt_t now = litmus_clock(); | ||
986 | server_t *srt_server = task_srt_server(task); | ||
987 | |||
988 | if (lt_before(srt_server->deadline, now) && | ||
989 | srt_server->job_no > 1) { | ||
990 | /* Calculate the number of jobs the server is behind */ | ||
991 | jobs = lt_subtract(now, srt_server->deadline) / | ||
992 | srt_server->period + 1; | ||
993 | |||
994 | /* Get the new release */ | ||
995 | release = srt_server->release + jobs * srt_server->period; | ||
996 | |||
997 | TRACE_SERVER_SUB(srt_server, "catching up to %llu, job %d", | ||
998 | release, srt_server->job_no + jobs); | ||
999 | |||
1000 | BUG_ON(jobs < 1); | ||
1001 | |||
1002 | /* Update server state */ | ||
1003 | server_release_at(srt_server, release); | ||
1004 | srt_server->job_no += jobs - 1; | ||
1005 | |||
1006 | /* Force task to take characteristics of server */ | ||
1007 | tsk_rt(task)->job_params.release = srt_server->release; | ||
1008 | tsk_rt(task)->job_params.deadline = srt_server->deadline; | ||
1009 | |||
1010 | rv = 1; | ||
1011 | |||
1012 | ////sched_trace_action(task, SERVER_RELEASED_ACTION); | ||
1013 | |||
1014 | } else if (lt_before(srt_server->deadline, now) && | ||
1015 | srt_server->job_no <= 1) { | ||
1016 | |||
1017 | server_release_at(srt_server, get_release(task)); | ||
1018 | srt_server->job_no = task_job_no(task); | ||
1019 | } | ||
1020 | |||
1021 | BUG_ON(srt_server->job_no == 0); | ||
1022 | |||
1023 | return rv; | ||
1024 | } | ||
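| /* | ||
|  * For instance (hypothetical numbers): with deadline = 30, period = 10 | ||
|  * and now = 75, jobs = (75 - 30) / 10 + 1 = 5, so the server's release | ||
|  * jumps forward five periods while job_no advances by jobs - 1; the | ||
|  * task then inherits the server's new release and deadline. | ||
|  */ | ||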
1025 | |||
1026 | /* | ||
1027 | * If the server is eligible, return the next eligible job. If the server is | ||
1028 | * ineligible or there are no eligible jobs, returns NULL. This will re-release | ||
1029 | * any servers that are behind. | ||
1030 | */ | ||
1031 | static noinline struct task_struct* next_eligible_hrt(hrt_server_t *hrt_server) | ||
1032 | { | ||
1033 | lt_t now = litmus_clock(); | ||
1034 | lt_t dead, slack, budget; | ||
1035 | struct task_struct *task = __peek_ready(&hrt_server->hrt_domain); | ||
1036 | |||
1037 | /* Catch up server if it is initialized, not running, and late */ | ||
1038 | if (check_hrt_server_initialized(hrt_server) && | ||
1039 | !is_server_linked(&hrt_server->server)) { | ||
1040 | |||
1041 | dead = hrt_server->server.deadline; | ||
1042 | budget = hrt_server->server.budget; | ||
1043 | slack = lt_subtract(dead, budget); | ||
1044 | |||
1045 | TRACE_SERVER_SUB(&hrt_server->server, "dead: %llu, budget: %llu, " | ||
1046 | "now: %llu, slack: %llu", | ||
1047 | TIME(dead), TIME(budget), TIME(now), TIME(slack)); | ||
1048 | |||
1049 | if (!head_in_list(&hrt_server->server.release_list) && | ||
1050 | lt_before_eq(dead, now)) { | ||
1051 | /* The server missed a release */ | ||
1052 | catchup_server(&hrt_server->server, now); | ||
1053 | TRACE_SERVER_SUB(&hrt_server->server, "now ready"); | ||
1054 | hrt_server->ready = 1; | ||
1055 | remove_slack(server_slack(&hrt_server->server)); | ||
1056 | hrt_server->no_slack = 0; | ||
1057 | |||
1058 | slack = lt_subtract(hrt_server->server.deadline, | ||
1059 | hrt_server->server.budget); | ||
1060 | |||
1061 | ////sched_trace_action(task, SERVER_RELEASED_ACTION); | ||
1062 | } | ||
1063 | |||
1064 | /* If the slack timer is active, this is not necessary */ | ||
1065 | if (!hrtimer_active(&hrt_server->slack_timer) && hrt_server->ready) { | ||
1066 | if (lt_before_eq(slack, now) && !hrt_server->no_slack) { | ||
1067 | /* The server missed the shift to no slack */ | ||
1068 | TRACE_SERVER_SUB(&hrt_server->server, "no slack: %llu", | ||
1069 | TIME(slack)); | ||
1070 | hrt_server->no_slack = 1; | ||
1071 | ////sched_trace_action(task, NO_SLACK_ACTION); | ||
1072 | } else { | ||
1073 | slack_timer_arm(hrt_server); | ||
1074 | } | ||
1075 | } | ||
1076 | |||
1077 | } else { | ||
1078 | TRACE_SERVER_SUB(&hrt_server->server, "%llu %d %llu %d %d", | ||
1079 | hrt_server->server.deadline, | ||
1080 | is_server_linked(&hrt_server->server), | ||
1081 | now, check_hrt_server_initialized(hrt_server), | ||
1082 | !is_server_linked(&hrt_server->server)); | ||
1083 | } | ||
1084 | |||
1085 | if (!hrt_server->server.budget || | ||
1086 | (task && !is_eligible(task, hrt_server))) { | ||
1087 | |||
1088 | if (!hrt_server->server.budget && | ||
1089 | !head_in_list(&hrt_server->server.release_list)) { | ||
1090 | TRACE_SERVER_SUB(&hrt_server->server, "requeuing"); | ||
1091 | catchup_server(&hrt_server->server, now); | ||
1092 | requeue_server(&hrt_server->server, now); | ||
1093 | slack_timer_arm(hrt_server); | ||
1094 | } | ||
1095 | |||
1096 | if (task) { | ||
1097 | TRACE_TASK_SUB(task, "not eligible, budget: %llu", | ||
1098 | TIME(hrt_server->server.budget)); | ||
1099 | } | ||
1100 | task = NULL; | ||
1101 | |||
1102 | /* Donate slack if we have nothing to schedule */ | ||
1103 | if (hrt_server->ready && hrt_server->no_slack) { | ||
1104 | check_donate_slack(&hrt_server->server, NULL); | ||
1105 | } | ||
1106 | } | ||
1107 | |||
1108 | return task; | ||
1109 | } | ||
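| /* | ||
|  * The slack boundary used above is (deadline - remaining budget). For | ||
|  * instance, a server with deadline 100ms and 3ms of budget left must | ||
|  * begin its HRT work by 97ms: if that boundary has already passed the | ||
|  * server flips to no_slack on the spot, otherwise the slack timer is | ||
|  * armed to fire at the boundary. | ||
|  */ | ||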
1110 | |||
1111 | /* | ||
1112 | * This will catch up the next SRT task's server if it is behind. | ||
1113 | */ | ||
1114 | static noinline struct task_struct* next_eligible_srt(void) | ||
1115 | { | ||
1116 | int done = 0; | ||
1117 | struct task_struct *next_srt; | ||
1118 | |||
1119 | while (!done) { | ||
1120 | next_srt = __peek_ready(&srt_domain); | ||
1121 | |||
1122 | /* A blocking task might pollute the SRT domain if the | ||
1123 | * task blocked while it was being run by a slack server. | ||
1124 | * Remove and ignore this task. | ||
1125 | */ | ||
1126 | while (next_srt && (get_rt_flags(next_srt) == RT_F_BLOCK || | ||
1127 | unlikely(!is_realtime(next_srt)) || | ||
1128 | tsk_rt(next_srt)->linked_on != NO_CPU)) { | ||
1129 | TRACE_TASK_SUB(next_srt, "removing finished task"); | ||
1130 | remove(&srt_domain, next_srt); | ||
1131 | next_srt = __peek_ready(&srt_domain); | ||
1132 | } | ||
1133 | |||
1134 | /* If the task blocked for a while or has otherwise not been | ||
1135 | * accessed, its server could have fallen behind. | ||
1136 | */ | ||
1137 | if (next_srt) { | ||
1138 | done = !catchup_srt_server(next_srt); | ||
1139 | |||
1140 | /* The parameters were modified. Re-insert the task. */ | ||
1141 | if (!done) { | ||
1142 | remove(&srt_domain, next_srt); | ||
1143 | __add_ready(&srt_domain, next_srt); | ||
1144 | } else if (is_server_linked(task_srt_server(next_srt))){ | ||
1145 | remove(&srt_domain, next_srt); | ||
1146 | done = 0; | ||
1147 | } | ||
1148 | } else { | ||
1149 | done = 1; | ||
1150 | } | ||
1151 | } | ||
1152 | |||
1153 | return next_srt; | ||
1154 | } | ||
1155 | |||
1156 | static inline server_t* next_be_server(void) | ||
1157 | { | ||
1158 | struct bheap_node *hn = bheap_peek(server_order, &be_ready_servers); | ||
1159 | return (hn) ? hn->value : NULL; | ||
1160 | } | ||
1161 | |||
1162 | static noinline server_t* next_eligible_be_server(void) | ||
1163 | { | ||
1164 | server_t *be_server = next_be_server(); | ||
1165 | lt_t now = litmus_clock(); | ||
1166 | |||
1167 | /* Catch up any late BE servers. This happens when the servers could | ||
1168 | * not find tasks to schedule or if the system is overutilized. | ||
1169 | */ | ||
1170 | while (be_server && (lt_before_eq(be_server->deadline, now) || | ||
1171 | is_server_linked(be_server))) { | ||
1172 | if (!be_server->deadline) { | ||
1173 | TRACE_SERVER_SUB(be_server, "not initialized"); | ||
1174 | return NULL; | ||
1175 | } | ||
1176 | bheap_delete(server_order, &be_ready_servers, | ||
1177 | be_server->hn); | ||
1178 | |||
1179 | if (is_server_linked(be_server)) { | ||
1180 | TRACE_SERVER_SUB(be_server, "linked"); | ||
1181 | be_server = next_be_server(); | ||
1182 | return NULL; | ||
1183 | } | ||
1184 | |||
1185 | catchup_server(be_server, now); | ||
1186 | check_donate_slack(be_server, NULL); | ||
1187 | bheap_insert(server_order, &be_ready_servers, | ||
1188 | be_server->hn); | ||
1189 | TRACE_SERVER_SUB(be_server, "catching up BE server"); | ||
1190 | be_server = next_be_server(); | ||
1191 | ////sched_trace_action(NULL, SERVER_RELEASED_ACTION); /* Release */ | ||
1192 | } | ||
1193 | |||
1194 | if (be_server && lt_before(now, be_server->release)) { | ||
1195 | TRACE_SERVER_SUB(be_server, "not released"); | ||
1196 | be_server = NULL; | ||
1197 | } | ||
1198 | |||
1199 | if (be_server) { | ||
1200 | TRACE_SERVER_SUB(be_server, "dead: %llu, rel: %llu, budget: %llu", | ||
1201 | be_server->deadline, be_server->release, | ||
1202 | be_server->budget); | ||
1203 | |||
1204 | } | ||
1205 | |||
1206 | return be_server; | ||
1207 | } | ||
1208 | |||
1209 | /* | ||
1210 | * Adds a task to the appropriate queue (ready / release) in a domain. | ||
1211 | */ | ||
1212 | static noinline void requeue(struct task_struct *task, rt_domain_t *domain) | ||
1213 | { | ||
1214 | lt_t now = litmus_clock(); | ||
1215 | int was_added; | ||
1216 | |||
1217 | BUG_ON(!is_realtime(task)); | ||
1218 | if (head_in_list(&task_data(task)->candidate_list)) { | ||
1219 | list_del_init(&task_data(task)->candidate_list); | ||
1220 | } | ||
1221 | |||
1222 | check_slack_candidate(task); | ||
1223 | |||
1224 | if (is_queued(task)) { | ||
1225 | TRACE_TASK_SUB(task, "not requeueing, already queued"); | ||
1226 | } else if (is_released(task, now)) { | ||
1227 | TRACE_TASK_SUB(task, "requeuing on ready %llu %llu %llu %llu", | ||
1228 | get_release(task), get_deadline(task), | ||
1229 | get_rt_period(task), now); | ||
1230 | __add_ready(domain, task); | ||
1231 | } else { | ||
1232 | /* Task needs to wait until it is released */ | ||
1233 | TRACE_TASK_SUB(task, "requeuing on release"); | ||
1234 | |||
1235 | was_added = add_release(domain, task); | ||
1236 | |||
1237 | /* The release time happened before we added ourselves | ||
1238 | * to the heap. We can now add to ready. | ||
1239 | */ | ||
1240 | if (!was_added) { | ||
1241 | TRACE_TASK_SUB(task, "missed release, going to ready"); | ||
1242 | __add_ready(domain, task); | ||
1243 | } | ||
1244 | } | ||
1245 | } | ||
1246 | |||
1247 | static inline void earlier_server_task(server_t *first, | ||
1248 | struct task_struct *first_task, | ||
1249 | server_t *second, | ||
1250 | struct task_struct *second_task, | ||
1251 | server_t **server, | ||
1252 | struct task_struct **task) | ||
1253 | { | ||
1254 | if (!first || | ||
1255 | (second && lt_before_eq(second->deadline, first->deadline))) { | ||
1256 | *server = second; | ||
1257 | *task = second_task; | ||
1258 | } else { | ||
1259 | *server = first; | ||
1260 | *task = first_task; | ||
1261 | } | ||
1262 | } | ||
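| /* | ||
|  * Note that equal deadlines favor the second pair, since the | ||
|  * comparison is lt_before_eq(); this is why next_global_task() below | ||
|  * must drop a slack server that ties with a BE server, or the tie | ||
|  * would be resolved in favor of the slack pair. | ||
|  */ | ||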
1263 | |||
1264 | /* | ||
1265 | * Set server and task to the next server and task respectively. | ||
1266 | * If entry is not null, the next server will see if it can schedule | ||
1267 | * entry's linked task. | ||
1268 | */ | ||
1269 | static void next_global_task(cpu_entry_t *entry, | ||
1270 | server_t **next_server, | ||
1271 | struct task_struct **next_task) | ||
1272 | { | ||
1273 | struct task_struct *next_srt, *next_be, *next_slack; | ||
1274 | server_t *be_server, *slack_server, *srt_server; | ||
1275 | |||
1276 | *next_server = NULL; | ||
1277 | *next_task = NULL; | ||
1278 | |||
1279 | next_srt = next_eligible_srt(); | ||
1280 | srt_server = (next_srt) ? task_srt_server(next_srt) : NULL; | ||
1281 | |||
1282 | next_be = __peek_ready(&be_domain); | ||
1283 | be_server = next_eligible_be_server(); | ||
1284 | |||
1285 | next_slack = next_eligible_slack(); | ||
1286 | slack_server = next_eligible_slack_server(); | ||
1287 | |||
1288 | TRACE_SUB("be_server: %d, next_be: %d, next_srt: %d, slack_server: %d " | ||
1289 | "next_slack: %d", (be_server) ? be_server->id : -1, | ||
1290 | (next_be) ? next_be->pid : -1, | ||
1291 | (next_srt) ? next_srt->pid : -1, | ||
1292 | (slack_server) ? slack_server->id : -1, | ||
1293 | (next_slack) ? next_slack->pid : -1); | ||
1294 | |||
1295 | /* Check if the servers can schedule the task linked to entry */ | ||
1296 | if (entry && entry->linked) { | ||
1297 | |||
1298 | if (entry->linked_server->type == S_BE && | ||
1299 | (!next_be || | ||
1300 | lt_before(get_release(entry->linked), | ||
1301 | get_release(next_be)))) { | ||
1302 | |||
1303 | next_be = entry->linked; | ||
1304 | } else if (entry->linked_server->type == S_SLACK && | ||
1305 | (!next_slack || | ||
1306 | lt_before(get_deadline(entry->linked), | ||
1307 | get_deadline(next_slack)))) { | ||
1308 | |||
1309 | next_slack = entry->linked; | ||
1310 | } | ||
1311 | } | ||
1312 | |||
1313 | /* Remove tasks without servers and vice versa from contention */ | ||
1314 | if (!next_be || !be_server) { | ||
1315 | next_be = NULL; | ||
1316 | be_server = NULL; | ||
1317 | } | ||
1318 | if (!next_slack || !slack_server) { | ||
1319 | next_slack = NULL; | ||
1320 | slack_server = NULL; | ||
1321 | } | ||
1322 | |||
1323 | /* Favor BE servers. If we don't, then a BE server might lose | ||
1324 | * out to its own slack. | ||
1325 | */ | ||
1326 | if (slack_server && be_server && | ||
1327 | be_server->deadline == slack_server->deadline) { | ||
1328 | next_slack = NULL; | ||
1329 | slack_server = NULL; | ||
1330 | } | ||
1331 | |||
1332 | /* There is probably a better way to do this */ | ||
1333 | earlier_server_task(srt_server, next_srt, | ||
1334 | be_server, next_be, | ||
1335 | next_server, next_task); | ||
1336 | earlier_server_task(*next_server, *next_task, | ||
1337 | slack_server, next_slack, | ||
1338 | next_server, next_task); | ||
1339 | |||
1340 | //BUG_ON(*next_server && lt_before(litmus_clock(), *next_server->release)); | ||
1341 | } | ||
1342 | |||
1343 | /* | ||
1344 | * Remove the task and server from any ready queues. | ||
1345 | */ | ||
1346 | static void remove_from_ready(server_t *server, struct task_struct *task, | ||
1347 | cpu_entry_t *entry) | ||
1348 | { | ||
1349 | server_t *slack; | ||
1350 | rt_domain_t *domain; | ||
1351 | BUG_ON(!server); | ||
1352 | BUG_ON(!entry); | ||
1353 | BUG_ON(!task); | ||
1354 | |||
1355 | if (server->type == S_SLACK) { | ||
1356 | TRACE_SERVER_SUB(server, "removed from slack list"); | ||
1357 | list_del_init(&server->list); | ||
1358 | |||
1359 | /* Remove from consideration of BE servers */ | ||
1360 | if (is_be(task) && is_queued(task)) { | ||
1361 | TRACE_TASK_SUB(task, "BE removed from ready"); | ||
1362 | remove(&be_domain, task); | ||
1363 | } | ||
1364 | |||
1365 | /* Remove from consideration of slack servers */ | ||
1366 | if (head_in_list(&task_data(task)->candidate_list)) { | ||
1367 | TRACE_TASK_SUB(task, "deleting candidate"); | ||
1368 | list_del_init(&task_data(task)->candidate_list); | ||
1369 | } | ||
1370 | } else { | ||
1371 | slack = server_slack(server); | ||
1372 | if (slack && head_in_list(&slack->list)) { | ||
1373 | remove_slack(slack); | ||
1374 | } | ||
1375 | if (server->type == S_BE) { | ||
1376 | TRACE_SERVER_SUB(server, "server removed from ready"); | ||
1377 | BUG_ON(!server->hn); | ||
1378 | bheap_delete(server_order, &be_ready_servers, | ||
1379 | server->hn); | ||
1380 | } | ||
1381 | if (is_queued(task)) { | ||
1382 | domain = get_rt_domain(entry, task); | ||
1383 | BUG_ON(!domain); | ||
1384 | TRACE_TASK_SUB(task, "removed from ready"); | ||
1385 | remove(domain, task); | ||
1386 | } | ||
1387 | } | ||
1388 | |||
1389 | BUG_ON(!task_data(task)); | ||
1390 | } | ||
1391 | |||
1392 | static void check_for_slack_preempt(struct task_struct*,server_t*,cpu_entry_t*, int); | ||
1393 | |||
1394 | /* | ||
1395 | * Finds and links the next server and task to an entry with no linked task. | ||
1396 | */ | ||
1397 | static void edf_hsb_pick_next(cpu_entry_t *entry) | ||
1398 | { | ||
1399 | struct task_struct *next_task, *linked; | ||
1400 | server_t *next_server; | ||
1401 | |||
1402 | BUG_ON(entry->linked); | ||
1403 | |||
1404 | next_task = next_eligible_hrt(&entry->hrt_server); | ||
1405 | if (next_task) | ||
1406 | next_server = &entry->hrt_server.server; | ||
1407 | else | ||
1408 | next_global_task(NULL, &next_server, &next_task); | ||
1409 | |||
1410 | |||
1411 | if (next_task) { | ||
1412 | remove_from_ready(next_server, next_task, entry); | ||
1413 | check_for_slack_preempt(next_task, next_server, entry, 1); | ||
1414 | TRACE_TASK_SERVER_SUB(next_task, next_server, | ||
1415 | "removing and picked"); | ||
1416 | |||
1417 | /* A slack preemption could cause something that was already | ||
1418 | * running to be 'swapped' to this CPU in link_to_cpu. | ||
1419 | */ | ||
1420 | if (entry->linked) { | ||
1421 | linked = entry->linked; | ||
1422 | unlink(entry->linked); | ||
1423 | requeue(linked, get_rt_domain(entry, linked)); | ||
1424 | TRACE_TASK_SUB(linked, "preempted next pick"); | ||
1425 | } | ||
1426 | link_to_cpu(entry, next_task, next_server); | ||
1427 | } | ||
1428 | } | ||
1429 | |||
1430 | /* | ||
1431 | * Preempt the currently running server and task with new ones. | ||
1432 | * It is possible that either only the server or the task is different here. | ||
1433 | */ | ||
1434 | static void preempt(cpu_entry_t *entry, struct task_struct *next, | ||
1435 | server_t *next_server, int slack_resched) | ||
1436 | { | ||
1437 | struct task_struct *linked; | ||
1438 | rt_domain_t *domain; | ||
1439 | |||
1440 | TRACE_TASK_SERVER_SUB(next, next_server, | ||
1441 | "preempting on P%d", entry->cpu); | ||
1442 | |||
1443 | remove_from_ready(next_server, next, entry); | ||
1444 | |||
1445 | check_for_slack_preempt(next, next_server, entry, slack_resched); | ||
1446 | linked = entry->linked; | ||
1447 | link_to_cpu(entry, next, next_server); | ||
1448 | |||
1449 | /* No need for this if only the server was preempted */ | ||
1450 | if (!linked || linked != entry->linked) { | ||
1451 | if (linked) { | ||
1452 | domain = get_rt_domain(entry, linked); | ||
1453 | requeue(linked, domain); | ||
1454 | } | ||
1455 | preempt_if_preemptable(entry->scheduled, entry->cpu); | ||
1456 | } | ||
1457 | } | ||
1458 | |||
1459 | /* | ||
1460 | * Causes a preemption if: | ||
1461 | * 1. task is being run by a slack server on a different CPU | ||
1462 | * 2. slack donated by server is running a task on a different CPU | ||
1463 | */ | ||
1464 | static noinline void check_for_slack_preempt(struct task_struct *task, | ||
1465 | server_t *server, | ||
1466 | cpu_entry_t *next_entry, | ||
1467 | int resched) | ||
1468 | { | ||
1469 | cpu_entry_t *entry = NULL; | ||
1470 | server_t *slack = server_slack(server); | ||
1471 | struct task_struct *slack_task; | ||
1472 | |||
1473 | /* The task is currently being run by another server */ | ||
1474 | if (tsk_rt(task)->linked_on != NO_CPU) { | ||
1475 | entry = task_linked_entry(task); | ||
1476 | |||
1477 | if (entry != next_entry) { | ||
1478 | TRACE_TASK_SUB(task, "was on P%d", entry->cpu); | ||
1479 | |||
1480 | unlink(task); | ||
1481 | |||
1482 | /* if (resched) { */ | ||
1483 | /* edf_hsb_pick_next(entry); */ | ||
1484 | /* preempt_if_preemptable(entry->scheduled, entry->cpu); */ | ||
1485 | /* } */ | ||
1486 | } | ||
1487 | } | ||
1488 | |||
1489 | /* The server's slack is currently being run */ | ||
1490 | if (slack && is_server_linked(slack)) { | ||
1491 | entry = &per_cpu(cpu_entries, slack->cpu); | ||
1492 | slack_task = server_task(slack); | ||
1493 | |||
1494 | unlink(slack_task); | ||
1495 | remove_slack(slack); | ||
1496 | requeue(slack_task, get_rt_domain(entry, slack_task)); | ||
1497 | |||
1498 | if (entry != next_entry && resched) { | ||
1499 | TRACE_SERVER_SUB(slack, "was on P%d", entry->cpu); | ||
1500 | /* Force a reschedule */ | ||
1501 | edf_hsb_pick_next(entry); | ||
1502 | preempt_if_preemptable(entry->scheduled, entry->cpu); | ||
1503 | } else { | ||
1504 | /* This can only happen on a preemption. If a preemption | ||
1505 | * happens, the task will be requeued elsewhere. | ||
1506 | * Obviously the next task has already been chosen. | ||
1507 | */ | ||
1508 | TRACE_SERVER_SUB(slack, "was on local P%d", entry->cpu); | ||
1509 | } | ||
1510 | } | ||
1511 | } | ||
1512 | |||
1513 | /* | ||
1514 | * Check for any necessary non-hrt preemptions. | ||
1515 | */ | ||
1516 | static void check_for_global_preempt(void) | ||
1517 | { | ||
1518 | cpu_entry_t *entry, *sched; | ||
1519 | server_t *next_server; | ||
1520 | int on_cpu; | ||
1521 | struct task_struct *next_task = (struct task_struct*)1; /* Not NULL */ | ||
1522 | |||
1523 | for (entry = lowest_prio_cpu(); entry; entry = lowest_prio_cpu()) { | ||
1524 | /* HRT cpus should not be in this heap */ | ||
1525 | BUG_ON(entry->linked && is_hrt(entry->linked)); | ||
1526 | |||
1527 | next_global_task(entry, &next_server, &next_task); | ||
1528 | |||
1529 | if (!next_server) | ||
1530 | break; | ||
1531 | |||
1532 | /* Preempt only if we have an earlier deadline */ | ||
1533 | if (entry->linked && | ||
1534 | !lt_before(next_server->deadline, | ||
1535 | entry->linked_server->deadline)) { | ||
1536 | break; | ||
1537 | } | ||
1538 | |||
1539 | /* If we are scheduled on another CPU, the link code | ||
1540 | * will force us to link to that CPU and try to link | ||
1541 | * that CPU's task to this CPU. This is impossible | ||
1542 | * if that CPU has linked HRT tasks which cannot | ||
1543 | * migrate. | ||
1544 | */ | ||
1545 | on_cpu = next_task->rt_param.scheduled_on; | ||
1546 | if (on_cpu != NO_CPU) { | ||
1547 | sched = &per_cpu(cpu_entries, on_cpu); | ||
1548 | |||
1549 | if (sched != entry && sched->linked && | ||
1550 | is_hrt(sched->linked)) { | ||
1551 | |||
1552 | TRACE_TASK_SUB(next_task, | ||
1553 | "Already on P%d", | ||
1554 | sched->cpu); | ||
1555 | break; | ||
1556 | } | ||
1557 | } | ||
1558 | |||
1559 | /* We do not reschedule if this causes a slack preemption | ||
1560 | * because we will detect if we should reschedule on the | ||
1561 | * next iteration of the loop. | ||
1562 | */ | ||
1563 | preempt(entry, next_task, next_server, | ||
1564 | 0 /* Don't reschedule on a slack preemption */); | ||
1565 | } | ||
1566 | } | ||
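| /* | ||
|  * The loop above re-reads the lowest-priority CPU on every pass, so a | ||
|  * preemption that swaps tasks between CPUs is itself re-examined on | ||
|  * the next iteration; the loop exits once the earliest eligible server | ||
|  * can no longer beat the deadline of the lowest-priority linked | ||
|  * server. | ||
|  */ | ||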
1567 | |||
1568 | /* | ||
1569 | * Correct local link after a change to the local HRT domain. | ||
1570 | */ | ||
1571 | static void check_for_hrt_preempt(cpu_entry_t *entry) | ||
1572 | { | ||
1573 | hrt_server_t *hrt_server = &entry->hrt_server; | ||
1574 | struct task_struct *next_hrt = next_eligible_hrt(hrt_server); | ||
1575 | |||
1576 | if (next_hrt && | ||
1577 | (!entry->linked || !is_hrt(entry->linked) || | ||
1578 | !is_eligible(entry->linked, hrt_server) || | ||
1579 | edf_preemption_needed(&hrt_server->hrt_domain, entry->linked))) { | ||
1580 | |||
1581 | preempt(entry, next_hrt, &hrt_server->server, 1); | ||
1582 | |||
1583 | } else { | ||
1584 | TRACE_SERVER_SUB(&hrt_server->server, "not HRT preempting"); | ||
1585 | } | ||
1586 | } | ||
1587 | |||
1588 | /* | ||
1589 | * Assumes called with local irqs disabled. | ||
1590 | */ | ||
1591 | static void job_arrival(struct task_struct *task, cpu_entry_t *entry) | ||
1592 | { | ||
1593 | int was_empty; | ||
1594 | |||
1595 | BUG_ON(task_cpu(task) == NO_CPU); | ||
1596 | |||
1597 | TRACE_TASK_SUB(task, "arriving on P%d", entry->cpu); | ||
1598 | |||
1599 | if (is_hrt(task)) { | ||
1600 | requeue(task, &entry->hrt_server.hrt_domain); | ||
1601 | check_for_hrt_preempt(entry); | ||
1602 | } else if (is_srt(task)) { | ||
1603 | requeue(task, &srt_domain); | ||
1604 | check_for_global_preempt(); | ||
1605 | } else /* BE */ { | ||
1606 | was_empty = !__jobs_pending(&be_domain); | ||
1607 | requeue(task, &be_domain); | ||
1608 | |||
1609 | /* The only way this could cause a preemption is if an eligible | ||
1610 | * BE server could not queue up a task. | ||
1611 | */ | ||
1612 | if (was_empty && __jobs_pending(&be_domain)) | ||
1613 | check_for_global_preempt(); | ||
1614 | } | ||
1615 | } | ||
1616 | |||
1617 | /****************************************************************************** | ||
1618 | * Timer methods | ||
1619 | ******************************************************************************/ | ||
1620 | |||
1621 | /* | ||
1622 | * Merges a group of released HRT tasks into a ready queue and checks | ||
1623 | * for preemptions. | ||
1624 | */ | ||
1625 | static void release_hrt_jobs(rt_domain_t *domain, struct bheap *tasks) | ||
1626 | { | ||
1627 | unsigned long flags; | ||
1628 | struct task_struct *first; | ||
1629 | cpu_entry_t *entry; | ||
1630 | |||
1631 | raw_spin_lock_irqsave(global_lock, flags); | ||
1632 | |||
1633 | first = (struct task_struct*)bheap_peek(edf_ready_order, tasks)->value; | ||
1634 | entry = task_sched_entry(first); | ||
1635 | |||
1636 | BUG_ON(!first || !is_hrt(first)); | ||
1637 | TRACE_TASK(first, "HRT tasks released at %llu on P%d\n", | ||
1638 | TIME(litmus_clock()), task_cpu(first)); | ||
1639 | |||
1640 | __merge_ready(domain, tasks); | ||
1641 | check_for_hrt_preempt(entry); | ||
1642 | |||
1643 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
1644 | } | ||
1645 | |||
1646 | /* | ||
1647 | * Merges a group of released tasks into a ready queue and checks to see | ||
1648 | * if the scheduler needs to be invoked. | ||
1649 | */ | ||
1650 | static void release_srt_jobs(rt_domain_t *domain, struct bheap *tasks) | ||
1651 | { | ||
1652 | unsigned long flags; | ||
1653 | struct task_struct *first = (bheap_peek(edf_ready_order, tasks)->value); | ||
1654 | |||
1655 | raw_spin_lock_irqsave(global_lock, flags); | ||
1656 | |||
1657 | TRACE_TASK(first, "SRT tasks released at %llu\n", TIME(litmus_clock())); | ||
1658 | |||
1659 | __merge_ready(domain, tasks); | ||
1660 | check_for_global_preempt(); | ||
1661 | |||
1662 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
1663 | } | ||
1664 | |||
1665 | /* | ||
1666 | * Merges a group of released tasks into a ready queue and checks to see | ||
1667 | * if the scheduler needs to be invoked. | ||
1668 | */ | ||
1669 | static void release_be_jobs(rt_domain_t *domain, struct bheap *tasks) | ||
1670 | { | ||
1671 | unsigned long flags; | ||
1672 | int was_empty; | ||
1673 | struct task_struct *first = (bheap_peek(edf_ready_order, tasks)->value); | ||
1674 | |||
1675 | TRACE_TASK(first, "BE tasks released at %llu\n", TIME(litmus_clock())); | ||
1676 | |||
1677 | raw_spin_lock_irqsave(global_lock, flags); | ||
1678 | |||
1679 | was_empty = !__jobs_pending(domain); | ||
1680 | __merge_ready(domain, tasks); | ||
1681 | if (was_empty) { | ||
1682 | /* The only way this could cause a preemption is if a BE server | ||
1683 | * could not find a task to run. | ||
1684 | */ | ||
1685 | check_for_global_preempt(); | ||
1686 | } | ||
1687 | |||
1688 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
1689 | } | ||
1690 | |||
1691 | static enum hrtimer_restart slack_timer_fire(struct hrtimer *timer) | ||
1692 | { | ||
1693 | unsigned long flags; | ||
1694 | hrt_server_t *server = container_of(timer, hrt_server_t, slack_timer); | ||
1695 | cpu_entry_t *entry = container_of(server, cpu_entry_t, hrt_server); | ||
1696 | |||
1697 | raw_spin_lock_irqsave(global_lock, flags); | ||
1698 | |||
1699 | TRACE_TIMER("slack timer fired for P%d", entry->cpu); | ||
1700 | BUG_ON(!server->ready); | ||
1701 | ////sched_trace_action(entry->linked, NO_SLACK_ACTION); | ||
1702 | |||
1703 | /* Set new state of entry */ | ||
1704 | server->no_slack = 1; | ||
1705 | check_for_hrt_preempt(entry); | ||
1706 | |||
1707 | /* Donate slack if the HRT server cannot run anything */ | ||
1708 | if (!entry->linked || !is_hrt(entry->linked)) { | ||
1709 | check_donate_slack(&server->server, NULL); | ||
1710 | check_for_global_preempt(); | ||
1711 | } | ||
1712 | |||
1713 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
1714 | |||
1715 | return HRTIMER_NORESTART; | ||
1716 | } | ||
1717 | |||
1718 | static void job_completion(cpu_entry_t *entry, struct task_struct* task) | ||
1719 | { | ||
1720 | server_t *server = entry->linked_server; | ||
1721 | set_rt_flags(task, RT_F_SLEEP); | ||
1722 | |||
1723 | TRACE_TASK_SUB(task, "completed"); | ||
1724 | |||
1725 | unlink(task); | ||
1726 | check_donate_slack(server, task); | ||
1727 | |||
1728 | /* If a slack server completed an SRT task, the work for the | ||
1729 | * next job arrival has already been done. | ||
1730 | */ | ||
1731 | if (server->type == S_SLACK && is_srt(task)) { | ||
1732 | tsk_rt(task)->job_params.job_no++; | ||
1733 | sched_trace_task_release(task); | ||
1734 | TRACE_TASK_SERVER_SUB(task, server, "catching up SRT, " | ||
1735 | "rel: %llu, dead: %llu", | ||
1736 | TIME(get_release(task)), | ||
1737 | TIME(get_deadline(task))); | ||
1738 | } else if (server->type == S_SRT) { | ||
1739 | /* If the task is behind the server it must release immediately, | ||
1740 | * leaving its release time and deadline unchanged. | ||
1741 | */ | ||
1742 | if (server->job_no > tsk_rt(task)->job_params.job_no) { | ||
1743 | TRACE_TASK_SUB(task, "catching up"); | ||
1744 | tsk_rt(task)->job_params.job_no++; | ||
1745 | } else { | ||
1746 | /* Otherwise release them both */ | ||
1747 | prepare_for_next_period(task); | ||
1748 | TRACE_TASK_SUB(task, "next release: %llu, dead: %llu", | ||
1749 | TIME(get_release(task)), | ||
1750 | TIME(get_deadline(task))); | ||
1751 | server_release(server); | ||
1752 | } | ||
1753 | } else { | ||
1754 | prepare_for_next_period(task); | ||
1755 | TRACE_TASK_SUB(task, "next release: %llu, dead: %llu", | ||
1756 | TIME(get_release(task)), | ||
1757 | TIME(get_deadline(task))); | ||
1758 | } | ||
1759 | |||
1760 | if (is_released(task, litmus_clock())) | ||
1761 | sched_trace_task_release(task); | ||
1762 | |||
1763 | /* Don't requeue a blocking task */ | ||
1764 | if (is_running(task)) | ||
1765 | job_arrival(task, entry); | ||
1766 | |||
1767 | sched_trace_task_completion(task, 1); | ||
1768 | } | ||
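| /* | ||
|  * For instance, if an SRT server has advanced to job 5 while its task | ||
|  * just completed job 3, the task's job_no is bumped to 4 with release | ||
|  * and deadline left untouched; only once the task has caught up are | ||
|  * task and server released together. | ||
|  */ | ||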
1769 | |||
1770 | /* | ||
1771 | * Assumes called with local irqs disabled. | ||
1772 | */ | ||
1773 | static void server_completed(server_t *server, struct task_struct *task) | ||
1774 | { | ||
1775 | hrt_server_t *hrt_server; | ||
1776 | cpu_entry_t *entry = task_linked_entry(task); | ||
1777 | |||
1778 | BUG_ON(entry->linked != task); | ||
1779 | BUG_ON(entry->linked_server != server); | ||
1780 | |||
1781 | if (server->type == S_SRT) { | ||
1782 | TRACE_TASK_SUB(task, "must wait on server"); | ||
1783 | |||
1784 | /* The job must now take the priority and release time | ||
1785 | * of the next server. We do this so that we can still | ||
1786 | * use rt_domain and other handy methods to still work | ||
1787 | * with SRT jobs. Because this can ONLY happen if the | ||
1788 | * task's job number gets behind the server's, we can | ||
1789 | * easily detect the job catching up later. | ||
1790 | */ | ||
1791 | tsk_rt(task)->job_params.release = server->deadline; | ||
1792 | tsk_rt(task)->job_params.deadline = server->deadline + | ||
1793 | get_rt_period(task); | ||
1794 | TRACE_TASK_SUB(task, "waiting, new dead: %llu, new rel: %llu", | ||
1795 | TIME(get_deadline(task)), | ||
1796 | TIME(get_release(task))); | ||
1797 | |||
1798 | } else if (server->type == S_HRT) { | ||
1799 | /* Update state of HRT server */ | ||
1800 | hrt_server = container_of(server, hrt_server_t, server); | ||
1801 | hrt_server->ready = 0; | ||
1802 | TRACE_SERVER_SUB(server, "P%d no longer ready", entry->cpu); | ||
1803 | |||
1804 | if (hrtimer_active(&hrt_server->slack_timer)) | ||
1805 | slack_timer_cancel(hrt_server); | ||
1806 | } | ||
1807 | |||
1808 | if (server->type != S_SLACK) { | ||
1809 | server_release(server); | ||
1810 | } | ||
1811 | |||
1812 | sched_trace_action(task, SERVER_COMPLETED_ACTION); | ||
1813 | |||
1814 | unlink(task); | ||
1815 | requeue(task, get_rt_domain(entry, task)); | ||
1816 | |||
1817 | /* We know this CPU needs to pick its next task */ | ||
1818 | edf_hsb_pick_next(entry); | ||
1819 | |||
1820 | /* Only cause a reschedule if something new was scheduled. A task | ||
1821 | * could merely have swapped servers. | ||
1822 | */ | ||
1823 | if (entry->linked != task) | ||
1824 | preempt_if_preemptable(entry->scheduled, entry->cpu); | ||
1825 | else | ||
1826 | entry->scheduled_server = entry->linked_server; | ||
1827 | } | ||
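| /* | ||
|  * For instance, an SRT server exhausting its budget at deadline 50ms | ||
|  * leaves its task with release = 50ms and deadline = 50ms plus the | ||
|  * task's period, so the job waits for the server's replenishment | ||
|  * rather than competing with a stale priority. | ||
|  */ | ||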
1828 | |||
1829 | static void hrt_server_released(server_t *server) | ||
1830 | { | ||
1831 | hrt_server_t *hrt_server = container_of(server, hrt_server_t, server); | ||
1832 | cpu_entry_t *entry = container_of(hrt_server, cpu_entry_t, hrt_server); | ||
1833 | |||
1834 | BUG_ON(hrtimer_active(&hrt_server->slack_timer)); | ||
1835 | TRACE_SERVER_SUB(server, "HRT server released on P%d", entry->cpu); | ||
1836 | |||
1837 | hrt_server->no_slack = 0; | ||
1838 | hrt_server->ready = 1; | ||
1839 | remove_slack(server_slack(&hrt_server->server)); | ||
1840 | |||
1841 | check_for_hrt_preempt(entry); | ||
1842 | |||
1843 | /* Ensure slack timer is only running if the current | ||
1844 | * job is not HRT. | ||
1845 | */ | ||
1846 | if (entry->linked && is_hrt(entry->linked)) | ||
1847 | slack_timer_cancel(hrt_server); | ||
1848 | else | ||
1849 | slack_timer_arm(hrt_server); | ||
1850 | } | ||
1851 | |||
1852 | static void servers_released(struct list_head *servers) | ||
1853 | { | ||
1854 | int was_be = 0; | ||
1855 | unsigned long flags; | ||
1856 | struct list_head *pos, *safe; | ||
1857 | server_t *server; | ||
1858 | |||
1859 | raw_spin_lock_irqsave(global_lock, flags); | ||
1860 | |||
1861 | ////sched_trace_action(NULL, SERVER_RELEASED_ACTION); | ||
1862 | TRACE_TIMER("Servers released"); | ||
1863 | |||
1864 | list_for_each_safe(pos, safe, servers) { | ||
1865 | server = list_entry(pos, server_t, release_list); | ||
1866 | |||
1867 | list_del_init(pos); | ||
1868 | |||
1869 | if (server->type == S_BE) { | ||
1870 | check_donate_slack(server, NULL); | ||
1871 | was_be = 1; | ||
1872 | BUG_ON(bheap_node_in_heap(server->hn)); | ||
1873 | TRACE_SERVER_SUB(server, "inserting BE server"); | ||
1874 | bheap_insert(server_order, &be_ready_servers, | ||
1875 | server->hn); | ||
1876 | check_donate_slack(server, NULL); | ||
1877 | } else { /* HRT server */ | ||
1878 | hrt_server_released(server); | ||
1879 | } | ||
1880 | } | ||
1881 | |||
1882 | if (was_be) | ||
1883 | check_for_global_preempt(); | ||
1884 | |||
1885 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
1886 | } | ||
1887 | |||
1888 | /****************************************************************************** | ||
1889 | * Server management methods | ||
1890 | ******************************************************************************/ | ||
1891 | |||
1892 | static int curr_be = 0; | ||
1893 | |||
1894 | /* | ||
1895 | * A BE server has been added in a proc entry. | ||
1896 | */ | ||
1897 | static int admit_be_server(unsigned long long wcet, | ||
1898 | unsigned long long period, | ||
1899 | int cpu) | ||
1900 | { | ||
1901 | int rv = 0; | ||
1902 | server_t *be_server; | ||
1903 | |||
1904 | if (cpu != NO_CPU) { | ||
1905 | rv = -EINVAL; | ||
1906 | goto out; | ||
1907 | } | ||
1908 | |||
1909 | be_server = server_alloc(GFP_ATOMIC); | ||
1910 | server_init(be_server, &server_domain, | ||
1911 | BE_SERVER_BASE + ++curr_be, | ||
1912 | wcet, period, 1); | ||
1913 | be_server->type = S_BE; | ||
1914 | server_slack_create(be_server); | ||
1915 | |||
1916 | TRACE_SERVER_SUB(be_server, "admitted BE server"); | ||
1917 | |||
1918 | list_add(&be_server->list, &be_servers); | ||
1919 | bheap_insert(server_order, &be_ready_servers, be_server->hn); | ||
1920 | |||
1921 | out: | ||
1922 | return rv; | ||
1923 | } | ||
1924 | |||
1925 | /* | ||
1926 | * Output all BE servers to a proc entry. | ||
1927 | */ | ||
1928 | static void list_be_servers(server_proc_t *proc) | ||
1929 | { | ||
1930 | struct list_head *pos; | ||
1931 | server_t *be_server; | ||
1932 | |||
1933 | list_for_each(pos, &be_servers) { | ||
1934 | be_server = list_entry(pos, server_t, list); | ||
1935 | list_server(be_server, NO_CPU, proc); | ||
1936 | } | ||
1937 | } | ||
1938 | |||
1939 | /* | ||
1940 | * Halts and destroys all BE servers. | ||
1941 | */ | ||
1942 | static void stop_be_servers(void) | ||
1943 | { | ||
1944 | server_t *be_server; | ||
1945 | struct list_head *pos, *safe; | ||
1946 | |||
1947 | list_for_each_safe(pos, safe, &be_servers) { | ||
1948 | be_server = list_entry(pos, server_t, list); | ||
1949 | |||
1950 | list_del_init(pos); | ||
1951 | if (bheap_node_in_heap(be_server->hn)) | ||
1952 | bheap_delete(server_order, &be_ready_servers, | ||
1953 | be_server->hn); | ||
1954 | server_slack_destroy(be_server); | ||
1955 | server_destroy(be_server); | ||
1956 | server_free(be_server); | ||
1957 | } | ||
1958 | } | ||
1959 | |||
1960 | /* | ||
1961 | * An HRT server has been added in a proc entry. | ||
1962 | */ | ||
1963 | static int admit_hrt_server(unsigned long long wcet, | ||
1964 | unsigned long long period, | ||
1965 | int cpu) | ||
1966 | { | ||
1967 | cpu_entry_t *entry = &per_cpu(cpu_entries, cpu); | ||
1968 | hrt_server_t *hrt_server = &entry->hrt_server; | ||
1969 | struct hrtimer *slack_timer = &hrt_server->slack_timer; | ||
1970 | |||
1971 | server_init(&hrt_server->server, &server_domain, | ||
1972 | cpu, wcet, period, 1); | ||
1973 | server_slack_create(&hrt_server->server); | ||
1974 | hrt_server->no_slack = 0; | ||
1975 | hrt_server->ready = 1; | ||
1976 | hrt_server->server.type = S_HRT; | ||
1977 | |||
1978 | edf_domain_init(&hrt_server->hrt_domain, NULL, | ||
1979 | release_hrt_jobs); | ||
1980 | |||
1981 | hrtimer_init(slack_timer, | ||
1982 | CLOCK_MONOTONIC, | ||
1983 | HRTIMER_MODE_ABS); | ||
1984 | slack_timer->function = slack_timer_fire; | ||
1985 | |||
1986 | return 0; | ||
1987 | } | ||
1988 | |||
1989 | /* | ||
1990 | * Print all HRT servers to a proc entry. | ||
1991 | */ | ||
1992 | static void list_hrt_servers(server_proc_t *proc) | ||
1993 | { | ||
1994 | cpu_entry_t *entry; | ||
1995 | hrt_server_t *hrt_server; | ||
1996 | int cpu; | ||
1997 | |||
1998 | for_each_online_cpu(cpu) { | ||
1999 | entry = &per_cpu(cpu_entries, cpu); | ||
2000 | hrt_server = &entry->hrt_server; | ||
2001 | list_server(&hrt_server->server, cpu, proc); | ||
2002 | } | ||
2003 | } | ||
2004 | |||
2005 | /* | ||
2006 | * Stops all hrt server timers and resets all fields to 0. | ||
2007 | */ | ||
2008 | static void stop_hrt_servers(void) | ||
2009 | { | ||
2010 | int cpu; | ||
2011 | cpu_entry_t *entry; | ||
2012 | hrt_server_t *hrt_server; | ||
2013 | |||
2014 | for_each_online_cpu(cpu) { | ||
2015 | entry = &per_cpu(cpu_entries, cpu); | ||
2016 | hrt_server = &entry->hrt_server; | ||
2017 | |||
2018 | if (hrt_server->server.data) | ||
2019 | server_slack_destroy(&hrt_server->server); | ||
2020 | slack_timer_cancel(hrt_server); | ||
2021 | |||
2022 | hrt_server->no_slack = 0; | ||
2023 | hrt_server->ready = 0; | ||
2024 | hrt_server->server.period = 0; | ||
2025 | hrt_server->server.wcet = 0; | ||
2026 | } | ||
2027 | } | ||
2028 | |||
2029 | /* | ||
2030 | * Starts timers used to manage servers. | ||
2031 | */ | ||
2032 | static void start_servers(lt_t time) | ||
2033 | { | ||
2034 | int cpu; | ||
2035 | cpu_entry_t *entry; | ||
2036 | server_t *server; | ||
2037 | server_t *be_server; | ||
2038 | struct list_head *pos; | ||
2039 | |||
2040 | TRACE_SUB("starting servers at %llu", time); | ||
2041 | |||
2042 | /* Start HRT servers */ | ||
2043 | for_each_online_cpu(cpu) { | ||
2044 | entry = &per_cpu(cpu_entries, cpu); | ||
2045 | server = &entry->hrt_server.server; | ||
2046 | |||
2047 | if (!check_hrt_server_initialized(&entry->hrt_server)) | ||
2048 | goto loop_end; | ||
2049 | |||
2050 | /* Cause a catchup later */ | ||
2051 | server_release_at(server, time - server->period); | ||
2052 | entry->hrt_server.ready = 1; | ||
2053 | |||
2054 | TRACE("Setting up cpu %d to have timer deadline %llu\n", | ||
2055 | cpu, TIME(server->deadline)); | ||
2056 | loop_end: | ||
2057 | cpu = cpu; /* no-op; a label must be followed by a statement */ | ||
2058 | } | ||
2059 | |||
2060 | /* Start BE servers */ | ||
2061 | list_for_each(pos, &be_servers) { | ||
2062 | be_server = list_entry(pos, server_t, list); | ||
2063 | |||
2064 | if (!bheap_node_in_heap(be_server->hn)) | ||
2065 | bheap_insert(server_order, &be_ready_servers, be_server->hn); | ||
2066 | |||
2067 | /* Cause a catchup later */ | ||
2068 | server_release_at(be_server, time - be_server->period); | ||
2069 | |||
2070 | TRACE("Releasing BE server %d\n", be_server->id); | ||
2071 | TRACE_SERVER_SUB(be_server, "inserting be server"); | ||
2072 | } | ||
2073 | } | ||
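| /* | ||
|  * Releasing each server at (time - period) leaves its deadline at | ||
|  * exactly the start time, so the first scheduling decision finds the | ||
|  * server late and runs catchup_server() to align it with the actual | ||
|  * start of execution, which is the catchup the comments above intend. | ||
|  */ | ||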
2074 | |||
2075 | /****************************************************************************** | ||
2076 | * Plugin methods | ||
2077 | ******************************************************************************/ | ||
2078 | |||
2079 | static long edf_hsb_activate_plugin(void) | ||
2080 | { | ||
2081 | int cpu; | ||
2082 | cpu_entry_t *entry; | ||
2083 | #ifdef CONFIG_RELEASE_MASTER | ||
2084 | edf_hsb_release_master = atomic_read(&release_master_cpu); | ||
2085 | #else | ||
2086 | edf_hsb_release_master = NO_CPU; | ||
2087 | #endif | ||
2088 | server_domain.release_master = edf_hsb_release_master; | ||
2089 | |||
2090 | for_each_online_cpu(cpu) { | ||
2091 | entry = &per_cpu(cpu_entries, cpu); | ||
2092 | #ifdef CONFIG_RELEASE_MASTER | ||
2093 | if (cpu != edf_hsb_release_master) | ||
2094 | #endif | ||
2095 | update_cpu_position(entry); | ||
2096 | } | ||
2097 | |||
2098 | start_servers(litmus_clock()); | ||
2099 | |||
2100 | TRACE("activating EDF-HSB plugin.\n"); | ||
2101 | return 0; | ||
2102 | } | ||
2103 | |||
2104 | /* | ||
2105 | * Requires a processor be specified for any task run on the system. | ||
2106 | */ | ||
2107 | static long edf_hsb_admit_task(struct task_struct *task) | ||
2108 | { | ||
2109 | cpu_entry_t *entry = task_sched_entry(task); | ||
2110 | |||
2111 | TRACE_TASK(task, "Admitting\n"); | ||
2112 | |||
2113 | if (is_hrt(task)) { | ||
2114 | return check_hrt_server_initialized(&entry->hrt_server) && | ||
2115 | ((task_cpu(task) == task->rt_param.task_params.cpu) && | ||
2116 | (task_cpu(task) == entry->cpu)) ? 0 : -EINVAL; | ||
2117 | } else { | ||
2118 | /* If the task is not HRT, we don't want to force the user | ||
2119 | * to specify a CPU. | ||
2120 | */ | ||
2121 | return 0; | ||
2122 | } | ||
2123 | } | ||
2124 | |||
2125 | /* | ||
2126 | * Stops all servers from running. | ||
2127 | */ | ||
2128 | static long edf_hsb_deactivate_plugin(void) | ||
2129 | { | ||
2130 | cpu_entry_t *cpu_entry; | ||
2131 | hrt_server_t *hrt_server; | ||
2132 | unsigned long flags; | ||
2133 | int cpu; | ||
2134 | |||
2135 | local_irq_save(flags); | ||
2136 | |||
2137 | for_each_online_cpu(cpu) { | ||
2138 | cpu_entry = &per_cpu(cpu_entries, cpu); | ||
2139 | hrt_server = &cpu_entry->hrt_server; | ||
2140 | |||
2141 | slack_timer_cancel(hrt_server); | ||
2142 | |||
2143 | if (likely(bheap_node_in_heap(cpu_entry->hn))) | ||
2144 | bheap_delete(server_order, &cpu_heap, cpu_entry->hn); | ||
2145 | } | ||
2146 | |||
2147 | local_irq_restore(flags); | ||
2148 | |||
2149 | return 0; | ||
2150 | } | ||
2151 | |||
2152 | static void edf_hsb_task_block(struct task_struct *task) | ||
2153 | { | ||
2154 | unsigned long flags; | ||
2155 | cpu_entry_t *entry = task_sched_entry(task); | ||
2156 | struct task_struct *linked; | ||
2157 | server_t *linked_server; | ||
2158 | |||
2159 | TRACE_TASK(task, "block at %llu in state %ld\n", | ||
2160 | litmus_clock(), task->state); | ||
2161 | set_rt_flags(task, RT_F_BLOCK); | ||
2162 | |||
2163 | raw_spin_lock_irqsave(global_lock, flags); | ||
2164 | |||
2165 | linked = entry->linked; | ||
2166 | linked_server = entry->linked_server; | ||
2167 | |||
2168 | unlink(task); | ||
2169 | |||
2170 | /* TODO: necessary? */ | ||
2171 | if (task == linked) { | ||
2172 | check_donate_slack(linked_server, task); | ||
2173 | } | ||
2174 | |||
2175 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
2176 | } | ||
2177 | |||
2178 | /* | ||
2179 | * A task leaves the system. | ||
2180 | */ | ||
2181 | static void edf_hsb_task_exit(struct task_struct *task) | ||
2182 | { | ||
2183 | unsigned long flags; | ||
2184 | cpu_entry_t *entry = task_sched_entry(task); | ||
2185 | |||
2186 | BUG_ON(!is_realtime(task)); | ||
2187 | TRACE_TASK(task, "RIP at %llu on P%d\n", | ||
2188 | TIME(litmus_clock()), tsk_rt(task)->scheduled_on); | ||
2189 | |||
2190 | raw_spin_lock_irqsave(global_lock, flags); | ||
2191 | |||
2192 | unlink(task); | ||
2193 | if (tsk_rt(task)->scheduled_on != NO_CPU) { | ||
2194 | entry->scheduled = NULL; | ||
2195 | tsk_rt(task)->scheduled_on = NO_CPU; | ||
2196 | } | ||
2197 | if (is_srt(task)) { | ||
2198 | server_slack_destroy(task_srt_server(task)); | ||
2199 | server_destroy(task_srt_server(task)); | ||
2200 | server_free(task_srt_server(task)); | ||
2201 | task_data_free(tsk_rt(task)->plugin_data); | ||
2202 | } | ||
2203 | |||
2204 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
2205 | } | ||
2206 | |||
2207 | /* | ||
2208 | * Attempts to determine the current scheduler state, then selects the | ||
2209 | * next task and updates the scheduler state. | ||
2210 | */ | ||
2211 | static struct task_struct* edf_hsb_schedule(struct task_struct *prev) | ||
2212 | { | ||
2213 | unsigned long flags; | ||
2214 | int blocks, preempted, sleep, was_slack, np, hrt_preempt, donated; | ||
2215 | struct task_struct *curr; | ||
2216 | cpu_entry_t *entry = local_cpu_entry; | ||
2217 | |||
2218 | #ifdef CONFIG_RELEASE_MASTER | ||
2219 | /* Bail out early if we are the release master. | ||
2220 | * The release master never schedules any real-time tasks. | ||
2221 | */ | ||
2222 | if (edf_hsb_release_master == entry->cpu) { | ||
2223 | sched_state_task_picked(); | ||
2224 | return NULL; | ||
2225 | } | ||
2226 | #endif | ||
2227 | |||
2228 | raw_spin_lock_irqsave(global_lock, flags); | ||
2229 | |||
2230 | curr = entry->scheduled; | ||
2231 | |||
2232 | if (entry->scheduled && !is_realtime(prev)) { | ||
2233 | TRACE_TASK_SUB(entry->scheduled, "Stack deadlock!"); | ||
2234 | } | ||
2235 | |||
2236 | TRACE("server_budget: %llu, server_deadline: %llu, " | ||
2237 | "curr_time: %llu, no_slack: %d, ready: %d\n", | ||
2238 | TIME(entry->hrt_server.server.budget), | ||
2239 | TIME(entry->hrt_server.server.deadline), | ||
2240 | TIME(litmus_clock()), entry->hrt_server.no_slack, | ||
2241 | entry->hrt_server.ready); | ||
2242 | |||
2243 | /* Determine state */ | ||
2244 | blocks = curr && !is_running(curr); | ||
2245 | preempted = entry->scheduled != entry->linked; | ||
2246 | sleep = curr && get_rt_flags(curr) == RT_F_SLEEP; | ||
2247 | was_slack = !list_empty(&slack_queue); | ||
2248 | np = curr && is_np(curr); | ||
2249 | |||
2250 | TRACE("blocks: %d, preempted: %d, sleep: %d, np: %d\n", | ||
2251 | blocks, preempted, sleep, np); | ||
2252 | if (blocks) | ||
2253 | unlink(entry->scheduled); | ||
2254 | |||
2255 | /* If the task has gone to sleep or exhausted its budget, it | ||
2256 | * must complete its current job. | ||
2257 | */ | ||
2258 | if (sleep && !blocks && !preempted) | ||
2259 | job_completion(entry, entry->scheduled); | ||
2260 | |||
2261 | /* Pick the next task if there isn't one currently */ | ||
2262 | if (!entry->linked) | ||
2263 | edf_hsb_pick_next(entry); | ||
2264 | |||
2265 | /* Set task states */ | ||
2266 | if (entry->linked != entry->scheduled) { | ||
2267 | if (entry->linked) | ||
2268 | entry->linked->rt_param.scheduled_on = entry->cpu; | ||
2269 | if (entry->scheduled) | ||
2270 | entry->scheduled->rt_param.scheduled_on = NO_CPU; | ||
2271 | } | ||
2272 | |||
2273 | entry->scheduled = entry->linked; | ||
2274 | entry->scheduled_server = entry->linked_server; | ||
2275 | sched_state_task_picked(); | ||
2276 | |||
2277 | /* A non-HRT task was preempted by an HRT task. Because of the way linking | ||
2278 | * works, it cannot link itself to anything else until the non-migratory | ||
2279 | * HRT task is scheduled. | ||
2280 | */ | ||
2281 | hrt_preempt = preempted && entry->linked && curr && | ||
2282 | is_hrt(entry->linked) && !is_hrt(curr); | ||
2283 | /* A server just donated slack */ | ||
2284 | donated = entry->linked && entry->linked_server->type != S_SLACK && | ||
2285 | head_in_list(&server_slack(entry->linked_server)->list); | ||
2286 | |||
2287 | if (hrt_preempt || donated) | ||
2288 | check_for_global_preempt(); | ||
2289 | |||
2290 | if (entry->scheduled) | ||
2291 | TRACE_TASK(entry->scheduled, "scheduled at %llu\n", | ||
2292 | TIME(litmus_clock())); | ||
2293 | else | ||
2294 | TRACE("NULL scheduled at %llu\n", TIME(litmus_clock())); | ||
2295 | |||
2296 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
2297 | |||
2298 | if (!entry->scheduled && !next_eligible_slack_server()) { | ||
2299 | TRACE_SUB("A slack server has disappeared!"); | ||
2300 | } | ||
2301 | |||
2302 | return entry->scheduled; | ||
2303 | } | ||
2304 | |||
2305 | /* | ||
2306 | * Prepare a task for running in RT mode | ||
2307 | */ | ||
2308 | static void edf_hsb_task_new(struct task_struct *task, int on_rq, int running) | ||
2309 | { | ||
2310 | unsigned long flags; | ||
2311 | task_data_t *data; | ||
2312 | server_t *srt_server = NULL; | ||
2313 | cpu_entry_t *entry = task_sched_entry(task); | ||
2314 | |||
2315 | TRACE_TASK(task, "edf_hsb: task new at %llu\n", TIME(litmus_clock())); | ||
2316 | |||
2317 | raw_spin_lock_irqsave(global_lock, flags); | ||
2318 | |||
2319 | /* Setup job parameters */ | ||
2320 | release_at(task, litmus_clock()); | ||
2321 | |||
2322 | /* Create SRT server */ | ||
2323 | if (is_srt(task)) { | ||
2324 | /* Create SRT server */ | ||
2325 | srt_server = server_alloc(GFP_ATOMIC); | ||
2326 | server_init(srt_server, &server_domain, | ||
2327 | task->pid, get_exec_cost(task), | ||
2328 | get_rt_period(task), 0); | ||
2329 | srt_server->type = S_SRT; | ||
2330 | |||
2331 | server_slack_create(srt_server); | ||
2332 | |||
2333 | } | ||
2334 | |||
2335 | /* Create task plugin data */ | ||
2336 | data = task_data_alloc(GFP_ATOMIC); | ||
2337 | data->owner = task; | ||
2338 | data->srt_server = srt_server; | ||
2339 | INIT_LIST_HEAD(&data->candidate_list); | ||
2340 | tsk_rt(task)->plugin_data = data; | ||
2341 | |||
2342 | /* Already running, update the cpu entry. | ||
2343 | * This tends to happen when the first tasks enter the system. | ||
2344 | */ | ||
2345 | if (running) { | ||
2346 | //BUG_ON(entry->scheduled); | ||
2347 | |||
2348 | #ifdef CONFIG_RELEASE_MASTER | ||
2349 | if (entry->cpu != edf_hsb_release_master) { | ||
2350 | #endif | ||
2351 | entry->scheduled = task; | ||
2352 | tsk_rt(task)->scheduled_on = task_cpu(task); | ||
2353 | #ifdef CONFIG_RELEASE_MASTER | ||
2354 | } else { | ||
2355 | /* do not schedule on release master */ | ||
2356 | /* Cannot preempt! Causing a preemption with a BE task | ||
2357 | * somehow leads to that task never blocking during | ||
2358 | * a synchronous release. This is a bug! | ||
2359 | */ | ||
2360 | preempt_if_preemptable(entry->scheduled, entry->cpu); | ||
2361 | tsk_rt(task)->scheduled_on = NO_CPU; | ||
2362 | } | ||
2363 | #endif | ||
2364 | } else { | ||
2365 | task->rt_param.scheduled_on = NO_CPU; | ||
2366 | } | ||
2367 | |||
2368 | task->rt_param.linked_on = NO_CPU; | ||
2369 | job_arrival(task, entry); | ||
2370 | |||
2371 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
2372 | } | ||
2373 | |||
2374 | static void edf_hsb_task_wake_up(struct task_struct *task) | ||
2375 | { | ||
2376 | lt_t now; | ||
2377 | unsigned long flags; | ||
2378 | cpu_entry_t *entry = task_sched_entry(task); | ||
2379 | |||
2380 | |||
2381 | TRACE_TASK(task, "wake_up at %llu on %d, %d\n", TIME(litmus_clock()), | ||
2382 | task_cpu(task), task->rt_param.task_params.cpu); | ||
2383 | |||
2384 | raw_spin_lock_irqsave(global_lock, flags); | ||
2385 | |||
2386 | if (!is_be(task)) { | ||
2387 | if (is_srt(task)) { | ||
2388 | catchup_srt_server(task); | ||
2389 | } | ||
2390 | |||
2391 | /* Non-BE tasks are not sporadic in this model */ | ||
2392 | set_rt_flags(task, RT_F_RUNNING); | ||
2393 | /* The job blocked while it was being run by a slack server */ | ||
2394 | if (is_queued(task)) { | ||
2395 | check_slack_candidate(task); | ||
2396 | goto out; | ||
2397 | } | ||
2398 | } else { | ||
2399 | /* Re-release all BE tasks on wake-up */ | ||
2400 | now = litmus_clock(); | ||
2401 | |||
2402 | if (is_tardy(task, now)) { | ||
2403 | release_at(task, now); | ||
2404 | sched_trace_task_release(task); | ||
2405 | } | ||
2406 | } | ||
2407 | |||
2408 | job_arrival(task, entry); | ||
2409 | |||
2410 | out: | ||
2411 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
2412 | } | ||
2413 | |||
2414 | /* | ||
2415 | * Unused. | ||
2416 | */ | ||
2417 | static void edf_hsb_tick(struct task_struct *t) | ||
2418 | { | ||
2419 | } | ||
2420 | |||
2421 | |||
2422 | /****************************************************************************** | ||
2423 | * Plugin | ||
2424 | ******************************************************************************/ | ||
2425 | |||
2426 | static struct sched_plugin edf_hsb_plugin __cacheline_aligned_in_smp = { | ||
2427 | .plugin_name = "EDF-HSB", | ||
2428 | |||
2429 | .activate_plugin = edf_hsb_activate_plugin, | ||
2430 | .deactivate_plugin = edf_hsb_deactivate_plugin, | ||
2431 | |||
2432 | .schedule = edf_hsb_schedule, | ||
2433 | .admit_task = edf_hsb_admit_task, | ||
2434 | .task_block = edf_hsb_task_block, | ||
2435 | .task_exit = edf_hsb_task_exit, | ||
2436 | .task_new = edf_hsb_task_new, | ||
2437 | .task_wake_up = edf_hsb_task_wake_up, | ||
2438 | .tick = edf_hsb_tick, | ||
2439 | |||
2440 | /* From jobs.h */ | ||
2441 | .complete_job = complete_job, | ||
2442 | .release_at = release_at, | ||
2443 | }; | ||
2444 | |||
2445 | static int __init init_edf_hsb(void) | ||
2446 | { | ||
2447 | cpu_entry_t *entry; | ||
2448 | hrt_server_t *hrt_server; | ||
2449 | server_t *idle_slack; | ||
2450 | int rv, cpu; | ||
2451 | |||
2452 | rv = register_sched_plugin(&edf_hsb_plugin); | ||
2453 | if (rv) { | ||
2454 | printk(KERN_ERR "Could not register plugin %s.\n", | ||
2455 | edf_hsb_plugin.plugin_name); | ||
2456 | goto out; | ||
2457 | } | ||
2458 | |||
2459 | rv = make_plugin_proc_dir(&edf_hsb_plugin, &edf_hsb_proc_dir); | ||
2460 | if (rv) { | ||
2461 | printk(KERN_ERR "Could not create %s procfs dir.\n", | ||
2462 | edf_hsb_plugin.plugin_name); | ||
2463 | goto out; | ||
2464 | } | ||
2465 | |||
2466 | |||
2467 | task_data_cache = KMEM_CACHE(task_data, SLAB_PANIC); | ||
2468 | |||
2469 | /* Global domains */ | ||
2470 | edf_domain_init(&srt_domain, NULL, release_srt_jobs); | ||
2471 | rt_domain_init(&be_domain, be_ready_order, | ||
2472 | NULL, release_be_jobs); | ||
2473 | server_domain_init(&server_domain, servers_released, | ||
2474 | server_completed, NO_CPU, global_lock); | ||
2475 | |||
2476 | /* Server proc interfaces */ | ||
2477 | server_proc_init(&server_domain, | ||
2478 | edf_hsb_proc_dir, BE_PROC_NAME, | ||
2479 | admit_be_server, list_be_servers, | ||
2480 | stop_be_servers); | ||
2481 | server_proc_init(&server_domain, | ||
2482 | edf_hsb_proc_dir, HRT_PROC_NAME, | ||
2483 | admit_hrt_server, list_hrt_servers, | ||
2484 | stop_hrt_servers); | ||
2485 | |||
2486 | |||
2487 | /* Global collections */ | ||
2488 | bheap_init(&cpu_heap); | ||
2489 | bheap_init(&be_ready_servers); | ||
2490 | INIT_LIST_HEAD(&be_servers); | ||
2491 | INIT_LIST_HEAD(&slack_queue); | ||
2492 | INIT_LIST_HEAD(&slack_candidates); | ||
2493 | |||
2494 | for_each_online_cpu(cpu) { | ||
2495 | entry = &per_cpu(cpu_entries, cpu); | ||
2496 | hrt_server = &entry->hrt_server; | ||
2497 | |||
2498 | idle_slack = server_alloc(GFP_ATOMIC); | ||
2499 | server_init(idle_slack, &server_domain, | ||
2500 | IDLE_SLACK_BASE + cpu, | ||
2501 | LLONG_MAX, LLONG_MAX, 1); | ||
2502 | idle_slack->deadline = LLONG_MAX; | ||
2503 | idle_slack->budget = LLONG_MAX; | ||
2504 | idle_slack->job_no = 1; | ||
2505 | idle_slack->release = 1; | ||
2506 | idle_slack->type = S_SLACK; | ||
2507 | add_slack(idle_slack); | ||
2508 | |||
2509 | entry->cpu = cpu; | ||
2510 | entry->linked = NULL; | ||
2511 | entry->scheduled = NULL; | ||
2512 | entry->linked_server = NULL; | ||
2513 | |||
2514 | /* HRT server */ | ||
2515 | hrt_server->server.id = cpu; | ||
2516 | hrt_server->server.deadline = 0; | ||
2517 | hrt_server->server.period = 0; | ||
2518 | hrt_server->server.wcet = 0; | ||
2519 | hrt_server->ready = 0; | ||
2520 | |||
2521 | hrtimer_start_on_info_init(&hrt_server->slack_timer_info); | ||
2522 | |||
2523 | /* CPU entry bheap nodes */ | ||
2524 | entry->hn = &cpu_heap_node[cpu]; | ||
2525 | bheap_node_init(&entry->hn, entry); | ||
2526 | } | ||
2527 | |||
2528 | out: | ||
2529 | return rv; | ||
2530 | } | ||
2531 | |||
2532 | static void exit_edf_hsb(void) | ||
2533 | { | ||
2534 | int cpu; | ||
2535 | cpu_entry_t *entry; | ||
2536 | |||
2537 | stop_be_servers(); | ||
2538 | stop_hrt_servers(); | ||
2539 | |||
2540 | server_domain_destroy(&server_domain); | ||
2541 | |||
2542 | for_each_online_cpu(cpu) { | ||
2543 | entry = &per_cpu(cpu_entries, cpu); | ||
2544 | server_slack_destroy(&entry->hrt_server.server); | ||
2545 | server_destroy(&entry->hrt_server.server); | ||
2546 | } | ||
2547 | |||
2548 | if (edf_hsb_proc_dir) { | ||
2549 | remove_plugin_proc_dir(&edf_hsb_plugin); | ||
2550 | /* TODO: is this wrong? */ | ||
2551 | edf_hsb_proc_dir = NULL; | ||
2552 | } | ||
2553 | } | ||
2554 | |||
2555 | module_init(init_edf_hsb); | ||
2556 | module_exit(exit_edf_hsb); | ||
diff --git a/litmus/sched_edf_hsb_noslack.c b/litmus/sched_edf_hsb_noslack.c new file mode 100644 index 000000000000..4d91f99d4094 --- /dev/null +++ b/litmus/sched_edf_hsb_noslack.c | |||
@@ -0,0 +1,2556 @@ | |||
1 | /* | ||
2 | * litmus/sched_edf_hsb_noslack.c | ||
3 | * | ||
4 | * Implementation of the EDF-HSB scheduling algorithm. | ||
5 | * | ||
6 | * The following 6 events are fired by timers and not handled by | ||
7 | * the plugin infrastructure itself: | ||
8 | * | ||
9 | * release_[hrt|srt|be]_jobs | ||
10 | * [hrt|be]_server_released | ||
11 | * server_completed (for HRT, SRT, and BE) | ||
12 | * | ||
13 | * The following 4 events are caused by a write to the proc entry | ||
14 | * and should never be run when the plugin is already running: | ||
15 | * stop_[hrt|be]_servers | ||
16 | * admit_[hrt|be]_server | ||
17 | * | ||
18 | * TODO system for removing tasks from their release queues | ||
19 | * TODO clean up link_to_cpu and check_slack args | ||
20 | * TODO move slack completion into release | ||
21 | * TODO fix concurrent arms | ||
22 | * TODO slack and BE servers, include slack higher prio | ||
23 | * TODO start servers should no longer be necessary | ||
24 | * TODO harmonize order of method arguments | ||
25 | * TODO test crazy task_new hack | ||
26 | * TODO remove bheap_node_in_heap check in litmus_exit_task | ||
27 | */ | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/uaccess.h> | ||
30 | #include <linux/percpu.h> | ||
31 | #include <linux/spinlock.h> | ||
32 | #include <linux/ctype.h> | ||
33 | #include <linux/sched.h> | ||
34 | #include <linux/hrtimer.h> | ||
35 | |||
36 | #include <litmus/litmus.h> | ||
37 | #include <litmus/bheap.h> | ||
38 | #include <litmus/jobs.h> | ||
39 | #include <litmus/litmus_proc.h> | ||
40 | #include <litmus/sched_plugin.h> | ||
41 | #include <litmus/edf_common.h> | ||
42 | #include <litmus/sched_trace.h> | ||
43 | #include <litmus/servers.h> | ||
44 | #define DEBUG_EDF_HSB | ||
45 | |||
46 | /* DOES NOT WORK */ | ||
47 | //#define SLACK_ON_MASTER | ||
48 | |||
49 | #define BE_PROC_NAME "be_servers" | ||
50 | #define HRT_PROC_NAME "hrt_servers" | ||
51 | #define BE_SERVER_BASE 100 | ||
52 | #define IDLE_SLACK_BASE 1000 | ||
53 | #define SLACK_MIN NSEC_PER_MSEC | ||
54 | |||
55 | /* SCHED_TRACE action events */ | ||
56 | #define SERVER_COMPLETED_ACTION 1 | ||
57 | #define SERVER_RELEASED_ACTION 2 | ||
58 | #define NO_SLACK_ACTION 3 | ||
59 | #define SLACK_RUN_ACTION 4 | ||
60 | #define SLACK_STOP_ACTION 5 | ||
61 | #define SLACK_RECLAIM_ACTION 6 | ||
62 | #define SLACK_EXPIRED_ACTION 7 | ||
63 | #define SLACK_DONATED_ACTION 8 | ||
64 | #define CANDIDATE_ADDED_ACTION 9 | ||
65 | |||
66 | /* Swap in the commented-out block below for human-readable (ms) time */ | ||
67 | #define TIME(x) \ | ||
68 | (x) | ||
69 | /* ({lt_t y = x; \ */ | ||
70 | /* do_div(y, NSEC_PER_MSEC); \ */ | ||
71 | /* y;}) */ | ||
72 | #define TRACE_TIMER(fmt, args...) \ | ||
73 | sched_trace_log_message("%d P%d*[%s@%s:%d]: " fmt " at %llu\n", \ | ||
74 | TRACE_ARGS, ## args, TIME(litmus_clock())) | ||
75 | #define TRACE_TASK_TIMER(t, fmt, args...) \ | ||
76 | TRACE_TIMER("(%s/%d:%d) " fmt, (t)->comm, (t)->pid, \ | ||
77 | (t)->rt_param.job_params.job_no, ## args) | ||
78 | |||
79 | /* | ||
80 | * Useful debugging macros. Remove for actual use as they cause | ||
81 | * a lot of lock contention. | ||
82 | */ | ||
83 | #ifdef DEBUG_EDF_HSB | ||
84 | |||
85 | #define TRACE_SUB(fmt, args...) \ | ||
86 | sched_trace_log_message("%d P%d [%s@%s:%d]: " fmt "\n", \ | ||
87 | TRACE_ARGS, ## args) | ||
88 | #define TRACE_TASK_SUB(t, fmt, args...) \ | ||
89 | TRACE_SUB(TASK_FMT " " fmt, TASK_ARGS(t), ##args) | ||
90 | #define TRACE_SERVER_SUB(s, fmt, args...) \ | ||
91 | TRACE_SUB(SERVER_FMT " " fmt, SERVER_ARGS(s), ##args) | ||
92 | #define TRACE_TASK_SERVER_SUB(t, s, fmt, args...) \ | ||
93 | TRACE_TASK_SUB(t, SERVER_FMT " " fmt, SERVER_ARGS(s), ##args) | ||
94 | #else | ||
95 | #define TRACE_SUB(fmt, args...) | ||
96 | #define TRACE_TASK_SUB(t, fmt, args...) | ||
97 | #define TRACE_SERVER_SUB(s, fmt, args...) | ||
98 | #define TRACE_TASK_SERVER_SUB(t, s, fmt, args...) | ||
99 | #endif | ||
100 | |||
101 | /* | ||
102 | * Different types of servers | ||
103 | */ | ||
104 | typedef enum { | ||
105 | S_HRT, | ||
106 | S_SRT, | ||
107 | S_BE, | ||
108 | S_SLACK | ||
109 | } server_type_t; | ||
110 | |||
111 | /* | ||
112 | * A server running HRT tasks | ||
113 | */ | ||
114 | typedef struct { | ||
115 | server_t server; | ||
116 | rt_domain_t hrt_domain; /* EDF for HRT tasks assigned here */ | ||
117 | int ready; /* False if waiting for next release */ | ||
118 | int no_slack; | ||
119 | struct hrtimer slack_timer; /* Server has no slack when: | ||
120 | * (deadline - budget) <= current_time. | ||
121 | */ | ||
122 | struct hrtimer_start_on_info slack_timer_info; | ||
123 | } hrt_server_t; | ||
124 | |||
125 | /* | ||
126 | * State of a single CPU | ||
127 | */ | ||
128 | typedef struct { | ||
129 | int cpu; | ||
130 | struct task_struct* scheduled; /* Task that actually is running */ | ||
131 | struct task_struct* linked; /* Task that should be running */ | ||
132 | server_t *scheduled_server; | ||
133 | server_t *linked_server; /* The server running on this cpu. | ||
134 | * Note that what it is 'running' is | ||
135 | * linked, not scheduled. | ||
136 | */ | ||
137 | hrt_server_t hrt_server; /* One HRT server per CPU */ | ||
138 | struct bheap_node* hn; /* For the cpu_heap */ | ||
139 | } cpu_entry_t; | ||
140 | |||
141 | /* | ||
142 | * Data assigned to each task | ||
143 | */ | ||
144 | typedef struct task_data { | ||
145 | server_t *srt_server; /* If the task is SRT, its server */ | ||
146 | struct list_head candidate_list; /* List of slack candidates */ | ||
147 | struct task_struct *owner; | ||
148 | } task_data_t; | ||
149 | |||
150 | /* CPU state */ | ||
151 | DEFINE_PER_CPU_SHARED_ALIGNED(cpu_entry_t, noslack_cpu_entries); | ||
152 | static struct bheap cpu_heap; | ||
153 | static struct bheap_node cpu_heap_node[NR_CPUS]; | ||
154 | /* Task domains */ | ||
155 | static rt_domain_t srt_domain; | ||
156 | static rt_domain_t be_domain; | ||
157 | /* Useful tools for server scheduling */ | ||
158 | static server_domain_t server_domain; | ||
159 | /* BE server support */ | ||
160 | static struct list_head be_servers; | ||
161 | static struct bheap be_ready_servers; | ||
162 | /* Slack support */ | ||
163 | static struct list_head slack_queue; | ||
164 | static struct list_head slack_candidates; | ||
165 | /* CPU which will release tasks and global servers */ | ||
166 | static int edf_hsb_release_master; | ||
167 | /* Cache to store task_data structs */ | ||
168 | static struct kmem_cache *task_data_cache; | ||
169 | |||
170 | static struct proc_dir_entry *edf_hsb_proc_dir = NULL; | ||
171 | static struct sched_plugin edf_hsb_plugin __cacheline_aligned_in_smp; | ||
172 | |||
173 | #define task_sched_entry(task) (&per_cpu(noslack_cpu_entries, task_cpu(task))) | ||
174 | #define task_linked_entry(task) (&per_cpu(noslack_cpu_entries, task->rt_param.linked_on)) | ||
175 | #define task_job_no(task) (tsk_rt(task)->job_params.job_no) | ||
176 | #define task_data(task) ((task_data_t*)tsk_rt(task)->plugin_data) | ||
177 | #define task_srt_server(task) ((server_t*)task_data(task)->srt_server) | ||
178 | #define server_slack(s) ((server_t*)(s)->data) | ||
179 | #define server_has_slack(s) (server_slack(s)->deadline != 0) | ||
180 | #define local_cpu_entry (&__get_cpu_var(noslack_cpu_entries)) | ||
181 | #define global_lock (&srt_domain.ready_lock) | ||
182 | #define is_active_plugin (litmus == &edf_hsb_plugin) | ||
183 | |||
184 | /* | ||
185 | * This only works if items are deleted with list_del_init. | ||
186 | */ | ||
187 | static inline int head_in_list(struct list_head *head) | ||
188 | { | ||
189 | BUG_ON(!head); | ||
190 | return !(head->next == head->prev && head->prev == head); | ||
191 | } | ||
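The self-referential test above only stays sound because list_del_init() re-points a removed entry at itself; a plain list_del() would leave stale pointers behind and the check would misfire. A minimal user-space sketch of the invariant (hypothetical, not part of this patch):

#include <assert.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
        n->next = h->next; n->prev = h;
        h->next->prev = n; h->next = n;
}

static void list_del_init(struct list_head *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        INIT_LIST_HEAD(n);              /* re-point the entry at itself */
}

static int head_in_list(struct list_head *h)
{
        return !(h->next == h->prev && h->prev == h);
}

int main(void)
{
        struct list_head queue, node;
        INIT_LIST_HEAD(&queue);
        INIT_LIST_HEAD(&node);
        assert(!head_in_list(&node));   /* self-linked: not queued */
        list_add(&node, &queue);
        assert(head_in_list(&node));    /* linked into the queue */
        list_del_init(&node);
        assert(!head_in_list(&node));   /* safe to test again */
        printf("ok\n");
        return 0;
}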
192 | |||
193 | /* | ||
194 | * Returns slack server running the task or NULL if N/A. | ||
195 | */ | ||
196 | static inline server_t* task_slack_server(struct task_struct *task) | ||
197 | { | ||
198 | server_t *slack_server = NULL; | ||
199 | if (task->rt_param.linked_on != NO_CPU) { | ||
200 | slack_server = task_linked_entry(task)->linked_server; | ||
201 | if (slack_server->type != S_SLACK) | ||
202 | slack_server = NULL; | ||
203 | } | ||
204 | return slack_server; | ||
205 | } | ||
206 | |||
207 | static task_data_t* task_data_alloc(int gfp_flags) | ||
208 | { | ||
209 | return kmem_cache_alloc(task_data_cache, gfp_flags); | ||
210 | } | ||
211 | |||
212 | static void task_data_free(task_data_t* data) | ||
213 | { | ||
214 | kmem_cache_free(task_data_cache, data); | ||
215 | } | ||
216 | |||
217 | /* | ||
218 | * Donating servers pre-allocate a server for slack to avoid runtime | ||
219 | * calls to kmalloc. | ||
220 | */ | ||
221 | static void server_slack_create(server_t *donator) | ||
222 | { | ||
223 | server_t *slack = server_alloc(GFP_ATOMIC); | ||
224 | |||
225 | server_init(slack, &server_domain, -donator->id, 0, 0, 1); | ||
226 | slack->type = S_SLACK; | ||
227 | slack->data = donator; | ||
228 | donator->data = slack; | ||
229 | } | ||
230 | |||
231 | |||
232 | static void server_slack_destroy(server_t *donator) | ||
233 | { | ||
234 | server_t *slack = (server_t*)donator->data; | ||
235 | |||
236 | donator->data = NULL; | ||
237 | server_destroy(slack); | ||
238 | server_free(slack); | ||
239 | } | ||
240 | |||
241 | static void remove_slack(server_t *slack) | ||
242 | { | ||
243 | if (!slack) | ||
244 | return; | ||
245 | TRACE_SERVER_SUB(slack, "slack removed"); | ||
246 | //////sched_trace_action(NULL, SLACK_EXPIRED_ACTION); | ||
247 | |||
248 | if (head_in_list(&slack->list)) | ||
249 | list_del_init(&slack->list); | ||
250 | slack->deadline = 0; | ||
251 | slack->budget = 0; | ||
252 | slack->wcet = 0; | ||
253 | } | ||
254 | |||
255 | /* | ||
256 | * Slack queue is EDF. | ||
257 | */ | ||
258 | static void add_slack(server_t *slack) | ||
259 | { | ||
260 | struct list_head *pos; | ||
261 | server_t *queued; | ||
262 | |||
263 | TRACE_SERVER_SUB(slack, "slack added"); | ||
264 | |||
265 | if (head_in_list(&slack->list)) { | ||
266 | TRACE_SERVER_SUB(slack, "already in list"); | ||
267 | return; | ||
268 | } | ||
269 | |||
270 | list_for_each_prev(pos, &slack_queue) { | ||
271 | queued = list_entry(pos, server_t, list); | ||
272 | if (lt_before_eq(queued->deadline, slack->deadline)) { | ||
273 | __list_add(&slack->list, pos, pos->next); | ||
274 | return; | ||
275 | } | ||
276 | } | ||
277 | list_add(&slack->list, &slack_queue); | ||
278 | } | ||
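The backwards scan makes the EDF insert stable: a new slack server whose deadline ties a queued one lands behind it, so equal-deadline donations are consumed FIFO. The same scan over a plain array, as a user-space sketch with made-up deadlines:

#include <stdio.h>

typedef unsigned long long lt_t;

int main(void)
{
        lt_t queue[8] = { 10, 20, 30 }; /* deadlines already in EDF order */
        int len = 3, pos;
        lt_t dl = 20;                   /* new slack server's deadline */

        /* Scan from the tail; stop at the first queued deadline <= dl */
        for (pos = len; pos > 0 && queue[pos - 1] > dl; pos--)
                queue[pos] = queue[pos - 1];
        queue[pos] = dl;                /* lands *after* the queued 20 */
        len++;

        for (pos = 0; pos < len; pos++)
                printf("%llu ", queue[pos]);
        printf("\n");                   /* prints: 10 20 20 30 */
        return 0;
}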
279 | |||
280 | static inline struct task_struct* get_candidate(struct list_head *pos) | ||
281 | { | ||
282 | struct task_struct *task = NULL; | ||
283 | task_data_t *data; | ||
284 | if (!list_empty(pos)) { | ||
285 | data = list_entry(pos, task_data_t, candidate_list); | ||
286 | task = data->owner; | ||
287 | } | ||
288 | return task; | ||
289 | } | ||
290 | |||
291 | static inline lt_t real_deadline(struct task_struct *task) | ||
292 | { | ||
293 | server_t *server = task_srt_server(task); | ||
294 | int job_diff = server->job_no - task_job_no(task); | ||
295 | return get_deadline(task) - job_diff * get_rt_period(task); | ||
296 | } | ||
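In other words, real_deadline() backs the task's nominal deadline up by one period for every job its server has completed ahead of it, so slack candidates are ordered by when they actually needed to finish. A worked example with assumed numbers:

#include <stdio.h>

typedef unsigned long long lt_t;

int main(void)
{
        lt_t deadline = 100, period = 10;       /* nominal deadline, period */
        int server_job_no = 7, task_job_no = 5;
        int job_diff = server_job_no - task_job_no;     /* 2 jobs ahead */

        printf("real deadline: %llu\n",
               deadline - (lt_t)job_diff * period);     /* 100 - 20 = 80 */
        return 0;
}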
297 | |||
298 | /* | ||
299 | * Candidate queue is EDF. | ||
300 | */ | ||
301 | static void add_slack_candidate(struct task_struct *task) | ||
302 | { | ||
303 | struct list_head *pos; | ||
304 | struct task_struct *queued; | ||
305 | |||
306 | TRACE_TASK_SUB(task, "candidate added"); | ||
307 | |||
308 | list_for_each_prev(pos, &slack_candidates) { | ||
309 | queued = get_candidate(pos); | ||
310 | if (lt_before_eq(real_deadline(queued), real_deadline(task))) { | ||
311 | __list_add(&task_data(task)->candidate_list, | ||
312 | pos, pos->next); | ||
313 | return; | ||
314 | } | ||
315 | } | ||
316 | list_add(&task_data(task)->candidate_list, &slack_candidates); | ||
317 | } | ||
318 | |||
319 | static void donate_slack(server_t *donator) | ||
320 | { | ||
321 | server_t *slack = (server_t*)donator->data; | ||
322 | hrt_server_t *hrt_server; | ||
323 | |||
324 | TRACE_SERVER_SUB(donator, "%llu slack donated", TIME(donator->budget)); | ||
325 | |||
326 | if (donator->type == S_HRT) { | ||
327 | hrt_server = container_of(donator, hrt_server_t, server); | ||
328 | BUG_ON(!hrt_server->ready); | ||
329 | } | ||
330 | |||
331 | slack->wcet = donator->budget; | ||
332 | slack->budget = donator->budget; | ||
333 | slack->deadline = donator->deadline; | ||
334 | |||
335 | add_slack(slack); | ||
336 | } | ||
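The hand-off itself is just a copy: whatever budget the donator has left becomes both the wcet and the budget of its pre-allocated slack server, and the donation inherits the donator's deadline for EDF ordering. Sketched with a hypothetical user-space struct:

#include <stdio.h>

typedef unsigned long long lt_t;

struct fake_server { lt_t wcet, budget, deadline; };

int main(void)
{
        struct fake_server donator = { 10, 4, 50 }, slack = { 0, 0, 0 };

        slack.wcet = donator.budget;        /* 4: amount donated */
        slack.budget = donator.budget;      /* 4: amount still usable */
        slack.deadline = donator.deadline;  /* 50: EDF position */

        printf("slack: wcet=%llu budget=%llu dead=%llu\n",
               slack.wcet, slack.budget, slack.deadline);
        return 0;
}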
337 | |||
338 | #if 0 | ||
339 | /* | ||
340 | * Donate any available slack from a server. | ||
341 | */ | ||
342 | static noinline void check_donate_slack(server_t *donator, struct task_struct *was_scheduled) | ||
343 | { | ||
344 | server_t *slack = server_slack(donator); | ||
345 | hrt_server_t *hrt_server; | ||
346 | int donate = 0; | ||
347 | |||
348 | TRACE_SERVER_SUB(donator, "checking donation"); | ||
349 | |||
350 | if (!slack) | ||
351 | return; | ||
352 | |||
353 | /* Donating small amounts of slack will result in excess migrations */ | ||
354 | if (donator->budget < SLACK_MIN) | ||
355 | return; | ||
356 | |||
357 | if (server_has_slack(donator)) { | ||
358 | TRACE_SERVER_SUB(donator, "dead: %llu, rel: %llu, job: %d already donated", | ||
359 | slack->deadline, slack->release, slack->job_no); | ||
360 | return; | ||
361 | } | ||
362 | |||
363 | if (donator->type == S_HRT) { | ||
364 | hrt_server = container_of(donator, hrt_server_t, server); | ||
365 | } | ||
366 | |||
367 | /* Donate if the server is waiting for a task release */ | ||
368 | if ((donator->type == S_SRT && | ||
369 | donator->job_no <= task_job_no(was_scheduled)) || | ||
370 | (donator->type == S_HRT && | ||
371 | hrt_server->no_slack && hrt_server->ready && | ||
372 | !__jobs_pending(&hrt_server->hrt_domain)) || | ||
373 | (donator->type == S_BE && | ||
374 | !__jobs_pending(&be_domain))) { | ||
375 | donate = 1; | ||
376 | } | ||
377 | |||
378 | if (!donate) | ||
379 | return; | ||
380 | |||
381 | ////sched_trace_action(was_scheduled, SLACK_DONATED_ACTION); | ||
382 | |||
383 | donate_slack(donator); | ||
384 | } | ||
385 | |||
386 | #else | ||
387 | #define check_donate_slack(a, b) | ||
388 | #endif | ||
389 | |||
390 | /* | ||
391 | * Adds the task to the candidate queue if it is eligible for slack stealing. | ||
392 | */ | ||
393 | static void check_slack_candidate(struct task_struct *task) | ||
394 | { | ||
395 | TRACE_TASK_SUB(task, "checking for candidate"); | ||
396 | if (is_srt(task) && | ||
397 | /* The task has been synchronously released */ | ||
398 | task_job_no(task) > 2 && | ||
399 | /* The SRT task is behind its server */ | ||
400 | task_srt_server(task)->job_no > task_job_no(task) && | ||
401 | /* The task hasn't already been added to the list */ | ||
402 | !head_in_list(&task_data(task)->candidate_list)) { | ||
403 | |||
404 | add_slack_candidate(task); | ||
405 | } else if (is_srt(task) && | ||
406 | is_released(task, litmus_clock()) && | ||
407 | !is_queued(task)) { | ||
408 | TRACE_TASK_SUB(task, "candidate has been released!"); | ||
409 | __add_ready(&srt_domain, task); | ||
410 | } | ||
411 | } | ||
412 | |||
413 | /* | ||
414 | * Returns the next eligible slack server. This will remove any expired | ||
415 | * slack servers still present in the list. | ||
416 | */ | ||
417 | static noinline server_t* next_eligible_slack_server(void) | ||
418 | { | ||
419 | server_t *next_slack = NULL; | ||
420 | lt_t now = litmus_clock(); | ||
421 | |||
422 | while (!list_empty(&slack_queue)) { | ||
423 | next_slack = list_entry(slack_queue.next, server_t, list); | ||
424 | BUG_ON(!next_slack); | ||
425 | |||
426 | if (lt_after(next_slack->deadline, now) && | ||
427 | lt_after(next_slack->budget, SLACK_MIN) && | ||
428 | !is_server_linked(next_slack)) { | ||
429 | break; | ||
430 | } else { | ||
431 | /* Slack has expired or has too little time */ | ||
432 | BUG_ON(next_slack->id >= IDLE_SLACK_BASE); /* idle slack never expires */ | ||
433 | remove_slack(next_slack); | ||
434 | next_slack = NULL; | ||
435 | } | ||
436 | } | ||
437 | |||
438 | return next_slack; | ||
439 | } | ||
440 | |||
441 | /* | ||
442 | * Returns the next SRT task that is tardy or will be tardy. If none | ||
443 | * are available, will return a tardy BE task if present. | ||
444 | */ | ||
445 | static noinline struct task_struct* next_eligible_slack(void) | ||
446 | { | ||
447 | struct task_struct *next = get_candidate(slack_candidates.next); | ||
448 | |||
449 | while (next && task_srt_server(next)->job_no <= task_job_no(next)) { | ||
450 | list_del_init(&task_data(next)->candidate_list); | ||
451 | next = get_candidate(slack_candidates.next); | ||
452 | } | ||
453 | |||
454 | /* We couldn't find an SRT to schedule. Find a BE which is | ||
455 | * either tardy or cannot run due to a lack of servers. | ||
456 | */ | ||
457 | if (!next) { | ||
458 | next = __peek_ready(&be_domain); | ||
459 | } | ||
460 | |||
461 | return next; | ||
462 | } | ||
463 | |||
464 | /* | ||
465 | * Order BE tasks FIFO. | ||
466 | */ | ||
467 | static inline int be_higher_prio(struct task_struct *first, struct task_struct *second) | ||
468 | { | ||
469 | return lt_before(get_release(first), get_release(second)) || | ||
470 | |||
471 | /* Break by PID */ | ||
472 | (get_release(first) == get_release(second) && | ||
473 | (first->pid < second->pid)); | ||
474 | } | ||
475 | |||
476 | static int be_ready_order(struct bheap_node *a, struct bheap_node *b) | ||
477 | { | ||
478 | struct task_struct *first, *second; | ||
479 | first = bheap2task(a); | ||
480 | second = bheap2task(b); | ||
481 | if (!first || !second) | ||
482 | return first && !second; | ||
483 | return be_higher_prio(first, second); | ||
484 | } | ||
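Because BE jobs are ordered FIFO by release time with the PID tie-break, two jobs released at the same instant are always served in ascending PID order. A quick user-space check of the predicate (fake task type, illustration only):

#include <stdio.h>

typedef unsigned long long lt_t;

struct fake_task { lt_t release; int pid; };

static int be_higher_prio(struct fake_task *a, struct fake_task *b)
{
        return a->release < b->release ||
               (a->release == b->release && a->pid < b->pid);
}

int main(void)
{
        struct fake_task t1 = { 100, 42 }, t2 = { 100, 17 };

        printf("t2 before t1: %d\n", be_higher_prio(&t2, &t1)); /* 1 */
        printf("t1 before t2: %d\n", be_higher_prio(&t1, &t2)); /* 0 */
        return 0;
}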
485 | |||
486 | /* | ||
487 | * Order servers by EDF. | ||
488 | */ | ||
489 | static inline int server_higher_prio(server_t *first, server_t *second) | ||
490 | { | ||
491 | return lt_before(first->deadline, second->deadline) || | ||
492 | /* Break by id */ | ||
493 | (first->deadline == second->deadline && | ||
494 | first->id < second->id); | ||
495 | } | ||
496 | |||
497 | static int server_order(struct bheap_node *a, struct bheap_node *b) | ||
498 | { | ||
499 | server_t *first, *second; | ||
500 | first = a->value; | ||
501 | second = b->value; | ||
502 | return server_higher_prio(first, second); | ||
503 | } | ||
504 | |||
505 | /* | ||
506 | * Order CPU's by deadlines of their servers. | ||
507 | */ | ||
508 | static int cpu_lower_prio(struct bheap_node *a, struct bheap_node *b) | ||
509 | { | ||
510 | cpu_entry_t *first, *second; | ||
511 | first = a->value; | ||
512 | second = b->value; | ||
513 | if (first->linked && second->linked) { | ||
514 | return !server_higher_prio(first->linked_server, | ||
515 | second->linked_server); | ||
516 | } | ||
517 | return second->linked && !first->linked; | ||
518 | } | ||
519 | |||
520 | /* | ||
521 | * Move the CPU entry to the correct position in the queue. | ||
522 | */ | ||
523 | static inline void update_cpu_position(cpu_entry_t *entry) | ||
524 | { | ||
525 | if (likely(bheap_node_in_heap(entry->hn))) | ||
526 | bheap_delete(cpu_lower_prio, &cpu_heap, entry->hn); | ||
527 | /* Don't leave HRT CPUs in the heap as its order only matters | ||
528 | * for global preempts. | ||
529 | */ | ||
530 | if (!entry->linked || !is_hrt(entry->linked)) | ||
531 | bheap_insert(cpu_lower_prio, &cpu_heap, entry->hn); | ||
532 | } | ||
533 | |||
534 | static inline cpu_entry_t* lowest_prio_cpu(void) | ||
535 | { | ||
536 | struct bheap_node *hn = bheap_peek(cpu_lower_prio, &cpu_heap); | ||
537 | return (hn) ? hn->value : NULL; | ||
538 | } | ||
539 | |||
540 | static inline int check_hrt_server_initialized(hrt_server_t *hrt_server) | ||
541 | { | ||
542 | return hrt_server->server.wcet && hrt_server->server.period; | ||
543 | } | ||
544 | |||
545 | /* | ||
546 | * Arms the slack timer for the server, if necessary. | ||
547 | */ | ||
548 | static void slack_timer_arm(hrt_server_t *hrt_server) | ||
549 | { | ||
550 | int cpu, err; | ||
551 | cpu_entry_t *entry; | ||
552 | struct hrtimer *timer; | ||
553 | lt_t now = litmus_clock(), when_to_fire; | ||
554 | |||
555 | if (!check_hrt_server_initialized(hrt_server)) { | ||
556 | TRACE_SERVER_SUB(&hrt_server->server, "not initialized"); | ||
557 | return; | ||
558 | } | ||
559 | |||
560 | timer = &hrt_server->slack_timer; | ||
561 | entry = container_of(hrt_server, cpu_entry_t, hrt_server); | ||
562 | |||
563 | #ifdef SLACK_ON_MASTER | ||
564 | if (edf_hsb_release_master != NO_CPU) | ||
565 | cpu = edf_hsb_release_master; | ||
566 | else | ||
567 | #endif | ||
568 | cpu = entry->cpu; | ||
569 | |||
570 | when_to_fire = hrt_server->server.deadline - hrt_server->server.budget; | ||
571 | |||
572 | /* Ensure the timer is needed */ | ||
573 | if (hrtimer_active(timer) || hrt_server->server.deadline == 0 || | ||
574 | hrt_server->no_slack || hrt_server->server.budget == 0 || | ||
575 | !hrt_server->ready) { | ||
576 | TRACE_SERVER_SUB(&hrt_server->server, | ||
577 | "not arming slack timer on P%d, %d %d %d %d %d", | ||
578 | entry->cpu, | ||
579 | hrtimer_active(timer), hrt_server->server.deadline == 0, | ||
580 | hrt_server->no_slack, hrt_server->server.budget == 0, | ||
581 | !hrt_server->ready); | ||
582 | return; | ||
583 | } | ||
584 | |||
585 | if (when_to_fire >= hrt_server->server.deadline) { | ||
586 | TRACE_SUB("when_to_fire: %llu, dead: %llu, bud: %llu", | ||
587 | when_to_fire, hrt_server->server.deadline, | ||
588 | hrt_server->server.budget); | ||
589 | BUG_ON(1); | ||
590 | } | ||
591 | |||
592 | /* Arm timer */ | ||
593 | if (lt_after_eq(now, when_to_fire)) { | ||
594 | /* 'Fire' immediately */ | ||
595 | TRACE_SERVER_SUB(&hrt_server->server, | ||
596 | "immediate: %llu", when_to_fire); | ||
597 | hrt_server->no_slack = 1; | ||
598 | } else if (cpu != smp_processor_id()) { | ||
599 | err = hrtimer_start_on(cpu, | ||
600 | &hrt_server->slack_timer_info, | ||
601 | &hrt_server->slack_timer, | ||
602 | ns_to_ktime(when_to_fire), | ||
603 | HRTIMER_MODE_ABS_PINNED); | ||
604 | if (err) | ||
605 | TRACE_SERVER_SUB(&hrt_server->server, "failed to arm slack"); | ||
606 | } else { | ||
607 | __hrtimer_start_range_ns(timer, ns_to_ktime(when_to_fire), | ||
608 | 0, HRTIMER_MODE_ABS_PINNED, 0); | ||
609 | } | ||
610 | |||
611 | TRACE_SUB("slack timer %p armed to fire at %llu on P%d", | ||
612 | timer, TIME(when_to_fire), entry->cpu); | ||
613 | } | ||
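The firing point is the server's zero-slack instant, (deadline - budget): past that time the remaining budget exactly fills the window up to the deadline, so HRT work can no longer be deferred. With assumed numbers:

#include <stdio.h>

typedef unsigned long long lt_t;

int main(void)
{
        lt_t deadline = 50, budget = 20, now = 35;
        lt_t when_to_fire = deadline - budget;

        printf("zero-slack instant: %llu\n", when_to_fire);  /* 30 */
        if (now >= when_to_fire)
                printf("too late to arm: set no_slack now\n");
        return 0;
}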
614 | |||
615 | /* | ||
616 | * Does nothing if the slack timer is not armed. | ||
617 | */ | ||
618 | static inline void slack_timer_cancel(hrt_server_t *hrt_server) | ||
619 | { | ||
620 | int ret; | ||
621 | if (hrtimer_active(&hrt_server->slack_timer)) { | ||
622 | ret = hrtimer_try_to_cancel(&hrt_server->slack_timer); | ||
623 | if (ret == -1) { | ||
624 | TRACE_SERVER_SUB(&hrt_server->server, | ||
625 | "slack timer was running concurrently"); | ||
626 | } else { | ||
627 | TRACE_SERVER_SUB(&hrt_server->server, | ||
628 | "slack timer cancelled"); | ||
629 | } | ||
630 | } else { | ||
631 | TRACE_SERVER_SUB(&hrt_server->server, "slack not active"); | ||
632 | } | ||
633 | } | ||
634 | |||
635 | /* | ||
636 | * Handles subtraction of lt_t without underflows. | ||
637 | */ | ||
638 | static inline lt_t lt_subtract(lt_t a, lt_t b) | ||
639 | { | ||
640 | long long sub = (long long)a - (long long)b; | ||
641 | if (sub >= 0) | ||
642 | return sub; | ||
643 | else | ||
644 | return 0; | ||
645 | } | ||
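Since lt_t is unsigned, a bare (a - b) with b > a would wrap around to an enormous positive value rather than going negative; the helper clamps the result at zero instead. A self-contained check:

#include <stdio.h>

typedef unsigned long long lt_t;

static lt_t lt_subtract(lt_t a, lt_t b)
{
        long long sub = (long long)a - (long long)b;
        return sub >= 0 ? (lt_t)sub : 0;
}

int main(void)
{
        printf("%llu\n", lt_subtract(5, 3));  /* 2 */
        printf("%llu\n", lt_subtract(3, 5));  /* 0: clamped */
        printf("%llu\n", 3ULL - 5ULL);        /* 18446744073709551614: wrapped */
        return 0;
}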
646 | |||
647 | static void requeue_server(server_t *server, lt_t now) | ||
648 | { | ||
649 | int added = 0; | ||
650 | hrt_server_t *hrt_server; | ||
651 | |||
652 | if (server->type == S_SRT) | ||
653 | return; | ||
654 | |||
655 | if (server->type == S_SLACK) { | ||
656 | add_slack(server); | ||
657 | return; | ||
658 | } | ||
659 | |||
660 | if (lt_before(now, server->release)) { | ||
661 | added = add_server_release(server, &server_domain); | ||
662 | } | ||
663 | |||
664 | if (!added) { | ||
665 | /* Mark servers as released */ | ||
666 | if (server->type == S_HRT) { | ||
667 | TRACE_SERVER_SUB(server, "now ready at %llu", now); | ||
668 | hrt_server = container_of(server, hrt_server_t, server); | ||
669 | hrt_server->ready = 1; | ||
670 | remove_slack(server_slack(server)); | ||
671 | hrt_server->no_slack = 0; | ||
672 | ////sched_trace_action(NULL, SERVER_RELEASED_ACTION); | ||
673 | } else if (server->type == S_BE) { | ||
674 | TRACE_SERVER_SUB(server, "BE added to ready"); | ||
675 | bheap_insert(server_order, &be_ready_servers, server->hn); | ||
676 | } | ||
677 | } else { | ||
678 | BUG_ON(bheap_node_in_heap(server->hn)); | ||
679 | } | ||
680 | } | ||
681 | |||
682 | /* | ||
683 | * Absorbs a task's execution time into its donator. | ||
684 | */ | ||
685 | static void reclaim_slack(server_t *slack) | ||
686 | { | ||
687 | lt_t exec; | ||
688 | server_t *donator = server_slack(slack); | ||
689 | |||
690 | if (!donator || lt_before_eq(slack->deadline, litmus_clock())) | ||
691 | return; | ||
692 | |||
693 | /* SRT servers do not ever reclaim slack */ | ||
694 | ////sched_trace_action(NULL, SLACK_RECLAIM_ACTION); | ||
695 | |||
696 | exec = slack->wcet - slack->budget; | ||
697 | TRACE_SERVER_SUB(donator, "reclaiming %llu slack", TIME(exec)); | ||
698 | |||
699 | BUG_ON(is_server_linked(donator)); | ||
700 | BUG_ON(!slack->wcet); | ||
701 | BUG_ON(!donator->budget); | ||
702 | |||
703 | donator->budget = lt_subtract(donator->budget, exec); | ||
704 | slack->wcet = slack->budget; | ||
705 | |||
706 | /* If budget exhausted, server needs to wait for next release */ | ||
707 | if (!donator->budget) { | ||
708 | TRACE_SERVER_SUB(donator, "exhausted by slack"); | ||
709 | } | ||
710 | } | ||
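Reclaiming charges the donator for exactly the time its slack was consumed: exec is the gap between what was donated (wcet) and what remains (budget), and resetting wcet to budget prevents the same execution from being charged twice. With hypothetical numbers:

#include <stdio.h>

typedef unsigned long long lt_t;

static lt_t lt_subtract(lt_t a, lt_t b)
{
        long long sub = (long long)a - (long long)b;
        return sub >= 0 ? (lt_t)sub : 0;
}

int main(void)
{
        lt_t slack_wcet = 4, slack_budget = 3, donator_budget = 10;
        lt_t exec = slack_wcet - slack_budget;          /* 1 consumed */

        donator_budget = lt_subtract(donator_budget, exec);
        slack_wcet = slack_budget;      /* avoid double-charging later */

        printf("donator budget left: %llu\n", donator_budget);  /* 9 */
        return 0;
}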
711 | |||
712 | /* | ||
713 | * Begins server execution and arms any timers necessary. | ||
714 | */ | ||
715 | static noinline void link_server(cpu_entry_t *entry, | ||
716 | server_t *next_server) | ||
717 | { | ||
718 | |||
719 | if (entry->linked) { | ||
720 | /* Massive state check */ | ||
721 | if (next_server->type == S_SRT) { | ||
722 | /* SRT task cannot get ahead of its server */ | ||
723 | BUG_ON(next_server->job_no + 1 < task_job_no(entry->linked)); | ||
724 | BUG_ON(lt_after(get_deadline(entry->linked), | ||
725 | next_server->deadline)); | ||
726 | } else if (next_server->type == S_HRT) { | ||
727 | /* HRT servers should never, ever migrate */ | ||
728 | BUG_ON(entry->cpu != task_cpu(entry->linked)); | ||
729 | BUG_ON(!entry->hrt_server.ready); | ||
730 | } else if (next_server->type == S_SLACK) { | ||
731 | /* Should have already been removed from slack list */ | ||
732 | BUG_ON(head_in_list(&task_data(entry->linked)->candidate_list)); | ||
733 | BUG_ON(is_be(entry->linked) && is_queued(entry->linked)); | ||
734 | ////sched_trace_action(entry->linked, SLACK_RUN_ACTION); | ||
735 | BUG_ON(is_srt(entry->linked) && | ||
736 | task_srt_server(entry->linked)->job_no <= | ||
737 | task_job_no(entry->linked)); | ||
738 | } else { /* BE */ | ||
739 | /* Should have already been removed from ready heap */ | ||
740 | BUG_ON(bheap_node_in_heap(next_server->hn)); | ||
741 | BUG_ON(is_queued(entry->linked)); | ||
742 | ////sched_trace_action(entry->linked, next_server->id); | ||
743 | } | ||
744 | |||
745 | if (next_server->type != S_SLACK && | ||
746 | (head_in_list(&server_slack(next_server)->list))) { | ||
747 | remove_slack(server_slack(next_server)); | ||
748 | } | ||
749 | |||
750 | entry->linked_server = next_server; | ||
751 | server_run(entry->linked_server, entry->linked); | ||
752 | } | ||
753 | |||
754 | /* Timer necessary whenever an HRT is not running */ | ||
755 | if (!entry->linked || !is_hrt(entry->linked)) | ||
756 | slack_timer_arm(&entry->hrt_server); | ||
757 | else | ||
758 | slack_timer_cancel(&entry->hrt_server); | ||
759 | } | ||
760 | |||
761 | /* | ||
762 | * Stops server execution and timers. This will also re-add servers | ||
763 | * to any collections they should be members of. | ||
764 | */ | ||
765 | static noinline void unlink_server(cpu_entry_t *entry, int requeue) | ||
766 | { | ||
767 | server_t *server = entry->linked_server; | ||
768 | hrt_server_t *hrt_server = &entry->hrt_server; | ||
769 | |||
770 | BUG_ON(!entry->linked_server); | ||
771 | |||
772 | server_stop(entry->linked_server); | ||
773 | server = entry->linked_server; | ||
774 | entry->linked_server = NULL; | ||
775 | |||
776 | if (!requeue) | ||
777 | return; | ||
778 | |||
779 | if (server->type == S_SLACK && server->deadline) { | ||
780 | add_slack(server); | ||
781 | ////sched_trace_action(entry->linked, SLACK_STOP_ACTION); | ||
782 | |||
783 | /* Donator needs to absorb slack execution time */ | ||
784 | reclaim_slack(server); | ||
785 | } else if (server->type != S_SRT) { | ||
786 | requeue_server(server, litmus_clock()); | ||
787 | } | ||
788 | |||
789 | if (server->type == S_HRT && hrt_server->ready) | ||
790 | BUG_ON(head_in_list(&server_slack(server)->list)); | ||
791 | } | ||
792 | |||
793 | static void requeue(struct task_struct *task, rt_domain_t *domain); | ||
794 | static inline rt_domain_t* get_rt_domain(cpu_entry_t *entry, struct task_struct *task); | ||
795 | |||
796 | /* Update the link of a CPU. | ||
797 | * Handles the case where the to-be-linked task is already | ||
798 | * scheduled on a different CPU. The last argument is only needed | ||
799 | * for BE tasks as their servers can't be determined here. | ||
800 | */ | ||
801 | static noinline void link_to_cpu(cpu_entry_t *entry, | ||
802 | struct task_struct* linked, | ||
803 | server_t* next_server) | ||
804 | { | ||
805 | cpu_entry_t *sched; | ||
806 | server_t *tmp_server; | ||
807 | struct task_struct *tmp_task; | ||
808 | int on_cpu; | ||
809 | |||
810 | BUG_ON(linked && !is_realtime(linked)); | ||
811 | BUG_ON(linked && is_hrt(linked) && entry->cpu != task_cpu(linked)); | ||
812 | BUG_ON(entry->cpu == edf_hsb_release_master); | ||
813 | |||
814 | if (linked) | ||
815 | TRACE_TASK_SERVER_SUB(linked, next_server, "linking to P%d", | ||
816 | entry->cpu); | ||
817 | |||
818 | /* Currently linked task is set to be unlinked. */ | ||
819 | if (entry->linked) { | ||
820 | unlink_server(entry, 1); | ||
821 | entry->linked->rt_param.linked_on = NO_CPU; | ||
822 | entry->linked = NULL; | ||
823 | } | ||
824 | |||
825 | /* Link new task to CPU. */ | ||
826 | if (linked) { | ||
827 | set_rt_flags(linked, RT_F_RUNNING); | ||
828 | /* Handle task is already scheduled somewhere! */ | ||
829 | on_cpu = linked->rt_param.scheduled_on; | ||
830 | if (on_cpu != NO_CPU) { | ||
831 | sched = &per_cpu(noslack_cpu_entries, on_cpu); | ||
832 | /* This should only happen if not linked already */ | ||
833 | BUG_ON(sched->linked == linked); | ||
834 | |||
835 | if (entry != sched && | ||
836 | sched->linked && is_hrt(sched->linked)) { | ||
837 | /* We are already scheduled on a CPU with an HRT */ | ||
838 | TRACE_TASK_SUB(linked, | ||
839 | "cannot move to scheduled CPU P%d", | ||
840 | sched->cpu); | ||
841 | |||
842 | requeue_server(next_server, litmus_clock()); | ||
843 | requeue(linked, get_rt_domain(entry, linked)); | ||
844 | |||
845 | linked = NULL; | ||
846 | next_server = NULL; | ||
847 | } else if (entry != sched) { | ||
848 | /* Link to the CPU we are scheduled on by swapping | ||
849 | * with that CPU's linked task. | ||
850 | */ | ||
851 | BUG_ON(is_hrt(linked)); | ||
852 | |||
853 | TRACE_TASK_SUB(linked,"already scheduled on P%d", | ||
854 | sched->cpu); | ||
855 | |||
856 | tmp_task = sched->linked; | ||
857 | tmp_server = sched->linked_server; | ||
858 | |||
859 | if (tmp_task) | ||
860 | unlink_server(sched, 0); | ||
861 | |||
862 | linked->rt_param.linked_on = sched->cpu; | ||
863 | sched->linked = linked; | ||
864 | link_server(sched, next_server); | ||
865 | |||
866 | update_cpu_position(sched); | ||
867 | |||
868 | linked = tmp_task; | ||
869 | next_server = tmp_server; | ||
870 | } | ||
871 | } | ||
872 | if (linked) /* Might be NULL due to swap */ | ||
873 | linked->rt_param.linked_on = entry->cpu; | ||
874 | } | ||
875 | entry->linked = linked; | ||
876 | link_server(entry, next_server); | ||
877 | update_cpu_position(entry); | ||
878 | |||
879 | BUG_ON(!entry->linked && entry->linked_server); | ||
880 | |||
881 | if (linked) | ||
882 | TRACE_TASK_SERVER_SUB(linked, next_server, | ||
883 | "linked to %d", entry->cpu); | ||
884 | else | ||
885 | TRACE_SUB("NULL linked to %d", entry->cpu); | ||
886 | } | ||
887 | |||
888 | /* | ||
889 | * Grab the local HRT or global SRT or BE domain for the task. | ||
890 | */ | ||
891 | static inline rt_domain_t* get_rt_domain(cpu_entry_t *entry, | ||
892 | struct task_struct *task) | ||
893 | { | ||
894 | if (is_hrt(task)) | ||
895 | return &entry->hrt_server.hrt_domain; | ||
896 | else if (is_srt(task)) | ||
897 | return &srt_domain; | ||
898 | else /* BE */ | ||
899 | return &be_domain; | ||
900 | } | ||
901 | |||
902 | /* | ||
903 | * Ensures the task is not linked anywhere nor present in any ready queues. | ||
904 | */ | ||
905 | static noinline void unlink(struct task_struct* t) | ||
906 | { | ||
907 | cpu_entry_t *entry; | ||
908 | |||
909 | BUG_ON(!t); | ||
910 | |||
911 | if (t->rt_param.linked_on != NO_CPU) { | ||
912 | /* Unlink */ | ||
913 | entry = task_linked_entry(t); | ||
914 | link_to_cpu(entry, NULL, NULL); | ||
915 | } else if (is_queued(t)) { | ||
916 | entry = task_sched_entry(t); | ||
917 | |||
918 | /* A task that is unlinked due to a slack server must be treated | ||
919 | * differently. It is probably queued in a release_queue, but | ||
920 | * a race condition could allow is_released() to return true | ||
921 | * even when the task has not yet been released. Attempting | ||
922 | * to remove the task in this case would be disastrous. | ||
923 | */ | ||
924 | if (entry->scheduled == t && | ||
925 | entry->scheduled_server && /* Can be NULL on task_new */ | ||
926 | entry->scheduled_server->type == S_SLACK) { | ||
927 | |||
928 | TRACE_TASK_SUB(t, "unlinked on slack server"); | ||
929 | |||
930 | } else if (is_released(t, litmus_clock())) { | ||
931 | /* This is an interesting situation: t is scheduled, | ||
932 | * but has already been unlinked. It was re-added to | ||
933 | * a ready queue of some sort but now needs to | ||
934 | * be removed. This usually happens when a job has | ||
935 | * been preempted but completes before it is | ||
936 | * descheduled. | ||
937 | */ | ||
938 | TRACE_TASK_SUB(t, "removing from domain"); | ||
939 | remove(get_rt_domain(entry, t), t); | ||
940 | BUG_ON(is_queued(t)); | ||
941 | } | ||
942 | } | ||
943 | |||
944 | if (head_in_list(&task_data(t)->candidate_list)) { | ||
945 | list_del_init(&task_data(t)->candidate_list); | ||
946 | } | ||
947 | |||
948 | } | ||
949 | |||
950 | /* | ||
951 | * A job generated by a HRT task is eligible if either the job's deadline | ||
952 | * is earlier than the server's next deadline, or the server has zero slack | ||
953 | * time in its current period. | ||
954 | */ | ||
955 | static inline int is_eligible(struct task_struct *task, | ||
956 | hrt_server_t *hrt_server) | ||
957 | { | ||
958 | TRACE_TASK_SUB(task, "%d %d %llu %llu", | ||
959 | hrt_server->ready, hrt_server->no_slack, | ||
960 | hrt_server->server.deadline, | ||
961 | get_deadline(task)); | ||
962 | return hrt_server->ready && !is_server_linked(&hrt_server->server) && | ||
963 | (hrt_server->no_slack || | ||
964 | lt_after_eq(hrt_server->server.deadline, get_deadline(task))); | ||
965 | } | ||
966 | |||
967 | /* | ||
968 | * Set the server to release at the closest preceding deadline to time. | ||
969 | */ | ||
970 | static inline void catchup_server(server_t *server, lt_t time) | ||
971 | { | ||
972 | lt_t diff, sub; | ||
973 | |||
974 | diff = time - server->deadline; | ||
975 | sub = diff % server->period; | ||
976 | |||
977 | server_release_at(server, time - sub); | ||
978 | TRACE_SERVER_SUB(server, "catching up to %llu", time); | ||
979 | } | ||
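The modulo backs 'time' up onto the server's deadline grid, so the server is re-released at the latest grid point that is not in the future. A worked example with assumed values:

#include <stdio.h>

typedef unsigned long long lt_t;

int main(void)
{
        lt_t deadline = 30, period = 10, now = 47;
        lt_t diff = now - deadline;     /* 17 past the missed deadline */
        lt_t sub = diff % period;       /* 7 into the current period */

        printf("re-release at %llu\n", now - sub);  /* 40 */
        return 0;
}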
980 | |||
981 | static noinline int catchup_srt_server(struct task_struct *task) | ||
982 | { | ||
983 | int jobs, rv = 0; | ||
984 | lt_t release; | ||
985 | lt_t now = litmus_clock(); | ||
986 | server_t *srt_server = task_srt_server(task); | ||
987 | |||
988 | if (lt_before(srt_server->deadline, now) && | ||
989 | srt_server->job_no > 1) { | ||
990 | /* Calculate the number of jobs behind the server is */ | ||
991 | jobs = lt_subtract(now, srt_server->deadline) / | ||
992 | srt_server->period + 1; | ||
993 | |||
994 | /* Get the new release */ | ||
995 | release = srt_server->release + jobs * srt_server->period; | ||
996 | |||
997 | TRACE_SERVER_SUB(srt_server, "catching up to %llu, job %d", | ||
998 | release, srt_server->job_no + jobs); | ||
999 | |||
1000 | BUG_ON(jobs < 1); | ||
1001 | |||
1002 | /* Update server state */ | ||
1003 | server_release_at(srt_server, release); | ||
1004 | srt_server->job_no += jobs - 1; | ||
1005 | |||
1006 | /* Force task to take characteristics of server */ | ||
1007 | tsk_rt(task)->job_params.release = srt_server->release; | ||
1008 | tsk_rt(task)->job_params.deadline = srt_server->deadline; | ||
1009 | |||
1010 | rv = 1; | ||
1011 | |||
1012 | ////sched_trace_action(task, SERVER_RELEASED_ACTION); | ||
1013 | |||
1014 | } else if (lt_before(srt_server->deadline, now) && | ||
1015 | srt_server->job_no <= 1) { | ||
1016 | |||
1017 | server_release_at(srt_server, get_release(task)); | ||
1018 | srt_server->job_no = task_job_no(task); | ||
1019 | } | ||
1020 | |||
1021 | BUG_ON(srt_server->job_no == 0); | ||
1022 | |||
1023 | return rv; | ||
1024 | } | ||
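The same catch-up arithmetic for an SRT server, with made-up numbers: the integer division counts whole periods slept through, plus one for the period that 'now' falls in; the server's job counter advances by the same amount.

#include <stdio.h>

typedef unsigned long long lt_t;

int main(void)
{
        lt_t now = 47, deadline = 30, period = 10, release = 20;
        int jobs = (int)((now - deadline) / period) + 1;

        printf("jobs behind: %d\n", jobs);                       /* 2 */
        printf("new release: %llu\n", release + jobs * period);  /* 40 */
        return 0;
}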
1025 | |||
1026 | /* | ||
1027 | * If the server is eligible, return the next eligible job. If the server is | ||
1028 | * ineligible or there are no eligible jobs, returns NULL. This will re-release | ||
1029 | * any servers that are behind. | ||
1030 | */ | ||
1031 | static noinline struct task_struct* next_eligible_hrt(hrt_server_t *hrt_server) | ||
1032 | { | ||
1033 | lt_t now = litmus_clock(); | ||
1034 | lt_t dead, slack, budget; | ||
1035 | struct task_struct *task = __peek_ready(&hrt_server->hrt_domain); | ||
1036 | |||
1037 | /* Catch up server if it is initialized, not running, and late */ | ||
1038 | if (check_hrt_server_initialized(hrt_server) && | ||
1039 | !is_server_linked(&hrt_server->server)) { | ||
1040 | |||
1041 | dead = hrt_server->server.deadline; | ||
1042 | budget = hrt_server->server.budget; | ||
1043 | slack = lt_subtract(dead, budget); | ||
1044 | |||
1045 | TRACE_SERVER_SUB(&hrt_server->server, "dead: %llu, budget: %llu, " | ||
1046 | "now: %llu, slack: %llu", | ||
1047 | TIME(dead), TIME(budget), TIME(now), TIME(slack)); | ||
1048 | |||
1049 | if (!head_in_list(&hrt_server->server.release_list) && | ||
1050 | lt_before_eq(dead, now)) { | ||
1051 | /* The server missed a release */ | ||
1052 | catchup_server(&hrt_server->server, now); | ||
1053 | TRACE_SERVER_SUB(&hrt_server->server, "now ready"); | ||
1054 | hrt_server->ready = 1; | ||
1055 | remove_slack(server_slack(&hrt_server->server)); | ||
1056 | hrt_server->no_slack = 0; | ||
1057 | |||
1058 | slack = lt_subtract(hrt_server->server.deadline, | ||
1059 | hrt_server->server.budget); | ||
1060 | |||
1061 | ////sched_trace_action(task, SERVER_RELEASED_ACTION); | ||
1062 | } | ||
1063 | |||
1064 | /* If the slack timer is active, this is not necessary */ | ||
1065 | if (!hrtimer_active(&hrt_server->slack_timer) && hrt_server->ready) { | ||
1066 | if (lt_before_eq(slack, now) && !hrt_server->no_slack) { | ||
1067 | /* The server missed the shift to no slack */ | ||
1068 | TRACE_SERVER_SUB(&hrt_server->server, "no slack: %llu", | ||
1069 | TIME(slack)); | ||
1070 | hrt_server->no_slack = 1; | ||
1071 | ////sched_trace_action(task, NO_SLACK_ACTION); | ||
1072 | } else { | ||
1073 | slack_timer_arm(hrt_server); | ||
1074 | } | ||
1075 | } | ||
1076 | |||
1077 | } else { | ||
1078 | TRACE_SERVER_SUB(&hrt_server->server, "%llu %d %llu %d %d", | ||
1079 | hrt_server->server.deadline, | ||
1080 | is_server_linked(&hrt_server->server), | ||
1081 | now, check_hrt_server_initialized(hrt_server), | ||
1082 | !is_server_linked(&hrt_server->server)); | ||
1083 | } | ||
1084 | |||
1085 | if (!hrt_server->server.budget || | ||
1086 | (task && !is_eligible(task, hrt_server))) { | ||
1087 | |||
1088 | if (!hrt_server->server.budget && | ||
1089 | !head_in_list(&hrt_server->server.release_list)) { | ||
1090 | TRACE_SERVER_SUB(&hrt_server->server, "requeuing"); | ||
1091 | catchup_server(&hrt_server->server, now); | ||
1092 | requeue_server(&hrt_server->server, now); | ||
1093 | slack_timer_arm(hrt_server); | ||
1094 | } | ||
1095 | |||
1096 | if (task) { | ||
1097 | TRACE_TASK_SUB(task, "not eligible, budget: %llu", | ||
1098 | TIME(hrt_server->server.budget)); | ||
1099 | } | ||
1100 | task = NULL; | ||
1101 | |||
1102 | /* Donate slack if we have nothing to schedule */ | ||
1103 | if (hrt_server->ready && hrt_server->no_slack) { | ||
1104 | check_donate_slack(&hrt_server->server, NULL); | ||
1105 | } | ||
1106 | } | ||
1107 | |||
1108 | return task; | ||
1109 | } | ||
1110 | |||
1111 | /* | ||
1112 | * This will catch up the SRT's server if it is behind. | ||
1113 | */ | ||
1114 | static noinline struct task_struct* next_eligible_srt(void) | ||
1115 | { | ||
1116 | int done = 0; | ||
1117 | struct task_struct *next_srt; | ||
1118 | |||
1119 | while (!done) { | ||
1120 | next_srt = __peek_ready(&srt_domain); | ||
1121 | |||
1122 | /* A blocking task might pollute the SRT domain if the | ||
1123 | * task blocked while it was being run by a slack server. | ||
1124 | * Remove and ignore this task. | ||
1125 | */ | ||
1126 | while (next_srt && (get_rt_flags(next_srt) == RT_F_BLOCK || | ||
1127 | unlikely(!is_realtime(next_srt)) || | ||
1128 | tsk_rt(next_srt)->linked_on != NO_CPU)) { | ||
1129 | TRACE_TASK_SUB(next_srt, "removing finished task"); | ||
1130 | remove(&srt_domain, next_srt); | ||
1131 | next_srt = __peek_ready(&srt_domain); | ||
1132 | } | ||
1133 | |||
1134 | /* If the task blocked for awhile or has otherwise not been | ||
1135 | * accessed, its server could have fallen behind. | ||
1136 | */ | ||
1137 | if (next_srt) { | ||
1138 | done = !catchup_srt_server(next_srt); | ||
1139 | |||
1140 | /* The parameters were modified. Re-insert the task. */ | ||
1141 | if (!done) { | ||
1142 | remove(&srt_domain, next_srt); | ||
1143 | __add_ready(&srt_domain, next_srt); | ||
1144 | } else if (is_server_linked(task_srt_server(next_srt))){ | ||
1145 | remove(&srt_domain, next_srt); | ||
1146 | done = 0; | ||
1147 | } | ||
1148 | } else { | ||
1149 | done = 1; | ||
1150 | } | ||
1151 | } | ||
1152 | |||
1153 | return next_srt; | ||
1154 | } | ||
1155 | |||
1156 | static inline server_t* next_be_server(void) | ||
1157 | { | ||
1158 | struct bheap_node *hn = bheap_peek(server_order, &be_ready_servers); | ||
1159 | return (hn) ? hn->value : NULL; | ||
1160 | } | ||
1161 | |||
1162 | static noinline server_t* next_eligible_be_server(void) | ||
1163 | { | ||
1164 | server_t *be_server = next_be_server(); | ||
1165 | lt_t now = litmus_clock(); | ||
1166 | |||
1167 | /* Catch up any late be servers. This happens when the servers could | ||
1168 | * not find tasks to schedule or if the system is overutilized. | ||
1169 | */ | ||
1170 | while (be_server && (lt_before_eq(be_server->deadline, now) || | ||
1171 | is_server_linked(be_server))) { | ||
1172 | if (!be_server->deadline) { | ||
1173 | TRACE_SERVER_SUB(be_server, "not initialized"); | ||
1174 | return NULL; | ||
1175 | } | ||
1176 | bheap_delete(server_order, &be_ready_servers, | ||
1177 | be_server->hn); | ||
1178 | |||
1179 | if (is_server_linked(be_server)) { | ||
1180 | TRACE_SERVER_SUB(be_server, "linked"); | ||
1181 | /* leave it out of the heap; it is requeued when it stops */ | ||
1182 | return NULL; | ||
1183 | } | ||
1184 | |||
1185 | catchup_server(be_server, now); | ||
1186 | TRACE_SERVER_SUB(be_server, "catching up BE server"); | ||
1187 | check_donate_slack(be_server, NULL); | ||
1188 | bheap_insert(server_order, &be_ready_servers, | ||
1189 | be_server->hn); | ||
1190 | be_server = next_be_server(); | ||
1191 | ////sched_trace_action(NULL, SERVER_RELEASED_ACTION); /* Release */ | ||
1192 | } | ||
1193 | |||
1194 | if (be_server && lt_before(now, be_server->release)) { | ||
1195 | TRACE_SERVER_SUB(be_server, "not released"); | ||
1196 | be_server = NULL; | ||
1197 | } | ||
1198 | |||
1199 | if (be_server) { | ||
1200 | TRACE_SERVER_SUB(be_server, "dead: %llu, rel: %llu, budget: %llu", | ||
1201 | be_server->deadline, be_server->release, | ||
1202 | be_server->budget); | ||
1203 | |||
1204 | } | ||
1205 | |||
1206 | return be_server; | ||
1207 | } | ||
1208 | |||
1209 | /* | ||
1210 | * Adds a task to the appropriate queue (ready / release) in a domain. | ||
1211 | */ | ||
1212 | static noinline void requeue(struct task_struct *task, rt_domain_t *domain) | ||
1213 | { | ||
1214 | lt_t now = litmus_clock(); | ||
1215 | int was_added; | ||
1216 | |||
1217 | BUG_ON(!is_realtime(task)); | ||
1218 | if (head_in_list(&task_data(task)->candidate_list)) { | ||
1219 | list_del_init(&task_data(task)->candidate_list); | ||
1220 | } | ||
1221 | |||
1222 | check_slack_candidate(task); | ||
1223 | |||
1224 | if (is_queued(task)) { | ||
1225 | TRACE_TASK_SUB(task, "not requeueing, already queued"); | ||
1226 | } else if (is_released(task, now)) { | ||
1227 | TRACE_TASK_SUB(task, "requeuing on ready %llu %llu %llu %llu", | ||
1228 | get_release(task), get_deadline(task), | ||
1229 | get_rt_period(task), now); | ||
1230 | __add_ready(domain, task); | ||
1231 | } else { | ||
1232 | /* Task needs to wait until it is released */ | ||
1233 | TRACE_TASK_SUB(task, "requeuing on release"); | ||
1234 | |||
1235 | was_added = add_release(domain, task); | ||
1236 | |||
1237 | /* The release time happened before we added ourselves | ||
1238 | * to the heap. We can now add to ready. | ||
1239 | */ | ||
1240 | if (!was_added) { | ||
1241 | TRACE_TASK_SUB(task, "missed release, going to ready"); | ||
1242 | __add_ready(domain, task); | ||
1243 | } | ||
1244 | } | ||
1245 | } | ||
1246 | |||
1247 | static inline void earlier_server_task(server_t *first, | ||
1248 | struct task_struct *first_task, | ||
1249 | server_t *second, | ||
1250 | struct task_struct *second_task, | ||
1251 | server_t **server, | ||
1252 | struct task_struct **task) | ||
1253 | { | ||
1254 | if (!first || | ||
1255 | (second && lt_before_eq(second->deadline, first->deadline))) { | ||
1256 | *server = second; | ||
1257 | *task = second_task; | ||
1258 | } else { | ||
1259 | *server = first; | ||
1260 | *task = first_task; | ||
1261 | } | ||
1262 | } | ||
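Chaining this two-way EDF minimum twice, as next_global_task() does below, picks the earliest deadline among the SRT, BE, and slack candidates, with ties going to the second argument. A compact user-space sketch (hypothetical names):

#include <stdio.h>

typedef unsigned long long lt_t;

struct pair { lt_t deadline; const char *name; };

static struct pair* earlier(struct pair *a, struct pair *b)
{
        /* Mirrors earlier_server_task(): prefer b on a tie or NULL a */
        if (!a || (b && b->deadline <= a->deadline))
                return b;
        return a;
}

int main(void)
{
        struct pair srt = { 40, "srt" }, be = { 25, "be" },
                    slack = { 30, "slack" }, *win;

        win = earlier(&srt, &be);
        win = earlier(win, &slack);
        printf("winner: %s\n", win->name);  /* be */
        return 0;
}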
1263 | |||
1264 | /* | ||
1265 | * Set server and task to the next server and task respectively. | ||
1266 | * If entry is not null, the next server will see if it can schedule | ||
1267 | * entry's linked task. | ||
1268 | */ | ||
1269 | static void next_global_task(cpu_entry_t *entry, | ||
1270 | server_t **next_server, | ||
1271 | struct task_struct **next_task) | ||
1272 | { | ||
1273 | struct task_struct *next_srt, *next_be, *next_slack; | ||
1274 | server_t *be_server, *slack_server, *srt_server; | ||
1275 | |||
1276 | *next_server = NULL; | ||
1277 | *next_task = NULL; | ||
1278 | |||
1279 | next_srt = next_eligible_srt(); | ||
1280 | srt_server = (next_srt) ? task_srt_server(next_srt) : NULL; | ||
1281 | |||
1282 | next_be = __peek_ready(&be_domain); | ||
1283 | be_server = next_eligible_be_server(); | ||
1284 | |||
1285 | next_slack = next_eligible_slack(); | ||
1286 | slack_server = next_eligible_slack_server(); | ||
1287 | |||
1288 | TRACE_SUB("be_server: %d, next_be: %d, next_srt: %d, slack_server: %d " | ||
1289 | "next_slack: %d", (be_server) ? be_server->id : -1, | ||
1290 | (next_be) ? next_be->pid : -1, | ||
1291 | (next_srt) ? next_srt->pid : -1, | ||
1292 | (slack_server) ? slack_server->id : -1, | ||
1293 | (next_slack) ? next_slack->pid : -1); | ||
1294 | |||
1295 | /* Check if the servers can schedule the task linked to entry */ | ||
1296 | if (entry && entry->linked) { | ||
1297 | |||
1298 | if (entry->linked_server->type == S_BE && | ||
1299 | (!next_be || | ||
1300 | lt_before(get_release(entry->linked), | ||
1301 | get_release(next_be)))) { | ||
1302 | |||
1303 | next_be = entry->linked; | ||
1304 | } else if (entry->linked_server->type == S_SLACK && | ||
1305 | (!next_slack || | ||
1306 | lt_before(get_deadline(entry->linked), | ||
1307 | get_deadline(next_slack)))) { | ||
1308 | |||
1309 | next_slack = entry->linked; | ||
1310 | } | ||
1311 | } | ||
1312 | |||
1313 | /* Remove tasks without servers and vice versa from contention */ | ||
1314 | if (!next_be || !be_server) { | ||
1315 | next_be = NULL; | ||
1316 | be_server = NULL; | ||
1317 | } | ||
1318 | if (!next_slack || !slack_server) { | ||
1319 | next_slack = NULL; | ||
1320 | slack_server = NULL; | ||
1321 | } | ||
1322 | |||
1323 | /* Favor BE servers. If we don't, then a BE server might lose | ||
1324 | * out to its own slack. | ||
1325 | */ | ||
1326 | if (slack_server && be_server && | ||
1327 | be_server->deadline == slack_server->deadline) { | ||
1328 | next_slack = NULL; | ||
1329 | slack_server = NULL; | ||
1330 | } | ||
1331 | |||
1332 | /* There is probably a better way to do this */ | ||
1333 | earlier_server_task(srt_server, next_srt, | ||
1334 | be_server, next_be, | ||
1335 | next_server, next_task); | ||
1336 | earlier_server_task(*next_server, *next_task, | ||
1337 | slack_server, next_slack, | ||
1338 | next_server, next_task); | ||
1339 | |||
1340 | //BUG_ON(*next_server && lt_before(litmus_clock(), (*next_server)->release)); | ||
1341 | } | ||
1342 | |||
1343 | /* | ||
1344 | * Remove the task and server from any ready queues. | ||
1345 | */ | ||
1346 | static void remove_from_ready(server_t *server, struct task_struct *task, | ||
1347 | cpu_entry_t *entry) | ||
1348 | { | ||
1349 | server_t *slack; | ||
1350 | rt_domain_t *domain; | ||
1351 | BUG_ON(!server); | ||
1352 | BUG_ON(!entry); | ||
1353 | BUG_ON(!task); | ||
1354 | |||
1355 | if (server->type == S_SLACK) { | ||
1356 | TRACE_SERVER_SUB(server, "removed from slack list"); | ||
1357 | list_del_init(&server->list); | ||
1358 | |||
1359 | /* Remove from consideration of BE servers */ | ||
1360 | if (is_be(task) && is_queued(task)) { | ||
1361 | TRACE_TASK_SUB(task, "BE removed from ready"); | ||
1362 | remove(&be_domain, task); | ||
1363 | } | ||
1364 | |||
1365 | /* Remove from consideration of slack servers */ | ||
1366 | if (head_in_list(&task_data(task)->candidate_list)) { | ||
1367 | TRACE_TASK_SUB(task, "deleting candidate"); | ||
1368 | list_del_init(&task_data(task)->candidate_list); | ||
1369 | } | ||
1370 | } else { | ||
1371 | slack = server_slack(server); | ||
1372 | if (slack && head_in_list(&slack->list)) { | ||
1373 | remove_slack(slack); | ||
1374 | } | ||
1375 | if (server->type == S_BE) { | ||
1376 | TRACE_SERVER_SUB(server, "server removed from ready"); | ||
1377 | BUG_ON(!server->hn); | ||
1378 | bheap_delete(server_order, &be_ready_servers, | ||
1379 | server->hn); | ||
1380 | } | ||
1381 | if (is_queued(task)) { | ||
1382 | domain = get_rt_domain(entry, task); | ||
1383 | BUG_ON(!domain); | ||
1384 | TRACE_TASK_SUB(task, "removed from ready"); | ||
1385 | remove(domain, task); | ||
1386 | } | ||
1387 | } | ||
1388 | |||
1389 | BUG_ON(!task_data(task)); | ||
1390 | } | ||
1391 | |||
1392 | static void check_for_slack_preempt(struct task_struct*,server_t*,cpu_entry_t*, int); | ||
1393 | |||
1394 | /* | ||
1395 | * Finds and links the next server and task to an entry with no linked task. | ||
1396 | */ | ||
1397 | static void edf_hsb_pick_next(cpu_entry_t *entry) | ||
1398 | { | ||
1399 | struct task_struct *next_task, *linked; | ||
1400 | server_t *next_server; | ||
1401 | |||
1402 | BUG_ON(entry->linked); | ||
1403 | |||
1404 | next_task = next_eligible_hrt(&entry->hrt_server); | ||
1405 | if (next_task) | ||
1406 | next_server = &entry->hrt_server.server; | ||
1407 | else | ||
1408 | next_global_task(NULL, &next_server, &next_task); | ||
1409 | |||
1410 | |||
1411 | if (next_task) { | ||
1412 | remove_from_ready(next_server, next_task, entry); | ||
1413 | check_for_slack_preempt(next_task, next_server, entry, 1); | ||
1414 | TRACE_TASK_SERVER_SUB(next_task, next_server, | ||
1415 | "removing and picked"); | ||
1416 | |||
1417 | /* A slack preemption could cause something that was already | ||
1418 | * running to be 'swapped' to this CPU in link_to_cpu. | ||
1419 | */ | ||
1420 | if (entry->linked) { | ||
1421 | linked = entry->linked; | ||
1422 | unlink(entry->linked); | ||
1423 | requeue(linked, get_rt_domain(entry, linked)); | ||
1424 | TRACE_TASK_SUB(linked, "preempted next pick"); | ||
1425 | } | ||
1426 | link_to_cpu(entry, next_task, next_server); | ||
1427 | } | ||
1428 | } | ||
1429 | |||
1430 | /* | ||
1431 | * Preempt the currently running server and task with new ones. | ||
1432 | * It is possible that either only the server or the task is different here. | ||
1433 | */ | ||
1434 | static void preempt(cpu_entry_t *entry, struct task_struct *next, | ||
1435 | server_t *next_server, int slack_resched) | ||
1436 | { | ||
1437 | struct task_struct *linked; | ||
1438 | rt_domain_t *domain; | ||
1439 | |||
1440 | TRACE_TASK_SERVER_SUB(next, next_server, | ||
1441 | "preempting on P%d", entry->cpu); | ||
1442 | |||
1443 | remove_from_ready(next_server, next, entry); | ||
1444 | |||
1445 | check_for_slack_preempt(next, next_server, entry, slack_resched); | ||
1446 | linked = entry->linked; | ||
1447 | link_to_cpu(entry, next, next_server); | ||
1448 | |||
1449 | /* No need for this if only the server was preempted */ | ||
1450 | if (!linked || linked != entry->linked) { | ||
1451 | if (linked) { | ||
1452 | domain = get_rt_domain(entry, linked); | ||
1453 | requeue(linked, domain); | ||
1454 | } | ||
1455 | preempt_if_preemptable(entry->scheduled, entry->cpu); | ||
1456 | } | ||
1457 | } | ||
1458 | |||
1459 | /* | ||
1460 | * Causes a preemption if: | ||
1461 | * 1. task is being run by a slack server on a different CPU | ||
1462 | * 2. slack donated by server is running a task on a different CPU | ||
1463 | */ | ||
1464 | static noinline void check_for_slack_preempt(struct task_struct *task, | ||
1465 | server_t *server, | ||
1466 | cpu_entry_t *next_entry, | ||
1467 | int resched) | ||
1468 | { | ||
1469 | cpu_entry_t *entry = NULL; | ||
1470 | server_t *slack = server_slack(server); | ||
1471 | struct task_struct *slack_task; | ||
1472 | |||
1473 | /* The task is currently being run by another server */ | ||
1474 | if (tsk_rt(task)->linked_on != NO_CPU) { | ||
1475 | entry = task_linked_entry(task); | ||
1476 | |||
1477 | if (entry != next_entry) { | ||
1478 | TRACE_TASK_SUB(task, "was on P%d", entry->cpu); | ||
1479 | |||
1480 | unlink(task); | ||
1481 | |||
1482 | /* if (resched) { */ | ||
1483 | /* edf_hsb_pick_next(entry); */ | ||
1484 | /* preempt_if_preemptable(entry->scheduled, entry->cpu); */ | ||
1485 | /* } */ | ||
1486 | } | ||
1487 | } | ||
1488 | |||
1489 | /* The server's slack is currently being run */ | ||
1490 | if (slack && is_server_linked(slack)) { | ||
1491 | entry = &per_cpu(noslack_cpu_entries, slack->cpu); | ||
1492 | slack_task = server_task(slack); | ||
1493 | |||
1494 | unlink(slack_task); | ||
1495 | remove_slack(slack); | ||
1496 | requeue(slack_task, get_rt_domain(entry, slack_task)); | ||
1497 | |||
1498 | if (entry != next_entry && resched) { | ||
1499 | TRACE_SERVER_SUB(slack, "was on P%d", entry->cpu); | ||
1500 | /* Force a reschedule */ | ||
1501 | edf_hsb_pick_next(entry); | ||
1502 | preempt_if_preemptable(entry->scheduled, entry->cpu); | ||
1503 | } else { | ||
1504 | /* This can only happen on a preemption. If a preemption | ||
1505 | * happens, the task will be requeued elsewhere. | ||
1506 | * Obviously the next task has already been chosen. | ||
1507 | */ | ||
1508 | TRACE_SERVER_SUB(slack, "was on local P%d", entry->cpu); | ||
1509 | } | ||
1510 | } | ||
1511 | } | ||
1512 | |||
1513 | /* | ||
1514 | * Check for any necessary non-hrt preemptions. | ||
1515 | */ | ||
1516 | static void check_for_global_preempt(void) | ||
1517 | { | ||
1518 | cpu_entry_t *entry, *sched; | ||
1519 | server_t *next_server; | ||
1520 | int on_cpu; | ||
1521 | struct task_struct *next_task = (struct task_struct*)1; /* dummy non-NULL; overwritten by next_global_task() */ | ||
1522 | |||
1523 | for (entry = lowest_prio_cpu(); entry; entry = lowest_prio_cpu()) { | ||
1524 | /* HRT cpus should not be in this heap */ | ||
1525 | BUG_ON(entry->linked && is_hrt(entry->linked)); | ||
1526 | |||
1527 | next_global_task(entry, &next_server, &next_task); | ||
1528 | |||
1529 | if (!next_server) | ||
1530 | break; | ||
1531 | |||
1532 | /* Preempt only if we have an earlier deadline */ | ||
1533 | if (entry->linked && | ||
1534 | !lt_before(next_server->deadline, | ||
1535 | entry->linked_server->deadline)) { | ||
1536 | break; | ||
1537 | } | ||
1538 | |||
1539 | /* If we are scheduled on another CPU, the link code | ||
1540 | * will force us to link to that CPU and try and link | ||
1541 | * that CPU's task to this CPU. This is impossible | ||
1542 | * if that CPU has linked HRT tasks which cannot | ||
1543 | * migrate. | ||
1544 | */ | ||
1545 | on_cpu = next_task->rt_param.scheduled_on; | ||
1546 | if (on_cpu != NO_CPU) { | ||
1547 | sched = &per_cpu(noslack_cpu_entries, on_cpu); | ||
1548 | |||
1549 | if (sched != entry && sched->linked && | ||
1550 | is_hrt(sched->linked)) { | ||
1551 | |||
1552 | TRACE_TASK_SUB(next_task, | ||
1553 | "Already on P%d", | ||
1554 | sched->cpu); | ||
1555 | break; | ||
1556 | } | ||
1557 | } | ||
1558 | |||
1559 | /* We do not reschedule if this causes a slack preemption | ||
1560 | * because we will detect if we should reschedule on the | ||
1561 | * next iteration of the loop. | ||
1562 | */ | ||
1563 | preempt(entry, next_task, next_server, | ||
1564 | 0 /* Don't reschedule on a slack preemption */); | ||
1565 | } | ||
1566 | } | ||
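
check_for_global_preempt() repeatedly takes the lowest-priority CPU and stops at the first one whose linked deadline is not strictly later than the best pending candidate. A rough user-space model of that termination rule, with a plain array scan standing in for the CPU bheap and invented deadlines (an unlinked CPU behaves like an infinite deadline):

#include <stdio.h>

#define NCPUS 3
#define IDLE  (~0ULL) /* unlinked CPU: effectively infinite deadline */

int main(void)
{
	unsigned long long linked[NCPUS] = { 30, IDLE, 55 };
	unsigned long long pending[] = { 20, 40, 70 }; /* sorted candidates */
	int p = 0, npending = sizeof(pending) / sizeof(pending[0]);

	for (;;) {
		int cpu, worst = 0;
		for (cpu = 1; cpu < NCPUS; cpu++)
			if (linked[cpu] > linked[worst])
				worst = cpu;
		/* Preempt only on a strictly earlier deadline */
		if (p >= npending || pending[p] >= linked[worst])
			break;
		printf("preempt P%d (%llu) with deadline %llu\n",
		       worst, linked[worst], pending[p]);
		linked[worst] = pending[p++];
	}
	return 0;
}
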
1567 | |||
1568 | /* | ||
1569 | * Correct local link after a change to the local HRT domain. | ||
1570 | */ | ||
1571 | static void check_for_hrt_preempt(cpu_entry_t *entry) | ||
1572 | { | ||
1573 | hrt_server_t *hrt_server = &entry->hrt_server; | ||
1574 | struct task_struct *next_hrt = next_eligible_hrt(hrt_server); | ||
1575 | |||
1576 | if (next_hrt && | ||
1577 | (!entry->linked || !is_hrt(entry->linked) || | ||
1578 | !is_eligible(entry->linked, hrt_server) || | ||
1579 | edf_preemption_needed(&hrt_server->hrt_domain, entry->linked))) { | ||
1580 | |||
1581 | preempt(entry, next_hrt, &hrt_server->server, 1); | ||
1582 | |||
1583 | } else { | ||
1584 | TRACE_SERVER_SUB(&hrt_server->server, "not HRT preempting"); | ||
1585 | } | ||
1586 | } | ||
1587 | |||
1588 | /* | ||
1589 | * Assumes called with local irqs disabled. | ||
1590 | */ | ||
1591 | static void job_arrival(struct task_struct *task, cpu_entry_t *entry) | ||
1592 | { | ||
1593 | int was_empty; | ||
1594 | |||
1595 | BUG_ON(task_cpu(task) == NO_CPU); | ||
1596 | |||
1597 | TRACE_TASK_SUB(task, "arriving on P%d", entry->cpu); | ||
1598 | |||
1599 | if (is_hrt(task)) { | ||
1600 | requeue(task, &entry->hrt_server.hrt_domain); | ||
1601 | check_for_hrt_preempt(entry); | ||
1602 | } else if (is_srt(task)) { | ||
1603 | requeue(task, &srt_domain); | ||
1604 | check_for_global_preempt(); | ||
1605 | } else /* BE */ { | ||
1606 | was_empty = !__jobs_pending(&be_domain); | ||
1607 | requeue(task, &be_domain); | ||
1608 | |||
1609 | /* Only way this could cause a preemption is if an eligible | ||
1610 | * BE server could not queue up a task. | ||
1611 | */ | ||
1612 | if (was_empty && __jobs_pending(&be_domain)) | ||
1613 | check_for_global_preempt(); | ||
1614 | } | ||
1615 | } | ||
1616 | |||
1617 | /****************************************************************************** | ||
1618 | * Timer methods | ||
1619 | ******************************************************************************/ | ||
1620 | |||
1621 | /* | ||
1622 | * Merges a group of released HRT tasks into a ready queue and checks | ||
1623 | * for preemptions. | ||
1624 | */ | ||
1625 | static void release_hrt_jobs(rt_domain_t *domain, struct bheap *tasks) | ||
1626 | { | ||
1627 | unsigned long flags; | ||
1628 | struct task_struct *first; | ||
1629 | cpu_entry_t *entry; | ||
1630 | |||
1631 | raw_spin_lock_irqsave(global_lock, flags); | ||
1632 | |||
1633 | first = (struct task_struct*)bheap_peek(edf_ready_order, tasks)->value; | ||
1634 | BUG_ON(!first || !is_hrt(first)); | ||
1635 | entry = task_sched_entry(first); | ||
1636 | |||
1637 | TRACE_TASK(first, "HRT tasks released at %llu on P%d\n", | ||
1638 | TIME(litmus_clock()), task_cpu(first)); | ||
1639 | |||
1640 | __merge_ready(domain, tasks); | ||
1641 | check_for_hrt_preempt(entry); | ||
1642 | |||
1643 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
1644 | } | ||
1645 | |||
1646 | /* | ||
1647 | * Merges a group of released tasks into a ready queue and checks to see | ||
1648 | * if schedule() needs to be called. | ||
1649 | */ | ||
1650 | static void release_srt_jobs(rt_domain_t *domain, struct bheap *tasks) | ||
1651 | { | ||
1652 | unsigned long flags; | ||
1653 | struct task_struct *first = (bheap_peek(edf_ready_order, tasks)->value); | ||
1654 | |||
1655 | raw_spin_lock_irqsave(global_lock, flags); | ||
1656 | |||
1657 | TRACE_TASK(first, "SRT tasks released at %llu\n", TIME(litmus_clock())); | ||
1658 | |||
1659 | __merge_ready(domain, tasks); | ||
1660 | check_for_global_preempt(); | ||
1661 | |||
1662 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
1663 | } | ||
1664 | |||
1665 | /* | ||
1666 | * Merges a group of released tasks into a ready queue and checks to see | ||
1667 | * if schedule() needs to be called. | ||
1668 | */ | ||
1669 | static void release_be_jobs(rt_domain_t *domain, struct bheap *tasks) | ||
1670 | { | ||
1671 | unsigned long flags; | ||
1672 | int was_empty; | ||
1673 | struct task_struct *first = (bheap_peek(edf_ready_order, tasks)->value); | ||
1674 | |||
1675 | TRACE_TASK(first, "BE tasks released at %llu\n", TIME(litmus_clock()));; | ||
1676 | |||
1677 | raw_spin_lock_irqsave(global_lock, flags); | ||
1678 | |||
1679 | was_empty = !__jobs_pending(domain); | ||
1680 | __merge_ready(domain, tasks); | ||
1681 | if (was_empty) { | ||
1682 | /* Only way this could cause a preemption is if a BE server | ||
1683 | * could not find a task to run. | ||
1684 | */ | ||
1685 | check_for_global_preempt(); | ||
1686 | } | ||
1687 | |||
1688 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
1689 | } | ||
1690 | |||
1691 | static enum hrtimer_restart slack_timer_fire(struct hrtimer *timer) | ||
1692 | { | ||
1693 | unsigned long flags; | ||
1694 | hrt_server_t *server = container_of(timer, hrt_server_t, slack_timer); | ||
1695 | cpu_entry_t *entry = container_of(server, cpu_entry_t, hrt_server); | ||
1696 | |||
1697 | raw_spin_lock_irqsave(global_lock, flags); | ||
1698 | |||
1699 | TRACE_TIMER("slack timer fired for P%d", entry->cpu); | ||
1700 | BUG_ON(!server->ready); | ||
1701 | /* sched_trace_action(entry->linked, NO_SLACK_ACTION); */ | ||
1702 | |||
1703 | /* Set new state of entry */ | ||
1704 | server->no_slack = 1; | ||
1705 | check_for_hrt_preempt(entry); | ||
1706 | |||
1707 | /* Donate slack if the HRT server cannot run anything */ | ||
1708 | if (!entry->linked || !is_hrt(entry->linked)) { | ||
1709 | check_donate_slack(&server->server, NULL); | ||
1710 | check_for_global_preempt(); | ||
1711 | } | ||
1712 | |||
1713 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
1714 | |||
1715 | return HRTIMER_NORESTART; | ||
1716 | } | ||
1717 | |||
1718 | static void job_completion(cpu_entry_t *entry, struct task_struct* task) | ||
1719 | { | ||
1720 | server_t *server = entry->linked_server; | ||
1721 | set_rt_flags(task, RT_F_SLEEP); | ||
1722 | |||
1723 | TRACE_TASK_SUB(task, "completed"); | ||
1724 | |||
1725 | unlink(task); | ||
1726 | check_donate_slack(server, task); | ||
1727 | |||
1728 | /* If a slack server completed an SRT task, the work for the | ||
1729 | * next job arrival has already been done. | ||
1730 | */ | ||
1731 | if (server->type == S_SLACK && is_srt(task)) { | ||
1732 | tsk_rt(task)->job_params.job_no++; | ||
1733 | sched_trace_task_release(task); | ||
1734 | TRACE_TASK_SERVER_SUB(task, server, "catching up SRT, " | ||
1735 | "rel: %llu, dead: %llu", | ||
1736 | TIME(get_release(task)), | ||
1737 | TIME(get_deadline(task))); | ||
1738 | } else if (server->type == S_SRT) { | ||
1739 | /* If the task is behind the server it must release immediately, | ||
1740 | * leaving its release time and deadline unchanged. | ||
1741 | */ | ||
1742 | if (server->job_no > tsk_rt(task)->job_params.job_no) { | ||
1743 | TRACE_TASK_SUB(task, "catching up"); | ||
1744 | tsk_rt(task)->job_params.job_no++; | ||
1745 | } else { | ||
1746 | /* Otherwise release them both */ | ||
1747 | prepare_for_next_period(task); | ||
1748 | TRACE_TASK_SUB(task, "next release: %llu, dead: %llu", | ||
1749 | TIME(get_release(task)), | ||
1750 | TIME(get_deadline(task))); | ||
1751 | server_release(server); | ||
1752 | } | ||
1753 | } else { | ||
1754 | prepare_for_next_period(task); | ||
1755 | TRACE_TASK_SUB(task, "next release: %llu, dead: %llu", | ||
1756 | TIME(get_release(task)), | ||
1757 | TIME(get_deadline(task))); | ||
1758 | } | ||
1759 | |||
1760 | if (is_released(task, litmus_clock())) | ||
1761 | sched_trace_task_release(task); | ||
1762 | |||
1763 | /* Don't requeue a blocking task */ | ||
1764 | if (is_running(task)) | ||
1765 | job_arrival(task, entry); | ||
1766 | |||
1767 | sched_trace_task_completion(task, 1); | ||
1768 | } | ||
1769 | |||
1770 | /* | ||
1771 | * Assumes called with local irqs disabled. | ||
1772 | */ | ||
1773 | static void server_completed(server_t *server, struct task_struct *task) | ||
1774 | { | ||
1775 | hrt_server_t *hrt_server; | ||
1776 | cpu_entry_t *entry = task_linked_entry(task); | ||
1777 | |||
1778 | BUG_ON(entry->linked != task); | ||
1779 | BUG_ON(entry->linked_server != server); | ||
1780 | |||
1781 | if (server->type == S_SRT) { | ||
1782 | TRACE_TASK_SUB(task, "must wait on server"); | ||
1783 | |||
1784 | /* The job must now take the priority and release time | ||
1785 | * of the next server. We do this so that we can still | ||
1786 | * use rt_domain and other handy methods to work | ||
1787 | * with SRT jobs. Because this can ONLY happen if the | ||
1788 | * task's job number gets behind the server's, we can | ||
1789 | * easily detect the job catching up later. | ||
1790 | */ | ||
1791 | tsk_rt(task)->job_params.release = server->deadline; | ||
1792 | tsk_rt(task)->job_params.deadline = server->deadline + | ||
1793 | get_rt_period(task); | ||
1794 | TRACE_TASK_SUB(task, "waiting, new dead: %llu, new rel: %llu", | ||
1795 | TIME(get_deadline(task)), | ||
1796 | TIME(get_release(task))); | ||
1797 | |||
1798 | } else if (server->type == S_HRT) { | ||
1799 | /* Update state of HRT server */ | ||
1800 | hrt_server = container_of(server, hrt_server_t, server); | ||
1801 | hrt_server->ready = 0; | ||
1802 | TRACE_SERVER_SUB(server, "P%d no longer ready", entry->cpu); | ||
1803 | |||
1804 | if (hrtimer_active(&hrt_server->slack_timer)) | ||
1805 | slack_timer_cancel(hrt_server); | ||
1806 | } | ||
1807 | |||
1808 | if (server->type != S_SLACK) { | ||
1809 | server_release(server); | ||
1810 | } | ||
1811 | |||
1812 | sched_trace_action(task, SERVER_COMPLETED_ACTION); | ||
1813 | |||
1814 | unlink(task); | ||
1815 | requeue(task, get_rt_domain(entry, task)); | ||
1816 | |||
1817 | /* We know this CPU needs to pick its next task */ | ||
1818 | edf_hsb_pick_next(entry); | ||
1819 | |||
1820 | /* Only cause a reschedule if something new was scheduled. A task | ||
1821 | * could merely have swapped servers. | ||
1822 | */ | ||
1823 | if (entry->linked != task) | ||
1824 | preempt_if_preemptable(entry->scheduled, entry->cpu); | ||
1825 | else | ||
1826 | entry->scheduled_server = entry->linked_server; | ||
1827 | } | ||
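
A worked example of the two coupling cases above, with invented numbers. If the server exhausts its budget first, the unfinished SRT job inherits the next server window (release = old server deadline, deadline one period later); if the job then completes while its job count still trails the server's, job_completion() only advances job_no and leaves those times alone.

#include <stdio.h>

int main(void)
{
	unsigned long long period = 100, server_deadline = 200;

	/* server_completed(): the job waits on the next server instance */
	unsigned long long release  = server_deadline;          /* 200 */
	unsigned long long deadline = server_deadline + period; /* 300 */
	printf("job waits: release %llu, deadline %llu\n", release, deadline);

	/* job_completion() later: job_no is behind the server's, so only
	 * the counter catches up; no prepare_for_next_period() */
	int server_job_no = 5, task_job_no = 4;
	if (server_job_no > task_job_no)
		task_job_no++;
	printf("task caught up to job %d\n", task_job_no);
	return 0;
}
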
1828 | |||
1829 | static void hrt_server_released(server_t *server) | ||
1830 | { | ||
1831 | hrt_server_t *hrt_server = container_of(server, hrt_server_t, server); | ||
1832 | cpu_entry_t *entry = container_of(hrt_server, cpu_entry_t, hrt_server); | ||
1833 | |||
1834 | BUG_ON(hrtimer_active(&hrt_server->slack_timer)); | ||
1835 | TRACE_SERVER_SUB(server, "HRT server released on P%d", entry->cpu); | ||
1836 | |||
1837 | hrt_server->no_slack = 0; | ||
1838 | hrt_server->ready = 1; | ||
1839 | remove_slack(server_slack(&hrt_server->server)); | ||
1840 | |||
1841 | check_for_hrt_preempt(entry); | ||
1842 | |||
1843 | /* Ensure slack timer is only running if the current | ||
1844 | * job is not HRT. | ||
1845 | */ | ||
1846 | if (entry->linked && is_hrt(entry->linked)) | ||
1847 | slack_timer_cancel(hrt_server); | ||
1848 | else | ||
1849 | slack_timer_arm(hrt_server); | ||
1850 | } | ||
1851 | |||
1852 | static void servers_released(struct list_head *servers) | ||
1853 | { | ||
1854 | int was_be = 0; | ||
1855 | unsigned long flags; | ||
1856 | struct list_head *pos, *safe; | ||
1857 | server_t *server; | ||
1858 | |||
1859 | raw_spin_lock_irqsave(global_lock, flags); | ||
1860 | |||
1861 | /* sched_trace_action(NULL, SERVER_RELEASED_ACTION); */ | ||
1862 | TRACE_TIMER("Servers released"); | ||
1863 | |||
1864 | list_for_each_safe(pos, safe, servers) { | ||
1865 | server = list_entry(pos, server_t, release_list); | ||
1866 | |||
1867 | list_del_init(pos); | ||
1868 | |||
1869 | if (server->type == S_BE) { | ||
1870 | was_be = 1; | ||
1871 | BUG_ON(bheap_node_in_heap(server->hn)); | ||
1872 | TRACE_SERVER_SUB(server, "inserting BE server"); | ||
1873 | bheap_insert(server_order, &be_ready_servers, | ||
1874 | server->hn); | ||
1875 | /* Donate any slack once the server is back in the heap */ | ||
1876 | check_donate_slack(server, NULL); | ||
1877 | } else { /* HRT server */ | ||
1878 | hrt_server_released(server); | ||
1879 | } | ||
1880 | } | ||
1881 | |||
1882 | if (was_be) | ||
1883 | check_for_global_preempt(); | ||
1884 | |||
1885 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
1886 | } | ||
1887 | |||
1888 | /****************************************************************************** | ||
1889 | * Server management methods | ||
1890 | ******************************************************************************/ | ||
1891 | |||
1892 | static int curr_be = 0; | ||
1893 | |||
1894 | /* | ||
1895 | * A BE server has been added in a proc entry. | ||
1896 | */ | ||
1897 | static int admit_be_server(unsigned long long wcet, | ||
1898 | unsigned long long period, | ||
1899 | int cpu) | ||
1900 | { | ||
1901 | int rv = 0; | ||
1902 | server_t *be_server; | ||
1903 | |||
1904 | if (cpu != NO_CPU) { | ||
1905 | rv = -EINVAL; | ||
1906 | goto out; | ||
1907 | } | ||
1908 | |||
1909 | be_server = server_alloc(GFP_ATOMIC); | ||
1910 | server_init(be_server, &server_domain, | ||
1911 | BE_SERVER_BASE + ++curr_be, | ||
1912 | wcet, period, 1); | ||
1913 | be_server->type = S_BE; | ||
1914 | server_slack_create(be_server); | ||
1915 | |||
1916 | TRACE_SERVER_SUB(be_server, "admitted BE server"); | ||
1917 | |||
1918 | list_add(&be_server->list, &be_servers); | ||
1919 | bheap_insert(server_order, &be_ready_servers, be_server->hn); | ||
1920 | |||
1921 | out: | ||
1922 | return rv; | ||
1923 | } | ||
1924 | |||
1925 | /* | ||
1926 | * Output all BE servers to a proc entry. | ||
1927 | */ | ||
1928 | static void list_be_servers(server_proc_t *proc) | ||
1929 | { | ||
1930 | struct list_head *pos; | ||
1931 | server_t *be_server; | ||
1932 | |||
1933 | list_for_each(pos, &be_servers) { | ||
1934 | be_server = list_entry(pos, server_t, list); | ||
1935 | list_server(be_server, NO_CPU, proc); | ||
1936 | } | ||
1937 | } | ||
1938 | |||
1939 | /* | ||
1940 | * Halts and destroys all BE servers. | ||
1941 | */ | ||
1942 | static void stop_be_servers(void) | ||
1943 | { | ||
1944 | server_t *be_server; | ||
1945 | struct list_head *pos, *safe; | ||
1946 | |||
1947 | list_for_each_safe(pos, safe, &be_servers) { | ||
1948 | be_server = list_entry(pos, server_t, list); | ||
1949 | |||
1950 | list_del_init(pos); | ||
1951 | if (bheap_node_in_heap(be_server->hn)) | ||
1952 | bheap_delete(server_order, &be_ready_servers, | ||
1953 | be_server->hn); | ||
1954 | server_slack_destroy(be_server); | ||
1955 | server_destroy(be_server); | ||
1956 | server_free(be_server); | ||
1957 | } | ||
1958 | } | ||
1959 | |||
1960 | /* | ||
1961 | * An HRT server has been added in a proc entry. | ||
1962 | */ | ||
1963 | static int admit_hrt_server(unsigned long long wcet, | ||
1964 | unsigned long long period, | ||
1965 | int cpu) | ||
1966 | { | ||
1967 | cpu_entry_t *entry = &per_cpu(noslack_cpu_entries, cpu); | ||
1968 | hrt_server_t *hrt_server = &entry->hrt_server; | ||
1969 | struct hrtimer *slack_timer = &hrt_server->slack_timer; | ||
1970 | |||
1971 | server_init(&hrt_server->server, &server_domain, | ||
1972 | cpu, wcet, period, 1); | ||
1973 | server_slack_create(&hrt_server->server); | ||
1974 | hrt_server->no_slack = 0; | ||
1975 | hrt_server->ready = 1; | ||
1976 | hrt_server->server.type = S_HRT; | ||
1977 | |||
1978 | edf_domain_init(&hrt_server->hrt_domain, NULL, | ||
1979 | release_hrt_jobs); | ||
1980 | |||
1981 | hrtimer_init(slack_timer, | ||
1982 | CLOCK_MONOTONIC, | ||
1983 | HRTIMER_MODE_ABS); | ||
1984 | slack_timer->function = slack_timer_fire; | ||
1985 | |||
1986 | return 0; | ||
1987 | } | ||
1988 | |||
1989 | /* | ||
1990 | * Print all HRT servers to a proc entry. | ||
1991 | */ | ||
1992 | static void list_hrt_servers(server_proc_t *proc) | ||
1993 | { | ||
1994 | cpu_entry_t *entry; | ||
1995 | hrt_server_t *hrt_server; | ||
1996 | int cpu; | ||
1997 | |||
1998 | for_each_online_cpu(cpu) { | ||
1999 | entry = &per_cpu(noslack_cpu_entries, cpu); | ||
2000 | hrt_server = &entry->hrt_server; | ||
2001 | list_server(&hrt_server->server, cpu, proc); | ||
2002 | } | ||
2003 | } | ||
2004 | |||
2005 | /* | ||
2006 | * Stops all HRT server timers and resets all fields to 0. | ||
2007 | */ | ||
2008 | static void stop_hrt_servers(void) | ||
2009 | { | ||
2010 | int cpu; | ||
2011 | cpu_entry_t *entry; | ||
2012 | hrt_server_t *hrt_server; | ||
2013 | |||
2014 | for_each_online_cpu(cpu) { | ||
2015 | entry = &per_cpu(noslack_cpu_entries, cpu); | ||
2016 | hrt_server = &entry->hrt_server; | ||
2017 | |||
2018 | if (hrt_server->server.data) | ||
2019 | server_slack_destroy(&hrt_server->server); | ||
2020 | slack_timer_cancel(hrt_server); | ||
2021 | |||
2022 | hrt_server->no_slack = 0; | ||
2023 | hrt_server->ready = 0; | ||
2024 | hrt_server->server.period = 0; | ||
2025 | hrt_server->server.wcet = 0; | ||
2026 | } | ||
2027 | } | ||
2028 | |||
2029 | /* | ||
2030 | * Starts timers used to manage servers. | ||
2031 | */ | ||
2032 | static void start_servers(lt_t time) | ||
2033 | { | ||
2034 | int cpu; | ||
2035 | cpu_entry_t *entry; | ||
2036 | server_t *server; | ||
2037 | server_t *be_server; | ||
2038 | struct list_head *pos; | ||
2039 | |||
2040 | TRACE_SUB("starting servers at %llu", time); | ||
2041 | |||
2042 | /* Start HRT servers */ | ||
2043 | for_each_online_cpu(cpu) { | ||
2044 | entry = &per_cpu(noslack_cpu_entries, cpu); | ||
2045 | server = &entry->hrt_server.server; | ||
2046 | |||
2047 | if (!check_hrt_server_initialized(&entry->hrt_server)) | ||
2048 | goto loop_end; | ||
2049 | |||
2050 | /* Release one period in the past so the first deadline lands at 'time' */ | ||
2051 | server_release_at(server, time - server->period); | ||
2052 | entry->hrt_server.ready = 1; | ||
2053 | |||
2054 | TRACE("Setting up cpu %d to have timer deadline %llu\n", | ||
2055 | cpu, TIME(server->deadline)); | ||
2056 | loop_end: | ||
2057 | cpu = cpu; /* no-op: a label must precede a statement */ | ||
2058 | } | ||
2059 | |||
2060 | /* Start BE servers */ | ||
2061 | list_for_each(pos, &be_servers) { | ||
2062 | be_server = list_entry(pos, server_t, list); | ||
2063 | |||
2064 | if (!bheap_node_in_heap(be_server->hn)) | ||
2065 | bheap_insert(server_order, &be_ready_servers, be_server->hn); | ||
2066 | |||
2067 | /* Release one period in the past so the first deadline lands at 'time' */ | ||
2068 | server_release_at(be_server, time - be_server->period); | ||
2069 | |||
2070 | TRACE("Releasing BE server %d\n", be_server->id); | ||
2071 | TRACE_SERVER_SUB(be_server, "inserting be server"); | ||
2072 | } | ||
2073 | } | ||
2074 | |||
2075 | /****************************************************************************** | ||
2076 | * Plugin methods | ||
2077 | ******************************************************************************/ | ||
2078 | |||
2079 | static long edf_hsb_activate_plugin(void) | ||
2080 | { | ||
2081 | int cpu; | ||
2082 | cpu_entry_t *entry; | ||
2083 | #ifdef CONFIG_RELEASE_MASTER | ||
2084 | edf_hsb_release_master = atomic_read(&release_master_cpu); | ||
2085 | #else | ||
2086 | edf_hsb_release_master = NO_CPU; | ||
2087 | #endif | ||
2088 | server_domain.release_master = edf_hsb_release_master; | ||
2089 | |||
2090 | for_each_online_cpu(cpu) { | ||
2091 | entry = &per_cpu(noslack_cpu_entries, cpu); | ||
2092 | #ifdef CONFIG_RELEASE_MASTER | ||
2093 | if (cpu != edf_hsb_release_master) | ||
2094 | #endif | ||
2095 | update_cpu_position(entry); | ||
2096 | } | ||
2097 | |||
2098 | start_servers(litmus_clock()); | ||
2099 | |||
2100 | TRACE("activating EDF-HSB plugin.\n"); | ||
2101 | return 0; | ||
2102 | } | ||
2103 | |||
2104 | /* | ||
2105 | * Requires a processor be specified for any task run on the system. | ||
2106 | */ | ||
2107 | static long edf_hsb_admit_task(struct task_struct *task) | ||
2108 | { | ||
2109 | cpu_entry_t *entry = task_sched_entry(task); | ||
2110 | |||
2111 | TRACE_TASK(task, "Admitting\n"); | ||
2112 | |||
2113 | if (is_hrt(task)) { | ||
2114 | return check_hrt_server_initialized(&entry->hrt_server) && | ||
2115 | ((task_cpu(task) == task->rt_param.task_params.cpu) && | ||
2116 | (task_cpu(task) == entry->cpu)) ? 0 : -EINVAL; | ||
2117 | } else { | ||
2118 | /* If the task is not HRT, we don't want to force the user | ||
2119 | * to specify a CPU. | ||
2120 | */ | ||
2121 | return 0; | ||
2122 | } | ||
2123 | } | ||
2124 | |||
2125 | /* | ||
2126 | * Stops all servers from running. | ||
2127 | */ | ||
2128 | static long edf_hsb_deactivate_plugin(void) | ||
2129 | { | ||
2130 | cpu_entry_t *cpu_entry; | ||
2131 | hrt_server_t *hrt_server; | ||
2132 | unsigned long flags; | ||
2133 | int cpu; | ||
2134 | |||
2135 | local_irq_save(flags); | ||
2136 | |||
2137 | for_each_online_cpu(cpu) { | ||
2138 | cpu_entry = &per_cpu(noslack_cpu_entries, cpu); | ||
2139 | hrt_server = &cpu_entry->hrt_server; | ||
2140 | |||
2141 | slack_timer_cancel(hrt_server); | ||
2142 | |||
2143 | if (likely(bheap_node_in_heap(cpu_entry->hn))) | ||
2144 | bheap_delete(server_order, &cpu_heap, cpu_entry->hn); | ||
2145 | } | ||
2146 | |||
2147 | local_irq_restore(flags); | ||
2148 | |||
2149 | return 0; | ||
2150 | } | ||
2151 | |||
2152 | static void edf_hsb_task_block(struct task_struct *task) | ||
2153 | { | ||
2154 | unsigned long flags; | ||
2155 | cpu_entry_t *entry = task_sched_entry(task); | ||
2156 | struct task_struct *linked; | ||
2157 | server_t *linked_server; | ||
2158 | |||
2159 | TRACE_TASK(task, "block at %llu in state %llu\n", | ||
2160 | litmus_clock(), task->state); | ||
2161 | set_rt_flags(task, RT_F_BLOCK); | ||
2162 | |||
2163 | raw_spin_lock_irqsave(global_lock, flags); | ||
2164 | |||
2165 | linked = entry->linked; | ||
2166 | linked_server = entry->linked_server; | ||
2167 | |||
2168 | unlink(task); | ||
2169 | |||
2170 | /* TODO: necessary? */ | ||
2171 | if (task == linked) { | ||
2172 | check_donate_slack(linked_server, task); | ||
2173 | } | ||
2174 | |||
2175 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
2176 | } | ||
2177 | |||
2178 | /* | ||
2179 | * A task leaves the system. | ||
2180 | */ | ||
2181 | static void edf_hsb_task_exit(struct task_struct *task) | ||
2182 | { | ||
2183 | unsigned long flags; | ||
2184 | cpu_entry_t *entry = task_sched_entry(task); | ||
2185 | |||
2186 | BUG_ON(!is_realtime(task)); | ||
2187 | TRACE_TASK(task, "RIP at %llu on P%d\n", | ||
2188 | TIME(litmus_clock()), tsk_rt(task)->scheduled_on); | ||
2189 | |||
2190 | raw_spin_lock_irqsave(global_lock, flags); | ||
2191 | |||
2192 | unlink(task); | ||
2193 | if (tsk_rt(task)->scheduled_on != NO_CPU) { | ||
2194 | entry->scheduled = NULL; | ||
2195 | tsk_rt(task)->scheduled_on = NO_CPU; | ||
2196 | } | ||
2197 | if (is_srt(task)) { | ||
2198 | server_slack_destroy(task_srt_server(task)); | ||
2199 | server_destroy(task_srt_server(task)); | ||
2200 | server_free(task_srt_server(task)); | ||
2201 | task_data_free(tsk_rt(task)->plugin_data); | ||
2202 | } | ||
2203 | |||
2204 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
2205 | } | ||
2206 | |||
2207 | /* | ||
2208 | * Attempts to determine the current scheduler state, then selects the | ||
2209 | * next task and updates the scheduler state. | ||
2210 | */ | ||
2211 | static struct task_struct* edf_hsb_schedule(struct task_struct *prev) | ||
2212 | { | ||
2213 | unsigned long flags; | ||
2214 | int blocks, preempted, sleep, was_slack, np, hrt_preempt, donated; | ||
2215 | struct task_struct *curr; | ||
2216 | cpu_entry_t *entry = local_cpu_entry; | ||
2217 | |||
2218 | #ifdef CONFIG_RELEASE_MASTER | ||
2219 | /* Bail out early if we are the release master. | ||
2220 | * The release master never schedules any real-time tasks. | ||
2221 | */ | ||
2222 | if (edf_hsb_release_master == entry->cpu) { | ||
2223 | sched_state_task_picked(); | ||
2224 | return NULL; | ||
2225 | } | ||
2226 | #endif | ||
2227 | |||
2228 | raw_spin_lock_irqsave(global_lock, flags); | ||
2229 | |||
2230 | curr = entry->scheduled; | ||
2231 | |||
2232 | if (entry->scheduled && !is_realtime(prev)) { | ||
2233 | TRACE_TASK_SUB(entry->scheduled, "Stack deadlock!"); | ||
2234 | } | ||
2235 | |||
2236 | TRACE("server_budget: %llu, server_deadline: %llu, " | ||
2237 | "curr_time: %llu, no_slack: %d, ready: %d\n", | ||
2238 | TIME(entry->hrt_server.server.budget), | ||
2239 | TIME(entry->hrt_server.server.deadline), | ||
2240 | TIME(litmus_clock()), entry->hrt_server.no_slack, | ||
2241 | entry->hrt_server.ready); | ||
2242 | |||
2243 | /* Determine state */ | ||
2244 | blocks = curr && !is_running(curr); | ||
2245 | preempted = entry->scheduled != entry->linked; | ||
2246 | sleep = curr && get_rt_flags(curr) == RT_F_SLEEP; | ||
2247 | was_slack = !list_empty(&slack_queue); | ||
2248 | np = curr && is_np(curr); | ||
2249 | |||
2250 | TRACE("blocks: %d, preempted: %d, sleep: %d, np: %d\n", | ||
2251 | blocks, preempted, sleep, np); | ||
2252 | if (blocks) | ||
2253 | unlink(entry->scheduled); | ||
2254 | |||
2255 | /* If the task has gone to sleep or exhausted its budget, it | ||
2256 | * must complete its current job. | ||
2257 | */ | ||
2258 | if (sleep && !blocks && !preempted) | ||
2259 | job_completion(entry, entry->scheduled); | ||
2260 | |||
2261 | /* Pick the next task if there isn't one currently */ | ||
2262 | if (!entry->linked) | ||
2263 | edf_hsb_pick_next(entry); | ||
2264 | |||
2265 | /* Set task states */ | ||
2266 | if (entry->linked != entry->scheduled) { | ||
2267 | if (entry->linked) | ||
2268 | entry->linked->rt_param.scheduled_on = entry->cpu; | ||
2269 | if (entry->scheduled) | ||
2270 | entry->scheduled->rt_param.scheduled_on = NO_CPU; | ||
2271 | } | ||
2272 | |||
2273 | entry->scheduled = entry->linked; | ||
2274 | entry->scheduled_server = entry->linked_server; | ||
2275 | sched_state_task_picked(); | ||
2276 | |||
2277 | /* A non-HRT task was preempted by an HRT task. Because of the way linking | ||
2278 | * works, it cannot link itself to anything else until the non-migratory | ||
2279 | * HRT task is scheduled. | ||
2280 | */ | ||
2281 | hrt_preempt = preempted && entry->linked && curr && | ||
2282 | is_hrt(entry->linked) && !is_hrt(curr); | ||
2283 | /* A server just donated slack */ | ||
2284 | donated = entry->linked && entry->linked_server->type != S_SLACK && | ||
2285 | head_in_list(&server_slack(entry->linked_server)->list); | ||
2286 | |||
2287 | if (hrt_preempt || donated) | ||
2288 | check_for_global_preempt(); | ||
2289 | |||
2290 | if (entry->scheduled) | ||
2291 | TRACE_TASK(entry->scheduled, "scheduled at %llu\n", | ||
2292 | TIME(litmus_clock())); | ||
2293 | else | ||
2294 | TRACE("NULL scheduled at %llu\n", TIME(litmus_clock())); | ||
2295 | |||
2296 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
2297 | |||
2298 | if (!entry->scheduled && !next_eligible_slack_server()) { | ||
2299 | TRACE_SUB("A slack server has dissapeared!"); | ||
2300 | } | ||
2301 | |||
2302 | return entry->scheduled; | ||
2303 | } | ||
2304 | |||
2305 | /* | ||
2306 | * Prepare a task for running in RT mode | ||
2307 | */ | ||
2308 | static void edf_hsb_task_new(struct task_struct *task, int on_rq, int running) | ||
2309 | { | ||
2310 | unsigned long flags; | ||
2311 | task_data_t *data; | ||
2312 | server_t *srt_server = NULL; | ||
2313 | cpu_entry_t *entry = task_sched_entry(task); | ||
2314 | |||
2315 | TRACE_TASK(task, "edf_hsb: task new at %llu\n", TIME(litmus_clock())); | ||
2316 | |||
2317 | raw_spin_lock_irqsave(global_lock, flags); | ||
2318 | |||
2319 | /* Setup job parameters */ | ||
2320 | release_at(task, litmus_clock()); | ||
2321 | |||
2322 | /* Create SRT server */ | ||
2323 | if (is_srt(task)) { | ||
2324 | |||
2325 | srt_server = server_alloc(GFP_ATOMIC); | ||
2326 | server_init(srt_server, &server_domain, | ||
2327 | task->pid, get_exec_cost(task), | ||
2328 | get_rt_period(task), 0); | ||
2329 | srt_server->type = S_SRT; | ||
2330 | |||
2331 | server_slack_create(srt_server); | ||
2332 | |||
2333 | } | ||
2334 | |||
2335 | /* Create task plugin data */ | ||
2336 | data = task_data_alloc(GFP_ATOMIC); | ||
2337 | data->owner = task; | ||
2338 | data->srt_server = srt_server; | ||
2339 | INIT_LIST_HEAD(&data->candidate_list); | ||
2340 | tsk_rt(task)->plugin_data = data; | ||
2341 | |||
2342 | /* Already running, update the cpu entry. | ||
2343 | * This tends to happen when the first tasks enter the system. | ||
2344 | */ | ||
2345 | if (running) { | ||
2346 | /* BUG_ON(entry->scheduled); */ | ||
2347 | |||
2348 | #ifdef CONFIG_RELEASE_MASTER | ||
2349 | if (entry->cpu != edf_hsb_release_master) { | ||
2350 | #endif | ||
2351 | entry->scheduled = task; | ||
2352 | tsk_rt(task)->scheduled_on = task_cpu(task); | ||
2353 | #ifdef CONFIG_RELEASE_MASTER | ||
2354 | } else { | ||
2355 | /* do not schedule on release master */ | ||
2356 | /* Cannot preempt! Causing a preemption with a BE task | ||
2357 | * somehow leads to that task never blocking during | ||
2358 | * a synchronous release. This is a bug! | ||
2359 | */ | ||
2360 | preempt_if_preemptable(entry->scheduled, entry->cpu); | ||
2361 | tsk_rt(task)->scheduled_on = NO_CPU; | ||
2362 | } | ||
2363 | #endif | ||
2364 | } else { | ||
2365 | task->rt_param.scheduled_on = NO_CPU; | ||
2366 | } | ||
2367 | |||
2368 | task->rt_param.linked_on = NO_CPU; | ||
2369 | job_arrival(task, entry); | ||
2370 | |||
2371 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
2372 | } | ||
2373 | |||
2374 | static void edf_hsb_task_wake_up(struct task_struct *task) | ||
2375 | { | ||
2376 | lt_t now; | ||
2377 | unsigned long flags; | ||
2378 | cpu_entry_t *entry = task_sched_entry(task); | ||
2379 | |||
2380 | |||
2381 | TRACE_TASK(task, "wake_up at %llu on %d, %d\n", TIME(litmus_clock()), | ||
2382 | task_cpu(task), task->rt_param.task_params.cpu); | ||
2383 | |||
2384 | raw_spin_lock_irqsave(global_lock, flags); | ||
2385 | |||
2386 | if (!is_be(task)) { | ||
2387 | if (is_srt(task)) { | ||
2388 | catchup_srt_server(task); | ||
2389 | } | ||
2390 | |||
2391 | /* Non-BE tasks are not sporadic in this model */ | ||
2392 | set_rt_flags(task, RT_F_RUNNING); | ||
2393 | /* The job blocked while it was being run by a slack server */ | ||
2394 | if (is_queued(task)) { | ||
2395 | check_slack_candidate(task); | ||
2396 | goto out; | ||
2397 | } | ||
2398 | } else { | ||
2399 | /* Re-release all BE tasks on wake-up */ | ||
2400 | now = litmus_clock(); | ||
2401 | |||
2402 | if (is_tardy(task, now)) { | ||
2403 | release_at(task, now); | ||
2404 | sched_trace_task_release(task); | ||
2405 | } | ||
2406 | } | ||
2407 | |||
2408 | job_arrival(task, entry); | ||
2409 | |||
2410 | out: | ||
2411 | raw_spin_unlock_irqrestore(global_lock, flags); | ||
2412 | } | ||
2413 | |||
2414 | /* | ||
2415 | * Unused. | ||
2416 | */ | ||
2417 | static void edf_hsb_tick(struct task_struct *t) | ||
2418 | { | ||
2419 | } | ||
2420 | |||
2421 | |||
2422 | /****************************************************************************** | ||
2423 | * Plugin | ||
2424 | ******************************************************************************/ | ||
2425 | |||
2426 | static struct sched_plugin edf_hsb_plugin __cacheline_aligned_in_smp = { | ||
2427 | .plugin_name = "EDF-HSB-NOSLACK", | ||
2428 | |||
2429 | .activate_plugin = edf_hsb_activate_plugin, | ||
2430 | .deactivate_plugin = edf_hsb_deactivate_plugin, | ||
2431 | |||
2432 | .schedule = edf_hsb_schedule, | ||
2433 | .admit_task = edf_hsb_admit_task, | ||
2434 | .task_block = edf_hsb_task_block, | ||
2435 | .task_exit = edf_hsb_task_exit, | ||
2436 | .task_new = edf_hsb_task_new, | ||
2437 | .task_wake_up = edf_hsb_task_wake_up, | ||
2438 | .tick = edf_hsb_tick, | ||
2439 | |||
2440 | /* From jobs.h */ | ||
2441 | .complete_job = complete_job, | ||
2442 | .release_at = release_at, | ||
2443 | }; | ||
2444 | |||
2445 | static int __init init_edf_hsb(void) | ||
2446 | { | ||
2447 | cpu_entry_t *entry; | ||
2448 | hrt_server_t *hrt_server; | ||
2449 | server_t *idle_slack; | ||
2450 | int rv, cpu; | ||
2451 | |||
2452 | rv = register_sched_plugin(&edf_hsb_plugin); | ||
2453 | if (rv) { | ||
2454 | printk(KERN_ERR "Could not register plugin %s.\n", | ||
2455 | edf_hsb_plugin.plugin_name); | ||
2456 | goto out; | ||
2457 | } | ||
2458 | |||
2459 | rv = make_plugin_proc_dir(&edf_hsb_plugin, &edf_hsb_proc_dir); | ||
2460 | if (rv) { | ||
2461 | printk(KERN_ERR "Could not create %s procfs dir.\n", | ||
2462 | edf_hsb_plugin.plugin_name); | ||
2463 | goto out; | ||
2464 | } | ||
2465 | |||
2466 | |||
2467 | task_data_cache = KMEM_CACHE(task_data, SLAB_PANIC); | ||
2468 | |||
2469 | /* Global domains */ | ||
2470 | edf_domain_init(&srt_domain, NULL, release_srt_jobs); | ||
2471 | rt_domain_init(&be_domain, be_ready_order, | ||
2472 | NULL, release_be_jobs); | ||
2473 | server_domain_init(&server_domain, servers_released, | ||
2474 | server_completed, NO_CPU, global_lock); | ||
2475 | |||
2476 | /* Server proc interfaces */ | ||
2477 | server_proc_init(&server_domain, | ||
2478 | edf_hsb_proc_dir, BE_PROC_NAME, | ||
2479 | admit_be_server, list_be_servers, | ||
2480 | stop_be_servers); | ||
2481 | server_proc_init(&server_domain, | ||
2482 | edf_hsb_proc_dir, HRT_PROC_NAME, | ||
2483 | admit_hrt_server, list_hrt_servers, | ||
2484 | stop_hrt_servers); | ||
2485 | |||
2486 | |||
2487 | /* Global collections */ | ||
2488 | bheap_init(&cpu_heap); | ||
2489 | bheap_init(&be_ready_servers); | ||
2490 | INIT_LIST_HEAD(&be_servers); | ||
2491 | INIT_LIST_HEAD(&slack_queue); | ||
2492 | INIT_LIST_HEAD(&slack_candidates); | ||
2493 | |||
2494 | for_each_online_cpu(cpu) { | ||
2495 | entry = &per_cpu(noslack_cpu_entries, cpu); | ||
2496 | hrt_server = &entry->hrt_server; | ||
2497 | |||
2498 | idle_slack = server_alloc(GFP_ATOMIC); /* idle slack: never depletes, lowest EDF priority */ | ||
2499 | server_init(idle_slack, &server_domain, | ||
2500 | IDLE_SLACK_BASE + cpu, | ||
2501 | LLONG_MAX, LLONG_MAX, 1); | ||
2502 | idle_slack->deadline = LLONG_MAX; | ||
2503 | idle_slack->budget = LLONG_MAX; | ||
2504 | idle_slack->job_no = 1; | ||
2505 | idle_slack->release = 1; | ||
2506 | idle_slack->type = S_SLACK; | ||
2507 | add_slack(idle_slack); | ||
2508 | |||
2509 | entry->cpu = cpu; | ||
2510 | entry->linked = NULL; | ||
2511 | entry->scheduled = NULL; | ||
2512 | entry->linked_server = NULL; | ||
2513 | |||
2514 | /* HRT server */ | ||
2515 | hrt_server->server.id = cpu; | ||
2516 | hrt_server->server.deadline = 0; | ||
2517 | hrt_server->server.period = 0; | ||
2518 | hrt_server->server.wcet = 0; | ||
2519 | hrt_server->ready = 0; | ||
2520 | |||
2521 | hrtimer_start_on_info_init(&hrt_server->slack_timer_info); | ||
2522 | |||
2523 | /* CPU entry bheap nodes */ | ||
2524 | entry->hn = &cpu_heap_node[cpu]; | ||
2525 | bheap_node_init(&entry->hn, entry); | ||
2526 | } | ||
2527 | |||
2528 | out: | ||
2529 | return rv; | ||
2530 | } | ||
2531 | |||
2532 | static void exit_edf_hsb(void) | ||
2533 | { | ||
2534 | int cpu; | ||
2535 | cpu_entry_t *entry; | ||
2536 | |||
2537 | stop_be_servers(); | ||
2538 | stop_hrt_servers(); | ||
2539 | |||
2540 | server_domain_destroy(&server_domain); | ||
2541 | |||
2542 | for_each_online_cpu(cpu) { | ||
2543 | entry = &per_cpu(noslack_cpu_entries, cpu); | ||
2544 | server_slack_destroy(&entry->hrt_server.server); | ||
2545 | server_destroy(&entry->hrt_server.server); | ||
2546 | } | ||
2547 | |||
2548 | if (edf_hsb_proc_dir) { | ||
2549 | remove_plugin_proc_dir(&edf_hsb_plugin); | ||
2550 | /* TODO: is this wrong? */ | ||
2551 | edf_hsb_proc_dir = NULL; | ||
2552 | } | ||
2553 | } | ||
2554 | |||
2555 | module_init(init_edf_hsb); | ||
2556 | module_exit(exit_edf_hsb); | ||
diff --git a/litmus/servers.c b/litmus/servers.c new file mode 100644 index 000000000000..5aebbe5ec1c1 --- /dev/null +++ b/litmus/servers.c | |||
@@ -0,0 +1,857 @@ | |||
1 | /* | ||
2 | * TODO: change from destroy to exit, rename server proc stuff | ||
3 | */ | ||
4 | #include <linux/hrtimer.h> | ||
5 | #include <linux/percpu.h> | ||
6 | #include <linux/sched.h> | ||
7 | #include <linux/uaccess.h> | ||
8 | #include <linux/ctype.h> | ||
9 | |||
10 | #include <litmus/bheap.h> | ||
11 | #include <litmus/litmus.h> | ||
12 | #include <litmus/litmus_proc.h> | ||
13 | #include <litmus/sched_trace.h> | ||
14 | #include <litmus/servers.h> | ||
15 | |||
16 | #define DEBUG_SERVERS | ||
17 | |||
18 | /* Not working */ | ||
19 | /* #define COMPLETION_ON_MASTER */ | ||
20 | |||
21 | #define TIME(x) \ | ||
22 | ({lt_t y = x; \ | ||
23 | do_div(y, NSEC_PER_MSEC); \ | ||
24 | y;}) | ||
25 | #ifdef DEBUG_SERVERS | ||
26 | #define _TRACE_SUB(fmt, args...) \ | ||
27 | sched_trace_log_message("%d P%d -[%s@%s:%d]: " fmt "\n", \ | ||
28 | TRACE_ARGS, ## args) | ||
29 | #define TRACE_SUB(s, fmt, args...) \ | ||
30 | do {\ | ||
31 | if (is_server_linked(s)) \ | ||
32 | _TRACE_SUB(TASK_FMT " " SERVER_FMT " " fmt, \ | ||
33 | TASK_ARGS(server_task(s)), \ | ||
34 | SERVER_ARGS(s), ##args); \ | ||
35 | else \ | ||
36 | _TRACE_SUB("(NULL) " SERVER_FMT " " fmt, \ | ||
37 | SERVER_ARGS(s), ##args); \ | ||
38 | } while(0) | ||
39 | |||
40 | #define _TRACE_TIMER(fmt, args...) \ | ||
41 | sched_trace_log_message("%d P%d*[%s@%s:%d]: " fmt " at %llu\n", \ | ||
42 | TRACE_ARGS, ## args, TIME(litmus_clock())) | ||
43 | #define TRACE_TIMER(s, fmt, args...) \ | ||
44 | do { \ | ||
45 | if (is_server_linked(s)) \ | ||
46 | _TRACE_TIMER(TASK_FMT " " SERVER_FMT " " fmt, \ | ||
47 | TASK_ARGS(server_task(s)), \ | ||
48 | SERVER_ARGS(s), ##args); \ | ||
49 | else \ | ||
50 | _TRACE_TIMER("(NULL) " SERVER_FMT " " fmt, \ | ||
51 | SERVER_ARGS(s), ##args); \ | ||
52 | } while(0) | ||
53 | #else | ||
54 | #define _TRACE_SUB(fmt, args...) | ||
55 | #define TRACE_SUB(s, fmt, args...) | ||
56 | #define TRACE_TIMER(s, fmt, args...) | ||
57 | #define _TRACE_TIMER(fmt, args...) | ||
58 | #endif | ||
59 | |||
60 | /* Used to run a server on a remote CPU */ | ||
61 | DEFINE_PER_CPU(struct hrtimer_start_on_info, server_cpu_infos); | ||
62 | |||
63 | /* Memory slabs for servers */ | ||
64 | struct kmem_cache *server_release_cache; | ||
65 | struct kmem_cache *server_cache; | ||
66 | |||
67 | /* | ||
68 | * Okay to call if the timer is not armed; returns hrtimer_try_to_cancel()'s result (0: inactive, 1: cancelled, -1: callback running). | ||
69 | */ | ||
70 | static inline int timer_cancel(struct hrtimer *timer) | ||
71 | { | ||
72 | if (hrtimer_active(timer)) | ||
73 | return hrtimer_try_to_cancel(timer); | ||
74 | else | ||
75 | return 0; | ||
76 | } | ||
77 | |||
78 | static int completion_timer_arm(server_domain_t* domain, int cpu) | ||
79 | { | ||
80 | int err = 0, on_cpu; | ||
81 | lt_t now = domain->start_times[cpu]; | ||
82 | server_t *server = domain->linked_servers[cpu]; | ||
83 | lt_t budget_exhausted = now + server->budget; | ||
84 | completion_timer_t *timer = &domain->completion_timers[cpu]; | ||
85 | |||
86 | /* This happens when someone attempts to call server_run while | ||
87 | * the server is completing. We can safely ignore the request | ||
88 | * here because completion_timer_fire will re-arm the timer if | ||
89 | * the server is still running / was run again. | ||
90 | */ | ||
91 | if (hrtimer_active(&timer->timer)) { | ||
92 | TRACE_SUB(server, "cannot arm completion, already active"); | ||
93 | return 0; | ||
94 | } | ||
95 | if (timer->armed) { | ||
96 | TRACE_SUB(server, "cannot arm completion, waiting for arm"); | ||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | if (lt_after(budget_exhausted, server->deadline)) | ||
101 | budget_exhausted = server->deadline; | ||
102 | |||
103 | TRACE_SUB(server, "start time: %llu", domain->start_times[cpu]); | ||
104 | |||
105 | #ifdef COMPLETION_ON_MASTER | ||
106 | if (domain->release_master != NO_CPU) | ||
107 | on_cpu = domain->release_master; | ||
108 | else | ||
109 | #endif | ||
110 | on_cpu = cpu; | ||
111 | |||
112 | err = 1; | ||
113 | if (cpu != smp_processor_id()) { | ||
114 | err = hrtimer_start_on(on_cpu, &timer->info, &timer->timer, | ||
115 | ns_to_ktime(budget_exhausted), | ||
116 | HRTIMER_MODE_ABS_PINNED); | ||
117 | if (err) { | ||
118 | TRACE_SUB(server, "failed to arm completion"); | ||
119 | } else { | ||
120 | TRACE_SUB(server, "success on P%d!", on_cpu); | ||
121 | } | ||
122 | } else if (atomic_read(&timer->info.state) == HRTIMER_START_ON_INACTIVE) { | ||
123 | err = __hrtimer_start_range_ns(&timer->timer, | ||
124 | ns_to_ktime(budget_exhausted), | ||
125 | 0 /* delta */, | ||
126 | HRTIMER_MODE_ABS_PINNED, | ||
127 | 0 /* no wakeup */); | ||
128 | } | ||
129 | |||
130 | timer->armed = (err) ? 0 : 1; | ||
131 | |||
132 | TRACE_SUB(server, "completion 0x%x and %p armed to fire at %llu, err: %d", | ||
133 | &timer->timer, | ||
134 | &timer->timer, | ||
135 | TIME(budget_exhausted), err); | ||
136 | |||
137 | return !err; | ||
138 | } | ||
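
The expiry programmed above is start-time plus remaining budget, clamped to the server deadline so a server is never charged past it. A tiny arithmetic sketch with invented values:

#include <stdio.h>

int main(void)
{
	unsigned long long start = 950, budget = 80, deadline = 1000;
	unsigned long long expiry = start + budget;  /* 1030 */

	if (expiry > deadline) /* lt_after() in the kernel code */
		expiry = deadline;
	printf("completion fires at %llu\n", expiry); /* 1000 */
	return 0;
}
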
139 | |||
140 | static enum hrtimer_restart completion_timer_fire(struct hrtimer *timer) | ||
141 | { | ||
142 | int cpu; | ||
143 | unsigned long flags; | ||
144 | enum hrtimer_restart rv; | ||
145 | struct task_struct *was_running; | ||
146 | completion_timer_t *completion_timer; | ||
147 | server_domain_t *domain; | ||
148 | server_t *server; | ||
149 | lt_t budget_exhausted; | ||
150 | |||
151 | rv = HRTIMER_NORESTART; | ||
152 | |||
153 | completion_timer = container_of(timer, completion_timer_t, timer); | ||
154 | domain = completion_timer->domain; | ||
155 | cpu = completion_timer->cpu; | ||
156 | |||
157 | raw_spin_lock_irqsave(domain->completion_lock, flags); | ||
158 | |||
159 | _TRACE_TIMER("completion timer firing on P%d", cpu); | ||
160 | |||
161 | /* We got the lock before someone tried to re-arm. Proceed. */ | ||
162 | if (completion_timer->armed) { | ||
163 | server = domain->linked_servers[cpu]; | ||
164 | TRACE_SUB(server, "completed"); | ||
165 | |||
166 | was_running = server_task(server); | ||
167 | |||
168 | server->budget = 0; | ||
169 | server->cpu = NO_CPU; | ||
170 | domain->start_times[cpu] = 0; | ||
171 | domain->linked_servers[cpu] = NULL; | ||
172 | domain->linked_tasks[cpu] = NULL; | ||
173 | |||
174 | domain->server_completed(server, was_running); | ||
175 | } | ||
176 | |||
177 | /* Someone either beat us to the lock or hooked up a new server | ||
178 | * when we called server_completed. Rearm the timer. | ||
179 | */ | ||
180 | if (domain->linked_servers[cpu] && !completion_timer->armed) { | ||
181 | server = domain->linked_servers[cpu]; | ||
182 | budget_exhausted = domain->start_times[cpu] + server->budget; | ||
183 | if (lt_after(budget_exhausted, server->deadline)) | ||
184 | budget_exhausted = server->deadline; | ||
185 | hrtimer_set_expires(timer, ns_to_ktime(budget_exhausted)); | ||
186 | completion_timer->armed = 1; | ||
187 | |||
188 | TRACE_SUB(server, "rearming on P%d at %llu", | ||
189 | cpu, TIME(budget_exhausted)); | ||
190 | |||
191 | rv = HRTIMER_RESTART; | ||
192 | } else { | ||
193 | completion_timer->armed = 0; | ||
194 | } | ||
195 | |||
196 | raw_spin_unlock_irqrestore(domain->completion_lock, flags); | ||
197 | |||
198 | return rv; | ||
199 | } | ||
200 | |||
201 | /* server_release_cache declared above; the cache itself is created in litmus.c */ | ||
202 | static enum hrtimer_restart release_servers_fire(struct hrtimer *timer); | ||
203 | |||
204 | /* | ||
205 | * Initialize heap. | ||
206 | */ | ||
207 | static server_release_heap_t* release_heap_alloc(int gfp_flags) | ||
208 | { | ||
209 | server_release_heap_t *rh; | ||
210 | rh = kmem_cache_alloc(server_release_cache, gfp_flags); | ||
211 | if (rh) { | ||
212 | hrtimer_init(&rh->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
213 | rh->timer.function = release_servers_fire; | ||
214 | } | ||
215 | return rh; | ||
216 | } | ||
217 | |||
218 | static void release_heap_free(server_release_heap_t* rh) | ||
219 | { | ||
220 | kmem_cache_free(server_release_cache, rh); | ||
221 | } | ||
222 | |||
223 | void server_init(server_t *server, server_domain_t *domain, | ||
224 | int id, lt_t wcet, lt_t period, int grouped) | ||
225 | { | ||
226 | server->id = id; | ||
227 | server->wcet = wcet; | ||
228 | server->period = period; | ||
229 | |||
230 | server->deadline = 0; | ||
231 | server->release = 0; | ||
232 | server->budget = 0; | ||
233 | server->job_no = 0; | ||
234 | server->cpu = NO_CPU; | ||
235 | |||
236 | server->domain = domain; | ||
237 | |||
238 | server->data = NULL; | ||
239 | |||
240 | server->hn = bheap_node_alloc(GFP_ATOMIC); | ||
241 | bheap_node_init(&server->hn, server); | ||
242 | INIT_LIST_HEAD(&server->list); | ||
243 | |||
244 | server->release_heap = NULL; | ||
245 | if (grouped) { | ||
246 | server->release_heap = release_heap_alloc(GFP_ATOMIC); | ||
247 | INIT_LIST_HEAD(&server->release_list); | ||
248 | } | ||
249 | } | ||
250 | |||
251 | void server_destroy(server_t *server) | ||
252 | { | ||
253 | bheap_node_free(server->hn); | ||
254 | if (server->release_heap) { | ||
255 | release_heap_free(server->release_heap); | ||
256 | } | ||
257 | } | ||
258 | |||
259 | server_t* server_alloc(int gfp_flags) | ||
260 | { | ||
261 | return kmem_cache_alloc(server_cache, gfp_flags); | ||
262 | } | ||
263 | |||
264 | void server_free(server_t *server) | ||
265 | { | ||
266 | kmem_cache_free(server_cache, server); | ||
267 | } | ||
268 | |||
269 | /* | ||
270 | * Handles subtraction of lt_t without underflows. | ||
271 | */ | ||
272 | static inline lt_t lt_subtract(lt_t a, lt_t b) | ||
273 | { | ||
274 | long long sub = (long long)a - (long long)b; | ||
275 | if (sub >= 0) | ||
276 | return sub; | ||
277 | else | ||
278 | return 0; | ||
279 | } | ||
280 | |||
281 | void server_run(server_t *server, struct task_struct *task) | ||
282 | { | ||
283 | int armed, cpu = task->rt_param.linked_on; | ||
284 | server_domain_t *domain = server->domain; | ||
285 | |||
286 | TRACE_SUB(server, "running on cpu P%d", task->rt_param.linked_on); | ||
287 | |||
288 | BUG_ON(is_server_linked(server)); | ||
289 | BUG_ON(server->cpu != NO_CPU); | ||
290 | BUG_ON(cpu == NO_CPU); | ||
291 | BUG_ON(domain->linked_servers[cpu]); | ||
292 | BUG_ON(domain->linked_tasks[cpu]); | ||
293 | |||
294 | server->cpu = cpu; | ||
295 | |||
296 | domain->linked_servers[cpu] = server; | ||
297 | domain->linked_tasks[cpu] = task; | ||
298 | domain->start_times[cpu] = litmus_clock(); | ||
299 | |||
300 | /* Arm completion timer */ | ||
301 | armed = completion_timer_arm(domain, cpu); | ||
302 | domain->completion_timers[cpu].armed = armed; | ||
303 | } | ||
304 | |||
305 | void server_stop(server_t *server) | ||
306 | { | ||
307 | int cpu; | ||
308 | lt_t elapsed_time, now = litmus_clock(); | ||
309 | server_domain_t *domain = server->domain; | ||
310 | |||
311 | if (!is_server_linked(server)) { | ||
312 | TRACE_SUB(server, "already stopped"); | ||
313 | return; | ||
314 | } | ||
315 | |||
316 | cpu = server->cpu; | ||
317 | BUG_ON(cpu == NO_CPU); | ||
318 | |||
319 | TRACE_SUB(server, "stopping server, start: %llu, end: %llu", | ||
320 | domain->start_times[cpu], now); | ||
321 | |||
322 | /* Calculate remaining budget */ | ||
323 | elapsed_time = lt_subtract(now, domain->start_times[cpu]); | ||
324 | server->budget = lt_subtract(server->budget, elapsed_time); | ||
325 | |||
326 | server->cpu = NO_CPU; | ||
327 | |||
328 | TRACE_SUB(server, "new budget: %llu", TIME(server->budget)); | ||
329 | BUG_ON(domain->linked_servers[cpu] != server); | ||
330 | |||
331 | /* Set domain state */ | ||
332 | domain->completion_timers[cpu].armed = 0; | ||
333 | domain->linked_servers[cpu] = NULL; | ||
334 | domain->linked_tasks[cpu] = NULL; | ||
335 | timer_cancel(&domain->completion_timers[cpu].timer); | ||
336 | } | ||
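
server_stop() charges elapsed wall-clock time against the budget through the saturating lt_subtract() defined above, so an overrunning server ends with zero budget rather than an underflowed huge value. A self-contained user-space sketch with invented numbers:

#include <stdio.h>

typedef unsigned long long lt_t;

static lt_t lt_subtract(lt_t a, lt_t b) /* copy of the helper above */
{
	long long sub = (long long)a - (long long)b;
	return sub >= 0 ? (lt_t)sub : 0;
}

int main(void)
{
	lt_t start = 1000, now = 1070, budget = 50;
	lt_t elapsed = lt_subtract(now, start);  /* 70 */

	budget = lt_subtract(budget, elapsed);   /* 50 - 70 clamps to 0 */
	printf("remaining budget: %llu\n", budget);
	return 0;
}
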
337 | |||
338 | void server_release(server_t *server) | ||
339 | { | ||
340 | BUG_ON(!server->deadline); | ||
341 | |||
342 | server->budget = server->wcet; | ||
343 | server->release = server->deadline; | ||
344 | server->deadline += server->period; | ||
345 | ++server->job_no; | ||
346 | |||
347 | TRACE_SUB(server, "budget: %llu, release: %llu," | ||
348 | "deadline: %llu, period: %llu, job: %d", | ||
349 | TIME(server->budget), TIME(server->release), TIME(server->deadline), | ||
350 | TIME(server->period), server->job_no); | ||
351 | |||
352 | /* Need to reset for budget calculations */ | ||
353 | if (is_server_linked(server)) | ||
354 | server->domain->start_times[server->cpu] = litmus_clock(); | ||
355 | } | ||
356 | |||
357 | void server_release_at(server_t *server, lt_t time) | ||
358 | { | ||
359 | server->deadline = time; | ||
360 | server_release(server); | ||
361 | |||
362 | TRACE_SUB(server, "releasing at %llu", time); | ||
363 | } | ||
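
A compact user-space model of the replenishment arithmetic in server_release()/server_release_at(), including the release-in-the-past trick that start_servers() relies on: releasing at time - period leaves a full budget with the first deadline landing exactly at time. All numbers are invented.

#include <stdio.h>

struct srv {
	unsigned long long wcet, period, budget, release, deadline;
	int job_no;
};

static void server_release(struct srv *s) /* mirrors the function above */
{
	s->budget = s->wcet;
	s->release = s->deadline;
	s->deadline += s->period;
	s->job_no++;
}

static void server_release_at(struct srv *s, unsigned long long t)
{
	s->deadline = t;
	server_release(s);
}

int main(void)
{
	struct srv s = { .wcet = 10, .period = 100 };
	unsigned long long start = 500;

	server_release_at(&s, start - s.period); /* the catchup release */
	printf("release %llu, deadline %llu, budget %llu, job %d\n",
	       s.release, s.deadline, s.budget, s.job_no);
	/* prints: release 400, deadline 500, budget 10, job 1 */
	return 0;
}
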

/******************************************************************************
 * Proc methods
 ******************************************************************************/

static int server_proc_read(char *page, char **start, off_t off,
                            int count, int *eof, void *data)
{
        int length;
        server_proc_t *proc = (server_proc_t*)data;

        proc->page = page;
        proc->length = 0;
        proc->list_servers(proc);

        length = proc->length;
        *eof = 1;

        proc->length = 0;
        proc->page = NULL;

        return length;
}

void list_server(server_t *server, int cpu, server_proc_t *proc)
{
        if (cpu == NO_CPU) {
                proc->length +=
                        snprintf(proc->page + proc->length,
                                 PAGE_SIZE - proc->length,
                                 "%8llu %8llu\n",
                                 server->wcet, server->period);
        } else {
                proc->length +=
                        snprintf(proc->page + proc->length,
                                 PAGE_SIZE - proc->length,
                                 "%8llu %8llu %3d\n",
                                 server->wcet, server->period, cpu);
        }
}
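
/* Reading the proc file therefore yields one line per server, e.g.
 * (illustrative values only):
 *
 *       25      100
 *       50      200   3
 *
 * where the columns are wcet, period, and, for servers bound to a
 * processor, the CPU number.
 */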

/*
 * Validate server parameters.
 */
static inline int server_param_check(unsigned long long wcet,
                                     unsigned long long period,
                                     int cpu)
{
        int rv = 0;

        /* wcet is unsigned, so only zero is invalid */
        if (!wcet) {
                printk(KERN_WARNING "Invalid WCET '%llu'\n", wcet);
                rv = -EINVAL;
                goto out;
        }

        if (period < wcet) {
                printk(KERN_WARNING "Invalid period '%llu'\n", period);
                rv = -EINVAL;
                goto out;
        }

        if (cpu != NO_CPU && (cpu < 0 || cpu >= nr_cpu_ids)) {
                printk(KERN_WARNING "Invalid CPU '%d'\n", cpu);
                rv = -EINVAL;
                goto out;
        }
out:
        return rv;
}
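
/* For instance, (wcet = 25, period = 100, cpu = NO_CPU) passes, while
 * wcet = 0, period < wcet, or any cpu outside [0, nr_cpu_ids) other
 * than NO_CPU is rejected with -EINVAL.
 */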

/* Macro to see if we are in the buffer's range and not at the null byte */
#define buf_in_range(buf, pos, max) (buf <= pos && pos < (buf + max) && *pos)

/* Advance pos to the next newline, or to the end of the buffer */
#define find_newline(buf, pos, max)                     \
        do {                                            \
                while (buf_in_range(buf, pos, max) &&   \
                       *pos != '\n')                    \
                        ++pos;                          \
        } while (0)
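
/* Writes to the proc file are parsed by server_proc_write() below: each
 * line holds "wcet period" or "wcet period cpu". A hypothetical
 * invocation (the exact proc path depends on where the plugin creates
 * its entry) might look like:
 *
 *      echo "25 100" > /proc/litmus/<plugin>/<file>
 *      echo "25 100 3" > /proc/litmus/<plugin>/<file>
 */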

static int server_proc_write(struct file *file, const char __user *input,
                             unsigned long count, void *data)
{
        server_proc_t *proc = (server_proc_t*)data;
#define SERVER_PROC_BUF 512
        char buffer[SERVER_PROC_BUF];
        unsigned long long wcet, period;
        char *pos, *newline;
        int nums_converted, chars_seen, ret, cpu;

        /* Allow plugin to stop any running servers */
        proc->stop_servers();

        if (count >= SERVER_PROC_BUF) {
                printk(KERN_WARNING "proc buffer possibly too small in %s.\n",
                       __func__);
                return -ENOSPC;
        }

        memset(buffer, 0, SERVER_PROC_BUF);

        /* Input is definitely < SERVER_PROC_BUF (see above check) */
        if (copy_from_user(buffer, input, count))
                return -EFAULT;

        buffer[SERVER_PROC_BUF-1] = '\0';
        pos = buffer;

        while (buf_in_range(buffer, pos, SERVER_PROC_BUF)) {
                newline = pos;
                find_newline(buffer, newline, SERVER_PROC_BUF);
                if (buf_in_range(buffer, newline, SERVER_PROC_BUF)) {
                        /* If there was a newline character */
                        *newline = '\0';
                }
                nums_converted = sscanf(pos, "%llu %llu %d%n", &wcet,
                                        &period, &cpu, &chars_seen);
                if (nums_converted == 2)
                        cpu = NO_CPU;
                if (nums_converted != 2 && nums_converted != 3) {
                        printk(KERN_WARNING "Didn't see 2-3 integers for "
                               "server config: %s\n", pos);
                        goto loop_end;
                }

                ret = server_param_check(wcet, period, cpu);
                if (ret)
                        goto loop_end;

                ret = proc->admit_server(wcet, period, cpu);
                if (ret) {
                        printk(KERN_WARNING "Litmus plugin rejects server with "
                               "period: %llu, wcet: %llu, cpu: %d\n",
                               period, wcet, cpu);
                        goto loop_end; /* Currently does nothing */
                }
loop_end:
                pos = newline + 1; /* Consider next line */
        }

        return count;
}

server_proc_t* server_proc_init(server_domain_t *domain,
                                struct proc_dir_entry *proc_dir, char *file,
                                admit_server_t admit_server,
                                list_servers_t list_servers,
                                stop_servers_t stop_servers)
{
        server_proc_t *server_proc = NULL;
        struct proc_dir_entry *entry;

        entry = create_proc_entry(file, 0644, proc_dir);
        if (!entry) {
                printk(KERN_ERR "Could not create proc entry: %s.\n", file);
                goto out;
        }

        server_proc = kmalloc(sizeof(server_proc_t), GFP_ATOMIC);
        if (!server_proc) {
                printk(KERN_ERR "Could not allocate server proc: %s.\n", file);
                remove_proc_entry(file, proc_dir);
                goto out;
        }

        entry->data = server_proc;
        entry->read_proc = server_proc_read;
        entry->write_proc = server_proc_write;

        server_proc->entry = entry;
        server_proc->admit_server = admit_server;
        server_proc->list_servers = list_servers;
        server_proc->stop_servers = stop_servers;
        server_proc->length = 0;
        server_proc->page = NULL;

        INIT_LIST_HEAD(&server_proc->list);
        list_add(&server_proc->list, &domain->server_procs);

out:
        return server_proc;
}

void server_proc_exit(server_proc_t *proc)
{
        remove_proc_entry(proc->entry->name, proc->entry->parent);
        list_del(&proc->list);
        kfree(proc);
}

/******************************************************************************
 * Domain methods
 ******************************************************************************/

void server_domain_init(server_domain_t *domain,
                        servers_released_t servers_released,
                        server_completed_t server_completed,
                        int release_master, raw_spinlock_t *completion_lock)
{
        int i;
        BUG_ON(!servers_released || !server_completed);

        INIT_LIST_HEAD(&domain->tobe_released);
        for (i = 0; i < SERVER_RELEASE_QUEUE_SLOTS; i++)
                INIT_LIST_HEAD(&domain->release_queue[i]);

        raw_spin_lock_init(&domain->release_lock);
        raw_spin_lock_init(&domain->tobe_lock);

        domain->release_master = release_master;
        domain->completion_lock = completion_lock;
        domain->server_completed = server_completed;
        domain->servers_released = servers_released;

        INIT_LIST_HEAD(&domain->server_procs);

        domain->completion_timers =
                kmalloc(NR_CPUS*sizeof(completion_timer_t), GFP_ATOMIC);
        domain->linked_servers =
                kmalloc(NR_CPUS*sizeof(server_t*), GFP_ATOMIC);
        domain->linked_tasks =
                kmalloc(NR_CPUS*sizeof(struct task_struct*), GFP_ATOMIC);
        domain->start_times =
                kmalloc(NR_CPUS*sizeof(lt_t), GFP_ATOMIC);

        for_each_online_cpu(i) {
                domain->linked_tasks[i] = NULL;
                domain->linked_servers[i] = NULL;
                domain->start_times[i] = 0;

                /* Initialize the completion timer info */
                domain->completion_timers[i].armed = 0;
                domain->completion_timers[i].cpu = i;
                hrtimer_init(&domain->completion_timers[i].timer,
                             CLOCK_MONOTONIC,
                             HRTIMER_MODE_ABS);
                domain->completion_timers[i].timer.function =
                        completion_timer_fire;
                hrtimer_start_on_info_init(&domain->completion_timers[i].info);
                domain->completion_timers[i].domain = domain;
        }
}

void server_domain_destroy(server_domain_t *domain)
{
        struct list_head *pos, *safe;
        server_proc_t *proc;

        kfree(domain->completion_timers);
        kfree(domain->linked_tasks);
        kfree(domain->linked_servers);
        kfree(domain->start_times);

        list_for_each_safe(pos, safe, &domain->server_procs) {
                proc = list_entry(pos, server_proc_t, list);
                server_proc_exit(proc);
        }
}

static unsigned int time2slot(lt_t time)
{
        return (unsigned int) time2quanta(time, FLOOR) %
                SERVER_RELEASE_QUEUE_SLOTS;
}
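
/* In other words, release times are hashed by quantum: a release time
 * is rounded down to a quantum boundary via time2quanta(time, FLOOR)
 * and then mapped to one of SERVER_RELEASE_QUEUE_SLOTS buckets.
 * Distinct release times that collide in a slot are kept apart by the
 * time-ordered list walk in get_release_heap() below.
 */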

/*
 * Send a list of servers to a client callback.
 */
static enum hrtimer_restart release_servers_fire(struct hrtimer *timer)
{
        unsigned long flags;
        server_release_heap_t *rh;

        _TRACE_SUB("on_release_timer(0x%p) starts.", timer);

        rh = container_of(timer, server_release_heap_t, timer);

        raw_spin_lock_irqsave(&rh->domain->release_lock, flags);
        _TRACE_SUB("CB has the release_lock");

        /* Remove from release queue */
        list_del(&rh->list);

        raw_spin_unlock_irqrestore(&rh->domain->release_lock, flags);
        _TRACE_SUB("CB returned release_lock");

        /* Call release callback */
        rh->domain->servers_released(&rh->servers);
        /* WARNING: rh can be referenced from other CPUs from now on. */

        _TRACE_SUB("on_release_timer(0x%p) ends.", timer);

        return HRTIMER_NORESTART;
}

/*
 * Return the release heap for the server's release time. Caller must
 * hold the release lock. If no such heap exists and use_server_heap is
 * set, the server's pre-allocated heap is inserted and returned;
 * otherwise NULL is returned.
 */
static server_release_heap_t* get_release_heap(server_domain_t *rt,
                                               server_t *server,
                                               int use_server_heap)
{
        struct list_head *pos;
        server_release_heap_t *heap = NULL;
        server_release_heap_t *rh;
        lt_t release_time = server->release;
        unsigned int slot = time2slot(release_time);

        _TRACE_SUB("searching for release time %llu", release_time);

        /* Initialize pos for the case that the list is empty */
        pos = rt->release_queue[slot].next;
        list_for_each(pos, &rt->release_queue[slot]) {
                rh = list_entry(pos, server_release_heap_t, list);
                if (release_time == rh->release_time) {
                        /* Perfect match -- this happens on hyperperiod
                         * boundaries
                         */
                        heap = rh;
                        break;
                } else if (lt_before(release_time, rh->release_time)) {
                        /* We need to insert a new node since rh is
                         * already in the future
                         */
                        break;
                }
        }
        if (!heap && use_server_heap) {
                /* Use pre-allocated release heap */
                rh = server->release_heap;
                rh->domain = rt;
                rh->release_time = release_time;

                /* Add to release queue */
                list_add(&rh->list, pos->prev);
                heap = rh;
        }
        return heap;
}

/*
 * Prepare a server's release_heap for use.
 */
static int reinit_release_heap(server_t *server)
{
        int rv = 0;
        server_release_heap_t* rh;

        /* Use pre-allocated release heap */
        rh = server->release_heap;

        /* WARNING: If the CPU still holds the release_lock at this point,
         * deadlock may occur!
         */
        rv = hrtimer_try_to_cancel(&rh->timer);

        /* hrtimer_try_to_cancel() returns -1 when the timer callback is
         * currently running; in that case it is useless to add to the
         * release heap now.
         */
        if (rv == -1) {
                rv = 0;
                goto out;
        }

        /* Under no circumstances should the timer have been active
         * but not running (hrtimer_try_to_cancel() returning 1).
         */
        /* TODO: stop living dangerously */
        //BUG_ON(rv == 1);
        rv = 1;

        /* initialize */
        INIT_LIST_HEAD(&rh->servers);
        atomic_set(&rh->info.state, HRTIMER_START_ON_INACTIVE);
out:
        return rv;
}

/*
 * Arm the release timer for the next set of servers.
 */
static int arm_release_timer(server_domain_t *domain)
{
        int rv = 1;
        struct list_head list;
        struct list_head *pos, *safe;
        server_t *server;
        server_release_heap_t *rh;

        _TRACE_SUB("arm_release_timer() at %llu", litmus_clock());
        list_replace_init(&domain->tobe_released, &list);

        list_for_each_safe(pos, safe, &list) {
                /* Pick server from work list */
                server = list_entry(pos, server_t, release_list);
                list_del(pos);

                /* Put into release heap while holding release_lock */
                raw_spin_lock(&domain->release_lock);
                TRACE_SUB(server, "I have the release_lock");

                rh = get_release_heap(domain, server, 0);
                if (!rh) {
                        /* Need to use our own, but drop lock first */
                        raw_spin_unlock(&domain->release_lock);
                        TRACE_SUB(server, "Dropped release_lock");

                        rv = reinit_release_heap(server);

                        /* Bail! We missed the release time */
                        if (!rv) {
                                TRACE_SUB(server, "missed release");
                                goto out;
                        }

                        TRACE_SUB(server, "release_heap ready");

                        raw_spin_lock(&domain->release_lock);
                        TRACE_SUB(server, "Re-acquired release_lock");

                        rh = get_release_heap(domain, server, 1);
                }

                list_add(&server->release_list, &rh->servers);
                TRACE_SUB(server, "arm_release_timer(): added to release heap");

                raw_spin_unlock(&domain->release_lock);
                TRACE_SUB(server, "Returned the release_lock");

                /* To avoid arming the timer multiple times, we only let the
                 * owner do the arming (which is the "first" server to
                 * reference this release_heap anyway).
                 */
                if (rh == server->release_heap) {
                        TRACE_SUB(server, "arming timer 0x%p at %llu on P%d",
                                  &rh->timer, rh->release_time,
                                  domain->release_master);
                        /* We cannot arm the timer using hrtimer_start()
                         * as it may deadlock on rq->lock
                         *
                         * PINNED mode is ok on both local and remote CPU
                         */
                        if (domain->release_master == NO_CPU) {
                                __hrtimer_start_range_ns(&rh->timer,
                                        ns_to_ktime(rh->release_time),
                                        0, HRTIMER_MODE_ABS_PINNED, 0);
                        } else {
                                hrtimer_start_on(domain->release_master,
                                        &rh->info, &rh->timer,
                                        ns_to_ktime(rh->release_time),
                                        HRTIMER_MODE_ABS_PINNED);
                        }
                } else
                        TRACE_SUB(server, "0x%p is not my timer", &rh->timer);
        }
out:
        return rv;
}

int add_server_release(server_t *server, server_domain_t *domain)
{
        TRACE_SUB(server, "adding to release at %llu", server->release);
        list_add(&server->release_list, &domain->tobe_released);
        return arm_release_timer(domain);
}
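
/* A typical caller (a sketch; the real call sites live in the scheduler
 * plugins) sets up the next release and then queues the server:
 *
 *      server_release_at(server, next_release);
 *      if (!add_server_release(server, domain))
 *              handle_missed_release(server);  <-- hypothetical handler
 *
 * add_server_release() returns the value of arm_release_timer(): 1 on
 * success, 0 if the release time was already missed.
 */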

static int __init init_servers(void)
{
        server_cache = KMEM_CACHE(server, SLAB_PANIC);
        server_release_cache = KMEM_CACHE(server_release_heap, SLAB_PANIC);
        return 0;
}

static void exit_servers(void)
{
        kmem_cache_destroy(server_cache);
        kmem_cache_destroy(server_release_cache);
}

module_init(init_servers);
module_exit(exit_servers);