author		Bjoern Brandenburg <bbb@mpi-sws.org>	2015-08-09 07:18:48 -0400
committer	Bjoern Brandenburg <bbb@mpi-sws.org>	2015-08-09 06:21:18 -0400
commit		8e048c798adaabef530a1526f7ce8c6c3cd3475e (patch)
tree		5a96b3eaeaafecec1bf08ba71a9d0084d39d46eb /litmus/srp.c
parent		bd175e94795774908317a861a883761b75750e35 (diff)
Add LITMUS^RT core implementation
This patch adds the core of LITMUS^RT:
- library functionality (heaps, rt_domain, prioritization, etc.)
- budget enforcement logic
- job management
- system call backends
- virtual devices (control page, etc.)
- scheduler plugin API (and dummy plugin)
This code compiles, but is not yet integrated with the rest of Linux.
Diffstat (limited to 'litmus/srp.c')
-rw-r--r--	litmus/srp.c	308
1 file changed, 308 insertions, 0 deletions
diff --git a/litmus/srp.c b/litmus/srp.c
new file mode 100644
index 000000000000..7ab388646e29
--- /dev/null
+++ b/litmus/srp.c
@@ -0,0 +1,308 @@
/* ************************************************************************** */
/*                          STACK RESOURCE POLICY                             */
/* ************************************************************************** */

#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/wait.h>

#include <litmus/litmus.h>
#include <litmus/sched_plugin.h>
#include <litmus/fdso.h>
#include <litmus/trace.h>


#ifdef CONFIG_LITMUS_LOCKING

#include <litmus/srp.h>

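/* Preemption-level query: maps a task to its SRP priority ("preemption
 * level"); 0 means no priority (see srp_higher_prio() below). Presumably
 * installed by the active (partitioned) scheduler plugin. */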
srp_prioritization_t get_srp_prio;

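/* Per-CPU SRP state: the list of priority ceilings of currently held
 * semaphores, sorted so that the head is the current system ceiling, plus a
 * wait queue for jobs blocked on that ceiling. */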
struct srp {
	struct list_head ceiling;
	wait_queue_head_t ceiling_blocked;
};
#define system_ceiling(srp) list2prio(srp->ceiling.next)
#define ceiling2sem(c) container_of(c, struct srp_semaphore, ceiling)

#define UNDEF_SEM -2

DEFINE_PER_CPU(struct srp, srp);

DEFINE_PER_CPU(int, srp_objects_in_use);

/* Initialize SRP semaphores at boot time. */
static int __init srp_init(void)
{
	int i;

	printk("Initializing SRP per-CPU ceilings...");
	for (i = 0; i < NR_CPUS; i++) {
		init_waitqueue_head(&per_cpu(srp, i).ceiling_blocked);
		INIT_LIST_HEAD(&per_cpu(srp, i).ceiling);
		per_cpu(srp_objects_in_use, i) = 0;
	}
	printk(" done!\n");

	return 0;
}
module_init(srp_init);

/* SRP task priority comparison function. Smaller numeric values have higher
 * priority, tie-break is PID. Special case: priority == 0 <=> no priority
 */
static int srp_higher_prio(struct srp_priority* first,
			   struct srp_priority* second)
{
	if (!first->priority)
		return 0;
	else
		return !second->priority ||
			first->priority < second->priority || (
			first->priority == second->priority &&
			first->pid < second->pid);
}


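/* Return non-zero if first may proceed under SRP on this CPU: either its
 * preemption level exceeds the current system ceiling, or it already owns
 * the semaphore that defines the ceiling. An empty ceiling list means no
 * resource is held, so any task qualifies. */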
static int srp_exceeds_ceiling(struct task_struct* first,
			       struct srp* srp)
{
	struct srp_priority prio;

	if (list_empty(&srp->ceiling))
		return 1;
	else {
		prio.pid = first->pid;
		prio.priority = get_srp_prio(first);
		return srp_higher_prio(&prio, system_ceiling(srp)) ||
			ceiling2sem(system_ceiling(srp))->owner == first;
	}
}

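/* Insert prio into the per-CPU ceiling list, keeping the list sorted by
 * decreasing priority so that the head remains the system ceiling. */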
static void srp_add_prio(struct srp* srp, struct srp_priority* prio)
{
	struct list_head *pos;
	if (in_list(&prio->list)) {
		printk(KERN_CRIT "WARNING: SRP violation detected, prio is already in "
		       "ceiling list! cpu=%d, srp=%p\n", smp_processor_id(), ceiling2sem(prio));
		return;
	}
	list_for_each(pos, &srp->ceiling)
		if (unlikely(srp_higher_prio(prio, list2prio(pos)))) {
			__list_add(&prio->list, pos->prev, pos);
			return;
		}

	list_add_tail(&prio->list, &srp->ceiling);
}


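/* Under SRP a resource is always available when a job requests it, since
 * ceiling blocking is done before the job starts executing (see
 * __srp_ceiling_block() below). Acquisition therefore never blocks: it only
 * raises the per-CPU ceiling and records the owner. */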
static int lock_srp_semaphore(struct litmus_lock* l)
{
	struct task_struct* t = current;
	struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);

	if (!is_realtime(t))
		return -EPERM;

	/* prevent acquisition of local locks in global critical sections */
	if (tsk_rt(t)->num_locks_held)
		return -EBUSY;

	preempt_disable();

	/* Update ceiling. */
	srp_add_prio(this_cpu_ptr(&srp), &sem->ceiling);

	/* SRP invariant: all resources available */
	BUG_ON(sem->owner != NULL);

	sem->owner = t;
	TRACE_CUR("acquired srp 0x%p\n", sem);

	tsk_rt(t)->num_local_locks_held++;

	preempt_enable();

	return 0;
}

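/* Release the semaphore on the CPU it is bound to: pop its entry from the
 * ceiling list, clear ownership, and wake any jobs blocked on the (now
 * possibly lower) system ceiling. */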
static int unlock_srp_semaphore(struct litmus_lock* l)
{
	struct task_struct* t = current;
	struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
	int err = 0;

	preempt_disable();

	if (sem->owner != t) {
		err = -EINVAL;
	} else {
		/* The current owner should be executing on the correct CPU.
		 *
		 * If the owner transitioned out of RT mode or is exiting, then
		 * it might have already been migrated away by the best-effort
		 * scheduler and we just have to deal with it. */
		if (unlikely(!is_realtime(t) && sem->cpu != smp_processor_id())) {
			TRACE_TASK(t, "SRP unlock cpu=%d, sem->cpu=%d\n",
				   smp_processor_id(), sem->cpu);
			preempt_enable();
			err = litmus_be_migrate_to(sem->cpu);
			preempt_disable();
			TRACE_TASK(t, "post-migrate: cpu=%d, sem->cpu=%d err=%d\n",
				   smp_processor_id(), sem->cpu, err);
		}
		BUG_ON(sem->cpu != smp_processor_id());
		err = 0;

		/* Determine new system priority ceiling for this CPU. */
		BUG_ON(!in_list(&sem->ceiling.list));

		list_del(&sem->ceiling.list);
		sem->owner = NULL;

		/* Wake tasks on this CPU, if they exceed current ceiling. */
		TRACE_CUR("released srp 0x%p\n", sem);
		wake_up_all(&this_cpu_ptr(&srp)->ceiling_blocked);

		tsk_rt(t)->num_local_locks_held--;
	}

	preempt_enable();
	return err;
}

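/* Bind the semaphore to the opener's partition on first open and raise the
 * semaphore's static priority ceiling to the opener's preemption level.
 * Tasks from other partitions may not open the same SRP semaphore. */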
static int open_srp_semaphore(struct litmus_lock* l, void* __user arg)
{
	struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
	int err = 0;
	struct task_struct* t = current;
	struct srp_priority t_prio;

	if (!is_realtime(t))
		return -EPERM;

	TRACE_CUR("opening SRP semaphore %p, cpu=%d\n", sem, sem->cpu);

	preempt_disable();

	if (sem->owner != NULL)
		err = -EBUSY;

	if (err == 0) {
		if (sem->cpu == UNDEF_SEM)
			sem->cpu = get_partition(t);
		else if (sem->cpu != get_partition(t))
			err = -EPERM;
	}

	if (err == 0) {
		t_prio.priority = get_srp_prio(t);
		t_prio.pid = t->pid;
		if (srp_higher_prio(&t_prio, &sem->ceiling)) {
			sem->ceiling.priority = t_prio.priority;
			sem->ceiling.pid = t_prio.pid;
		}
	}

	preempt_enable();

	return err;
}

static int close_srp_semaphore(struct litmus_lock* l)
{
	struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
	int err = 0;

	preempt_disable();

	if (sem->owner == current)
		unlock_srp_semaphore(l);

	preempt_enable();

	return err;
}

static void deallocate_srp_semaphore(struct litmus_lock* l)
{
	struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
	raw_cpu_dec(srp_objects_in_use);
	kfree(sem);
}

static struct litmus_lock_ops srp_lock_ops = {
	.open = open_srp_semaphore,
	.close = close_srp_semaphore,
	.lock = lock_srp_semaphore,
	.unlock = unlock_srp_semaphore,
	.deallocate = deallocate_srp_semaphore,
};

struct srp_semaphore* allocate_srp_semaphore(void)
{
	struct srp_semaphore* sem;

	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
	if (!sem)
		return NULL;

	INIT_LIST_HEAD(&sem->ceiling.list);
	sem->ceiling.priority = 0;
	sem->cpu = UNDEF_SEM;
	sem->owner = NULL;

	sem->litmus_lock.ops = &srp_lock_ops;

	raw_cpu_inc(srp_objects_in_use);
	return sem;
}

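/* Custom wake-up callback used on the per-CPU ceiling_blocked wait queue:
 * only wake the waiting task if it is on its own partition and its
 * preemption level now exceeds the system ceiling. */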
static int srp_wake_up(wait_queue_t *wait, unsigned mode, int sync,
		       void *key)
{
	int cpu = smp_processor_id();
	struct task_struct *tsk = wait->private;
	if (cpu != get_partition(tsk))
		TRACE_TASK(tsk, "srp_wake_up on wrong cpu, partition is %d\n",
			   get_partition(tsk));
	else if (srp_exceeds_ceiling(tsk, this_cpu_ptr(&srp)))
		return default_wake_function(wait, mode, sync, key);
	return 0;
}

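/* Block the current task on this CPU's ceiling_blocked wait queue until
 * srp_wake_up() lets it pass. srp_non_recurse is presumably set so that the
 * schedule() below does not trigger another SRP ceiling check (assumption
 * based on the flag's name and its use here). */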
static void do_ceiling_block(struct task_struct *tsk)
{
	wait_queue_t wait = {
		.private = tsk,
		.func = srp_wake_up,
		.task_list = {NULL, NULL}
	};

	tsk->state = TASK_UNINTERRUPTIBLE;
	add_wait_queue(&this_cpu_ptr(&srp)->ceiling_blocked, &wait);
	tsk->rt_param.srp_non_recurse = 1;
	preempt_enable_no_resched();
	schedule();
	preempt_disable();
	tsk->rt_param.srp_non_recurse = 0;
	remove_wait_queue(&this_cpu_ptr(&srp)->ceiling_blocked, &wait);
}

/* Wait for current task priority to exceed system-wide priority ceiling.
 */
void __srp_ceiling_block(struct task_struct *cur)
{
	preempt_disable();
	if (!srp_exceeds_ceiling(cur, this_cpu_ptr(&srp))) {
		TRACE_CUR("is priority ceiling blocked.\n");
		while (!srp_exceeds_ceiling(cur, this_cpu_ptr(&srp)))
			do_ceiling_block(cur);
		TRACE_CUR("finally exceeds system ceiling.\n");
	} else
		TRACE_CUR("is not priority ceiling blocked\n");
	preempt_enable();
}

#endif