author    Bjoern Brandenburg <bbb@mpi-sws.org>    2013-06-25 01:27:07 -0400
committer Bjoern Brandenburg <bbb@mpi-sws.org>    2013-08-07 03:46:49 -0400
commit    543810eb67bea9c3046ecb58388493bca39fe796 (patch)
tree      cf65010367e53dfbd3e39a9eb6e89dacf92348f3 /litmus/srp.c
parent    1412c8b72e192a14b8dd620f58a75f55a5490783 (diff)
Add LITMUS^RT core implementation
This patch adds the core of LITMUS^RT:
- library functionality (heaps, rt_domain, prioritization, etc.)
- budget enforcement logic
- job management
- system call backends
- virtual devices (control page, etc.)
- scheduler plugin API (and dummy plugin)

This code compiles, but is not yet integrated with the rest of Linux.
Diffstat (limited to 'litmus/srp.c')
-rw-r--r--    litmus/srp.c    305
1 files changed, 305 insertions, 0 deletions
diff --git a/litmus/srp.c b/litmus/srp.c
new file mode 100644
index 000000000000..c88dbf2f580f
--- /dev/null
+++ b/litmus/srp.c
@@ -0,0 +1,305 @@
/* ************************************************************************** */
/*                           STACK RESOURCE POLICY                            */
/* ************************************************************************** */

#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/wait.h>

#include <litmus/litmus.h>
#include <litmus/sched_plugin.h>
#include <litmus/fdso.h>
#include <litmus/trace.h>


#ifdef CONFIG_LITMUS_LOCKING

#include <litmus/srp.h>

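/* Provided by the active scheduler plugin: maps a task to its SRP priority
 * (preemption level). It must be set before SRP semaphores are used, since
 * open_srp_semaphore() and the ceiling checks below call it. */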
srp_prioritization_t get_srp_prio;

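/* Per-CPU SRP state: the list of priority ceilings of all currently locked
 * SRP semaphores on this CPU (sorted, head = system ceiling) and a wait
 * queue for tasks blocked on that ceiling. */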
struct srp {
        struct list_head ceiling;
        wait_queue_head_t ceiling_blocked;
};
#define system_ceiling(srp) list2prio(srp->ceiling.next)
#define ceiling2sem(c) container_of(c, struct srp_semaphore, ceiling)

#define UNDEF_SEM -2

atomic_t srp_objects_in_use = ATOMIC_INIT(0);

DEFINE_PER_CPU(struct srp, srp);

/* Initialize SRP semaphores at boot time. */
static int __init srp_init(void)
{
        int i;

        printk("Initializing SRP per-CPU ceilings...");
        for (i = 0; i < NR_CPUS; i++) {
                init_waitqueue_head(&per_cpu(srp, i).ceiling_blocked);
                INIT_LIST_HEAD(&per_cpu(srp, i).ceiling);
        }
        printk(" done!\n");

        return 0;
}
module_init(srp_init);

/* SRP task priority comparison function. Smaller numeric values have higher
 * priority, tie-break is PID. Special case: priority == 0 <=> no priority
 */
static int srp_higher_prio(struct srp_priority* first,
                           struct srp_priority* second)
{
        if (!first->priority)
                return 0;
        else
                return !second->priority ||
                        first->priority < second->priority || (
                        first->priority == second->priority &&
                        first->pid < second->pid);
}


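/* Returns non-zero if the task's priority exceeds this CPU's current system
 * ceiling, or if the task itself owns the semaphore that determines the
 * ceiling. */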
static int srp_exceeds_ceiling(struct task_struct* first,
                               struct srp* srp)
{
        struct srp_priority prio;

        if (list_empty(&srp->ceiling))
                return 1;
        else {
                prio.pid = first->pid;
                prio.priority = get_srp_prio(first);
                return srp_higher_prio(&prio, system_ceiling(srp)) ||
                        ceiling2sem(system_ceiling(srp))->owner == first;
        }
}

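/* Insert a ceiling entry into the per-CPU ceiling list, keeping the list
 * sorted by decreasing priority so that its head is the system ceiling. */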
static void srp_add_prio(struct srp* srp, struct srp_priority* prio)
{
        struct list_head *pos;
        if (in_list(&prio->list)) {
                printk(KERN_CRIT "WARNING: SRP violation detected, prio is already in "
                       "ceiling list! cpu=%d, srp=%p\n", smp_processor_id(), ceiling2sem(prio));
                return;
        }
        list_for_each(pos, &srp->ceiling)
                if (unlikely(srp_higher_prio(prio, list2prio(pos)))) {
                        __list_add(&prio->list, pos->prev, pos);
                        return;
                }

        list_add_tail(&prio->list, &srp->ceiling);
}


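/* Acquire an SRP semaphore. Under the SRP, a job may only start executing
 * once its priority exceeds the system ceiling (see srp_ceiling_block()),
 * so the resource must be free at this point; acquisition merely raises the
 * per-CPU ceiling and records ownership. */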
static int lock_srp_semaphore(struct litmus_lock* l)
{
        struct task_struct* t = current;
        struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);

        if (!is_realtime(t))
                return -EPERM;

        /* prevent acquisition of local locks in global critical sections */
        if (tsk_rt(t)->num_locks_held)
                return -EBUSY;

        preempt_disable();

        /* Update ceiling. */
        srp_add_prio(&__get_cpu_var(srp), &sem->ceiling);

        /* SRP invariant: all resources available */
        BUG_ON(sem->owner != NULL);

        sem->owner = t;
        TRACE_CUR("acquired srp 0x%p\n", sem);

        tsk_rt(t)->num_local_locks_held++;

        preempt_enable();

        return 0;
}

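/* Release an SRP semaphore: drop its entry from the per-CPU ceiling list,
 * clear ownership, and wake up any tasks that may now exceed the lowered
 * system ceiling. Returns -EINVAL if the caller is not the owner. */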
static int unlock_srp_semaphore(struct litmus_lock* l)
{
        struct task_struct* t = current;
        struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
        int err = 0;

        preempt_disable();

        if (sem->owner != t) {
                err = -EINVAL;
        } else {
                /* Determine new system priority ceiling for this CPU. */
                BUG_ON(!in_list(&sem->ceiling.list));

                list_del(&sem->ceiling.list);
                sem->owner = NULL;

                /* Wake tasks on this CPU, if they exceed current ceiling. */
                TRACE_CUR("released srp 0x%p\n", sem);
                wake_up_all(&__get_cpu_var(srp).ceiling_blocked);

                tsk_rt(t)->num_local_locks_held--;
        }

        preempt_enable();
        return err;
}

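/* Called when a real-time task opens the semaphore (via the FDSO layer):
 * bind the semaphore to the caller's partition on first use and raise the
 * semaphore's static priority ceiling to account for the new user. */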
static int open_srp_semaphore(struct litmus_lock* l, void* __user arg)
{
        struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
        int err = 0;
        struct task_struct* t = current;
        struct srp_priority t_prio;

        if (!is_realtime(t))
                return -EPERM;

        TRACE_CUR("opening SRP semaphore %p, cpu=%d\n", sem, sem->cpu);

        preempt_disable();

        if (sem->owner != NULL)
                err = -EBUSY;

        if (err == 0) {
                if (sem->cpu == UNDEF_SEM)
                        sem->cpu = get_partition(t);
                else if (sem->cpu != get_partition(t))
                        err = -EPERM;
        }

        if (err == 0) {
                t_prio.priority = get_srp_prio(t);
                t_prio.pid = t->pid;
                if (srp_higher_prio(&t_prio, &sem->ceiling)) {
                        sem->ceiling.priority = t_prio.priority;
                        sem->ceiling.pid = t_prio.pid;
                }
        }

        preempt_enable();

        return err;
}

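/* Called when a task closes the semaphore: if the caller still owns it,
 * release it on the caller's behalf. */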
static int close_srp_semaphore(struct litmus_lock* l)
{
        struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
        int err = 0;

        preempt_disable();

        if (sem->owner == current)
                unlock_srp_semaphore(l);

        preempt_enable();

        return err;
}

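/* Free the semaphore once it is no longer in use. */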
static void deallocate_srp_semaphore(struct litmus_lock* l)
{
        struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
        atomic_dec(&srp_objects_in_use);
        kfree(sem);
}

static struct litmus_lock_ops srp_lock_ops = {
        .open = open_srp_semaphore,
        .close = close_srp_semaphore,
        .lock = lock_srp_semaphore,
        .unlock = unlock_srp_semaphore,
        .deallocate = deallocate_srp_semaphore,
};

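/* Allocate and initialize a new SRP semaphore; its CPU binding and static
 * priority ceiling are established later, as tasks open it. */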
struct srp_semaphore* allocate_srp_semaphore(void)
{
        struct srp_semaphore* sem;

        sem = kmalloc(sizeof(*sem), GFP_KERNEL);
        if (!sem)
                return NULL;

        INIT_LIST_HEAD(&sem->ceiling.list);
        sem->ceiling.priority = 0;
        sem->cpu = UNDEF_SEM;
        sem->owner = NULL;

        sem->litmus_lock.ops = &srp_lock_ops;

        atomic_inc(&srp_objects_in_use);
        return sem;
}

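/* Wake-up callback for the ceiling wait queue: wake the task only if the
 * callback runs on the task's partition CPU and the task's priority now
 * exceeds that CPU's system ceiling; otherwise leave it queued. */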
static int srp_wake_up(wait_queue_t *wait, unsigned mode, int sync,
                       void *key)
{
        int cpu = smp_processor_id();
        struct task_struct *tsk = wait->private;
        if (cpu != get_partition(tsk))
                TRACE_TASK(tsk, "srp_wake_up on wrong cpu, partition is %d\n",
                           get_partition(tsk));
        else if (srp_exceeds_ceiling(tsk, &__get_cpu_var(srp)))
                return default_wake_function(wait, mode, sync, key);
        return 0;
}

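/* Block the task on the per-CPU ceiling wait queue until srp_wake_up() lets
 * it pass; the srp_non_recurse flag keeps the schedule() call below from
 * triggering another, recursive ceiling block. */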
static void do_ceiling_block(struct task_struct *tsk)
{
        wait_queue_t wait = {
                .private   = tsk,
                .func      = srp_wake_up,
                .task_list = {NULL, NULL}
        };

        tsk->state = TASK_UNINTERRUPTIBLE;
        add_wait_queue(&__get_cpu_var(srp).ceiling_blocked, &wait);
        tsk->rt_param.srp_non_recurse = 1;
        preempt_enable_no_resched();
        schedule();
        preempt_disable();
        tsk->rt_param.srp_non_recurse = 0;
        remove_wait_queue(&__get_cpu_var(srp).ceiling_blocked, &wait);
}

/* Wait for current task priority to exceed system-wide priority ceiling.
 * FIXME: the hotpath should be inline.
 */
void srp_ceiling_block(void)
{
        struct task_struct *tsk = current;

        /* Only applies to real-time tasks, but optimize for RT tasks. */
        if (unlikely(!is_realtime(tsk)))
                return;

        /* Avoid recursive ceiling blocking. */
        if (unlikely(tsk->rt_param.srp_non_recurse))
                return;

        /* Bail out early if there aren't any SRP resources around. */
        if (likely(!atomic_read(&srp_objects_in_use)))
                return;

        preempt_disable();
        if (!srp_exceeds_ceiling(tsk, &__get_cpu_var(srp))) {
                TRACE_CUR("is priority ceiling blocked.\n");
                while (!srp_exceeds_ceiling(tsk, &__get_cpu_var(srp)))
                        do_ceiling_block(tsk);
                TRACE_CUR("finally exceeds system ceiling.\n");
        } else
                TRACE_CUR("is not priority ceiling blocked\n");
        preempt_enable();
}

#endif