path: root/litmus/srp.c
Diffstat (limited to 'litmus/srp.c')
-rw-r--r--  litmus/srp.c  295
1 files changed, 295 insertions, 0 deletions
diff --git a/litmus/srp.c b/litmus/srp.c
new file mode 100644
index 000000000000..2ed4ec12a9d3
--- /dev/null
+++ b/litmus/srp.c
@@ -0,0 +1,295 @@
/* ************************************************************************** */
/*                          STACK RESOURCE POLICY                             */
/* ************************************************************************** */

#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/wait.h>

#include <litmus/litmus.h>
#include <litmus/sched_plugin.h>
#include <litmus/fdso.h>
#include <litmus/trace.h>


#ifdef CONFIG_LITMUS_LOCKING

#include <litmus/srp.h>

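/* Callback that maps a task to its SRP preemption level; expected to be
 * provided by the active scheduler plugin. A priority of 0 means the task
 * has no SRP priority (see srp_higher_prio() below).
 */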
srp_prioritization_t get_srp_prio;

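/* Per-CPU SRP state: the list of currently active priority ceilings
 * (head = system ceiling) and a wait queue for tasks blocked by it.
 */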
struct srp {
        struct list_head ceiling;
        wait_queue_head_t ceiling_blocked;
};
#define system_ceiling(srp) list2prio(srp->ceiling.next)
#define ceiling2sem(c) container_of(c, struct srp_semaphore, ceiling)

#define UNDEF_SEM -2

atomic_t srp_objects_in_use = ATOMIC_INIT(0);

DEFINE_PER_CPU(struct srp, srp);

/* Initialize SRP semaphores at boot time. */
static int __init srp_init(void)
{
        int i;

        printk("Initializing SRP per-CPU ceilings...");
        for (i = 0; i < NR_CPUS; i++) {
                init_waitqueue_head(&per_cpu(srp, i).ceiling_blocked);
                INIT_LIST_HEAD(&per_cpu(srp, i).ceiling);
        }
        printk(" done!\n");

        return 0;
}
module_init(srp_init);

/* SRP task priority comparison function. Smaller numeric values have higher
 * priority, tie-break is PID. Special case: priority == 0 <=> no priority
 */
static int srp_higher_prio(struct srp_priority* first,
                           struct srp_priority* second)
{
        if (!first->priority)
                return 0;
        else
                return !second->priority ||
                        first->priority < second->priority || (
                        first->priority == second->priority &&
                        first->pid < second->pid);
}


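/* Does @first's priority exceed the current system ceiling on this CPU?
 * Also true if @first itself owns the ceiling-defining semaphore.
 */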
static int srp_exceeds_ceiling(struct task_struct* first,
                               struct srp* srp)
{
        struct srp_priority prio;

        if (list_empty(&srp->ceiling))
                return 1;
        else {
                prio.pid = first->pid;
                prio.priority = get_srp_prio(first);
                return srp_higher_prio(&prio, system_ceiling(srp)) ||
                        ceiling2sem(system_ceiling(srp))->owner == first;
        }
}

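/* Insert @prio into the per-CPU ceiling list, ordered by decreasing
 * priority, so that the list head always holds the system ceiling.
 */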
static void srp_add_prio(struct srp* srp, struct srp_priority* prio)
{
        struct list_head *pos;
        if (in_list(&prio->list)) {
                printk(KERN_CRIT "WARNING: SRP violation detected, prio is already in "
                       "ceiling list! cpu=%d, srp=%p\n", smp_processor_id(), ceiling2sem(prio));
                return;
        }
        list_for_each(pos, &srp->ceiling)
                if (unlikely(srp_higher_prio(prio, list2prio(pos)))) {
                        __list_add(&prio->list, pos->prev, pos);
                        return;
                }

        list_add_tail(&prio->list, &srp->ceiling);
}


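/* litmus_lock lock() callback: push this semaphore's ceiling onto the
 * local CPU's ceiling list and take ownership. Under SRP a job never
 * blocks here: the resource must be free by the time it executes.
 */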
static int lock_srp_semaphore(struct litmus_lock* l)
{
        struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);

        if (!is_realtime(current))
                return -EPERM;

        preempt_disable();

        /* Update ceiling. */
        srp_add_prio(&__get_cpu_var(srp), &sem->ceiling);

        /* SRP invariant: all resources available */
        BUG_ON(sem->owner != NULL);

        sem->owner = current;
        TRACE_CUR("acquired srp 0x%p\n", sem);

        preempt_enable();

        return 0;
}

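/* litmus_lock unlock() callback: remove this semaphore's ceiling from the
 * local CPU's list and wake up any tasks that may now exceed the new
 * system ceiling.
 */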
static int unlock_srp_semaphore(struct litmus_lock* l)
{
        struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
        int err = 0;

        preempt_disable();

        if (sem->owner != current) {
                err = -EINVAL;
        } else {
                /* Determine new system priority ceiling for this CPU. */
                BUG_ON(!in_list(&sem->ceiling.list));

                list_del(&sem->ceiling.list);
                sem->owner = NULL;

                /* Wake tasks on this CPU, if they exceed current ceiling. */
                TRACE_CUR("released srp 0x%p\n", sem);
                wake_up_all(&__get_cpu_var(srp).ceiling_blocked);
        }

        preempt_enable();
        return err;
}

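/* litmus_lock open() callback: bind the semaphore to the caller's
 * partition on first use and raise the semaphore's static priority
 * ceiling to cover the calling task.
 */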
static int open_srp_semaphore(struct litmus_lock* l, void* __user arg)
{
        struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
        int err = 0;
        struct task_struct* t = current;
        struct srp_priority t_prio;

        if (!is_realtime(t))
                return -EPERM;

        TRACE_CUR("opening SRP semaphore %p, cpu=%d\n", sem, sem->cpu);

        preempt_disable();

        if (sem->owner != NULL)
                err = -EBUSY;

        if (err == 0) {
                if (sem->cpu == UNDEF_SEM)
                        sem->cpu = get_partition(t);
                else if (sem->cpu != get_partition(t))
                        err = -EPERM;
        }

        if (err == 0) {
                t_prio.priority = get_srp_prio(t);
                t_prio.pid = t->pid;
                if (srp_higher_prio(&t_prio, &sem->ceiling)) {
                        sem->ceiling.priority = t_prio.priority;
                        sem->ceiling.pid = t_prio.pid;
                }
        }

        preempt_enable();

        return err;
}

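/* litmus_lock close() callback: release the semaphore if the task
 * closing its descriptor still owns it.
 */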
static int close_srp_semaphore(struct litmus_lock* l)
{
        struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
        int err = 0;

        preempt_disable();

        if (sem->owner == current)
                unlock_srp_semaphore(l);

        preempt_enable();

        return err;
}

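/* Free the semaphore and drop the global count of SRP objects in use. */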
static void deallocate_srp_semaphore(struct litmus_lock* l)
{
        struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
        atomic_dec(&srp_objects_in_use);
        kfree(sem);
}

static struct litmus_lock_ops srp_lock_ops = {
        .open = open_srp_semaphore,
        .close = close_srp_semaphore,
        .lock = lock_srp_semaphore,
        .unlock = unlock_srp_semaphore,
        .deallocate = deallocate_srp_semaphore,
};

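/* Allocate and initialize a new SRP semaphore; its ceiling and partition
 * are established lazily via the open() callback.
 */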
struct srp_semaphore* allocate_srp_semaphore(void)
{
        struct srp_semaphore* sem;

        sem = kmalloc(sizeof(*sem), GFP_KERNEL);
        if (!sem)
                return NULL;

        INIT_LIST_HEAD(&sem->ceiling.list);
        sem->ceiling.priority = 0;
        sem->cpu = UNDEF_SEM;
        sem->owner = NULL;

        sem->litmus_lock.ops = &srp_lock_ops;

        atomic_inc(&srp_objects_in_use);
        return sem;
}

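/* Custom wake function for the ceiling wait queue: only wake the waiter
 * if it is on its own partition's CPU and now exceeds the system ceiling.
 */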
static int srp_wake_up(wait_queue_t *wait, unsigned mode, int sync,
                       void *key)
{
        int cpu = smp_processor_id();
        struct task_struct *tsk = wait->private;
        if (cpu != get_partition(tsk))
                TRACE_TASK(tsk, "srp_wake_up on wrong cpu, partition is %d\n",
                           get_partition(tsk));
        else if (srp_exceeds_ceiling(tsk, &__get_cpu_var(srp)))
                return default_wake_function(wait, mode, sync, key);
        return 0;
}

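/* Block the current task on the per-CPU ceiling wait queue until
 * srp_wake_up() lets it through. The srp_non_recurse flag keeps the
 * scheduling activity triggered here from re-entering the ceiling check.
 */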
static void do_ceiling_block(struct task_struct *tsk)
{
        wait_queue_t wait = {
                .private = tsk,
                .func = srp_wake_up,
                .task_list = {NULL, NULL}
        };

        tsk->state = TASK_UNINTERRUPTIBLE;
        add_wait_queue(&__get_cpu_var(srp).ceiling_blocked, &wait);
        tsk->rt_param.srp_non_recurse = 1;
        preempt_enable_no_resched();
        schedule();
        preempt_disable();
        tsk->rt_param.srp_non_recurse = 0;
        remove_wait_queue(&__get_cpu_var(srp).ceiling_blocked, &wait);
}

/* Wait for current task priority to exceed system-wide priority ceiling.
 * FIXME: the hotpath should be inline.
 */
void srp_ceiling_block(void)
{
        struct task_struct *tsk = current;

        /* Only applies to real-time tasks, but optimize for RT tasks. */
        if (unlikely(!is_realtime(tsk)))
                return;

        /* Avoid recursive ceiling blocking. */
        if (unlikely(tsk->rt_param.srp_non_recurse))
                return;

        /* Bail out early if there aren't any SRP resources around. */
        if (likely(!atomic_read(&srp_objects_in_use)))
                return;

        preempt_disable();
        if (!srp_exceeds_ceiling(tsk, &__get_cpu_var(srp))) {
                TRACE_CUR("is priority ceiling blocked.\n");
                while (!srp_exceeds_ceiling(tsk, &__get_cpu_var(srp)))
                        do_ceiling_block(tsk);
                TRACE_CUR("finally exceeds system ceiling.\n");
        } else
                TRACE_CUR("is not priority ceiling blocked\n");
        preempt_enable();
}

#endif