Diffstat (limited to 'litmus/fmlp.c')
-rw-r--r--	litmus/fmlp.c	268
1 file changed, 268 insertions(+), 0 deletions(-)
diff --git a/litmus/fmlp.c b/litmus/fmlp.c
new file mode 100644
index 000000000000..a9a638576d69
--- /dev/null
+++ b/litmus/fmlp.c
@@ -0,0 +1,268 @@
/*
 * FMLP implementation.
 * Much of the code here is borrowed from include/asm-i386/semaphore.h.
 */
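
/*
 * Overview: under the FMLP (Flexible Multiprocessor Locking Protocol),
 * blocked tasks queue in FIFO order and the lock holder runs with
 * priority inheritance.  The semaphore count starts at 1 (free); a
 * negative count after a decrement means the caller must suspend.  This
 * file implements the in-kernel semaphore object (exported to userspace
 * through the FDSO layer) and the sys_fmlp_down()/sys_fmlp_up() system
 * calls that bracket a critical section.
 */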

#include <asm/atomic.h>

#include <linux/semaphore.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>

#include <litmus/litmus.h>
#include <litmus/sched_plugin.h>
#include <litmus/edf_common.h>

#include <litmus/fdso.h>

#include <litmus/trace.h>

#ifdef CONFIG_FMLP

/* Allocate and initialize a new FMLP semaphore: initially free
 * (count == 1), with no holder and no known high-priority waiters.
 */
static void* create_fmlp_semaphore(void)
{
	struct pi_semaphore* sem;
	int i;

	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
	if (!sem)
		return NULL;
	atomic_set(&sem->count, 1);
	sem->sleepers = 0;
	init_waitqueue_head(&sem->wait);
	sem->hp.task = NULL;
	sem->holder = NULL;
	for (i = 0; i < NR_CPUS; i++)
		sem->hp.cpu_task[i] = NULL;
	return sem;
}

/* Called through the FDSO layer when a task opens the semaphore; refuse
 * access unless an FMLP-capable plugin is active.
 */
static int open_fmlp_semaphore(struct od_table_entry* entry, void* __user arg)
{
	if (!fmlp_active())
		return -EBUSY;
	return 0;
}

static void destroy_fmlp_semaphore(void* sem)
{
	/* XXX assert invariants */
	kfree(sem);
}

struct fdso_ops fmlp_sem_ops = {
	.create  = create_fmlp_semaphore,
	.open    = open_fmlp_semaphore,
	.destroy = destroy_fmlp_semaphore
};
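
/* FMLP semaphores are shared objects: userspace obtains an object
 * descriptor (od) through the FDSO layer, which invokes the
 * create/open/destroy callbacks above, and then passes that od to the
 * sys_fmlp_down()/sys_fmlp_up() system calls below.
 */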

struct wq_pair {
	struct task_struct* tsk;
	struct pi_semaphore* sem;
};
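
/* While a task blocks, its wait-queue entry's ->private field points to
 * a wq_pair rather than the usual task_struct, so that rt_pi_wake_up()
 * can find both the task and the semaphore it is waiting on.  The wake
 * function re-points ->private at the task before delegating to
 * default_wake_function(), which expects a task_struct there.
 */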

static int rt_pi_wake_up(wait_queue_t *wait, unsigned mode, int sync,
			 void *key)
{
	struct wq_pair* wqp = (struct wq_pair*) wait->private;
	set_rt_flags(wqp->tsk, RT_F_EXIT_SEM);
	litmus->inherit_priority(wqp->sem, wqp->tsk);
	TRACE_TASK(wqp->tsk,
		   "woken up by rt_pi_wake_up() (RT_F_EXIT_SEM, PI)\n");
	/* point to task for default_wake_function() */
	wait->private = wqp->tsk;
	default_wake_function(wait, mode, sync, key);

	/* Always return true: if we encountered a task that was already
	 * running, then the wake-up raced with the schedule() in
	 * do_fmlp_down(). In that case the task in do_fmlp_down() will
	 * be scheduled immediately and will own the lock, so we must not
	 * wake up another task in any case.
	 */
	return 1;
}

/* caller is responsible for locking */
int edf_set_hp_task(struct pi_semaphore *sem)
{
	struct list_head *tmp, *next;
	struct task_struct *queued;
	int ret = 0;

	sem->hp.task = NULL;
	list_for_each_safe(tmp, next, &sem->wait.task_list) {
		queued = ((struct wq_pair*)
			list_entry(tmp, wait_queue_t,
				   task_list)->private)->tsk;

		/* Compare task priorities; remember the highest-priority
		 * waiter found so far. */
		if (edf_higher_prio(queued, sem->hp.task)) {
			sem->hp.task = queued;
			ret = 1;
		}
	}
	return ret;
}

/* caller is responsible for locking */
int edf_set_hp_cpu_task(struct pi_semaphore *sem, int cpu)
{
	struct list_head *tmp, *next;
	struct task_struct *queued;
	int ret = 0;

	sem->hp.cpu_task[cpu] = NULL;
	list_for_each_safe(tmp, next, &sem->wait.task_list) {
		queued = ((struct wq_pair*)
			list_entry(tmp, wait_queue_t,
				   task_list)->private)->tsk;

		/* Consider only waiters assigned to this partition and
		 * remember the highest-priority one. */
		if (get_partition(queued) == cpu &&
		    edf_higher_prio(queued, sem->hp.cpu_task[cpu])) {
			sem->hp.cpu_task[cpu] = queued;
			ret = 1;
		}
	}
	return ret;
}
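
/* Note: hp.task caches the highest-priority waiter across all CPUs (the
 * view needed under global scheduling), while hp.cpu_task[] caches the
 * highest-priority waiter per partition (the view needed under
 * partitioned scheduling).  Since this file cannot tell which kind of
 * plugin is active, the uncontended path in do_fmlp_down() below
 * conservatively updates both.
 */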

static int do_fmlp_down(struct pi_semaphore* sem)
{
	unsigned long flags;
	struct task_struct *tsk = current;
	struct wq_pair pair;
	int suspended = 1;
	wait_queue_t wait = {
		.private = &pair,
		.func = rt_pi_wake_up,
		.task_list = {NULL, NULL}
	};

	pair.tsk = tsk;
	pair.sem = sem;
	spin_lock_irqsave(&sem->wait.lock, flags);

	if (atomic_dec_return(&sem->count) < 0 ||
	    waitqueue_active(&sem->wait)) {
		/* we need to suspend */
		tsk->state = TASK_UNINTERRUPTIBLE;
		__add_wait_queue_tail_exclusive(&sem->wait, &wait);

		TRACE_CUR("suspends on PI lock %p\n", sem);
		litmus->pi_block(sem, tsk);

		/* release lock before sleeping */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		TS_PI_DOWN_END;
		preempt_enable_no_resched();

		/* We rely on the FIFO order of the wait queue: there is
		 * exactly one wake-up per release, so we need not recheck
		 * the count after waking up; we are guaranteed to hold
		 * the lock.
		 */
		schedule();

		TRACE_CUR("woke up, now owns PI lock %p\n", sem);

		/* try_to_wake_up() set our state to TASK_RUNNING;
		 * all we need to do is to remove our wait queue entry.
		 */
		remove_wait_queue(&sem->wait, &wait);
	} else {
		/* No priority inheritance is necessary, since there are
		 * no queued tasks.
		 */
		suspended = 0;
		TRACE_CUR("acquired PI lock %p, no contention\n", sem);
		sem->holder = tsk;

		/* We don't know whether we are global or partitioned,
		 * so update both views of the high-priority task.
		 */
		sem->hp.task = tsk;
		sem->hp.cpu_task[get_partition(tsk)] = tsk;

		litmus->inherit_priority(sem, tsk);
		spin_unlock_irqrestore(&sem->wait.lock, flags);
	}
	return suspended;
}
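
/* The return value tells sys_fmlp_down() whether the suspension path
 * ran: that path already ended the TS_PI_DOWN timestamp and re-enabled
 * preemption before calling schedule(), so the system call must only do
 * so itself on the uncontended path.
 */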

static void do_fmlp_up(struct pi_semaphore* sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait.lock, flags);

	TRACE_CUR("releases PI lock %p\n", sem);
	litmus->return_priority(sem);
	sem->holder = NULL;
	if (atomic_inc_return(&sem->count) < 1)
		/* there is a task queued */
		wake_up_locked(&sem->wait);

	spin_unlock_irqrestore(&sem->wait.lock, flags);
}

asmlinkage long sys_fmlp_down(int sem_od)
{
	long ret = 0;
	struct pi_semaphore *sem;
	int suspended = 0;

	preempt_disable();
	TS_PI_DOWN_START;

	sem = lookup_fmlp_sem(sem_od);
	if (sem)
		suspended = do_fmlp_down(sem);
	else
		ret = -EINVAL;

	if (!suspended) {
		TS_PI_DOWN_END;
		preempt_enable();
	}

	return ret;
}

asmlinkage long sys_fmlp_up(int sem_od)
{
	long ret = 0;
	struct pi_semaphore *sem;

	preempt_disable();
	TS_PI_UP_START;

	sem = lookup_fmlp_sem(sem_od);
	if (sem)
		do_fmlp_up(sem);
	else
		ret = -EINVAL;

	TS_PI_UP_END;
	preempt_enable();

	return ret;
}

#else

struct fdso_ops fmlp_sem_ops = {};

asmlinkage long sys_fmlp_down(int sem_od)
{
	return -ENOSYS;
}

asmlinkage long sys_fmlp_up(int sem_od)
{
	return -ENOSYS;
}

#endif
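
/*
 * Example usage (hypothetical sketch, not part of this patch): a
 * userspace real-time task would first obtain an object descriptor for
 * a shared FMLP semaphore through the FDSO layer (e.g., via an
 * od_open()-style helper in liblitmus; the exact helper name is an
 * assumption) and then bracket each critical section with the two
 * system calls:
 *
 *	int sem_od = ...;          (object descriptor from the FDSO layer)
 *
 *	sys_fmlp_down(sem_od);     (blocks in FIFO order if contended)
 *	... critical section, runs with priority inheritance ...
 *	sys_fmlp_up(sem_od);       (wakes the next FIFO waiter, if any)
 */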