author    Andrea Bastoni <bastoni@cs.unc.edu>  2009-12-17 21:36:40 -0500
committer Andrea Bastoni <bastoni@cs.unc.edu>  2009-12-17 21:36:40 -0500
commit    d7d11d0a9c3a3a6a87e5cebb0dbcd57d8b0e2515
tree      43a0289fb7e500c1603baa7cc11003d9a1f15b94 /litmus/fmlp.c
parent    a2ac69aab6a363f3e450e4f54d72026dfcd2c72d
Add FMLP (Flexible Multiprocessor Locking Protocol) support
Diffstat (limited to 'litmus/fmlp.c')
-rw-r--r--  litmus/fmlp.c  264
1 file changed, 264 insertions(+), 0 deletions(-)
diff --git a/litmus/fmlp.c b/litmus/fmlp.c
new file mode 100644
index 000000000000..d27698a1cb39
--- /dev/null
+++ b/litmus/fmlp.c
@@ -0,0 +1,264 @@
/*
 * FMLP implementation.
 * Much of the code here is borrowed from include/asm-i386/semaphore.h
 */

#include <asm/atomic.h>

#include <linux/semaphore.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>

#include <litmus/litmus.h>
#include <litmus/sched_plugin.h>
#include <litmus/edf_common.h>

#include <litmus/fdso.h>

#include <litmus/trace.h>

#ifdef CONFIG_FMLP

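/* Allocate and initialize an FMLP semaphore: the count starts at one
 * (mutex semantics), there is no holder, and no highest-priority
 * waiter is recorded, either globally or per CPU.
 */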
static void* create_fmlp_semaphore(void)
{
	struct pi_semaphore* sem;
	int i;

	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
	if (!sem)
		return NULL;
	atomic_set(&sem->count, 1);
	sem->sleepers = 0;
	init_waitqueue_head(&sem->wait);
	sem->hp.task = NULL;
	sem->holder = NULL;
	for (i = 0; i < NR_CPUS; i++)
		sem->hp.cpu_task[i] = NULL;
	return sem;
}

static int open_fmlp_semaphore(struct od_table_entry* entry, void* __user arg)
{
	if (!fmlp_active())
		return -EBUSY;
	return 0;
}

static void destroy_fmlp_semaphore(void* sem)
{
	/* XXX assert invariants */
	kfree(sem);
}

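/* Hooks for the fdso (file-descriptor attached shared objects) layer,
 * which calls them when user space creates, opens, or destroys an FMLP
 * semaphore; the syscalls below resolve the resulting object
 * descriptors via lookup_fmlp_sem().
 */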
struct fdso_ops fmlp_sem_ops = {
	.create  = create_fmlp_semaphore,
	.open    = open_fmlp_semaphore,
	.destroy = destroy_fmlp_semaphore
};

struct wq_pair {
	struct task_struct* tsk;
	struct pi_semaphore* sem;
};

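/* Custom wake function for the PI wait queue: flag the waiter with
 * RT_F_EXIT_SEM, let the active plugin apply priority inheritance for
 * the new lock holder, then defer to default_wake_function().
 */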
static int rt_pi_wake_up(wait_queue_t *wait, unsigned mode, int sync,
			 void *key)
{
	struct wq_pair* wqp = (struct wq_pair*) wait->private;
	set_rt_flags(wqp->tsk, RT_F_EXIT_SEM);
	litmus->inherit_priority(wqp->sem, wqp->tsk);
	TRACE_TASK(wqp->tsk,
		   "woken up by rt_pi_wake_up() (RT_F_SEM_EXIT, PI)\n");
	/* point to task for default_wake_function() */
	wait->private = wqp->tsk;
	default_wake_function(wait, mode, sync, key);

	/* Always return true since we know that if we encountered a task
	 * that was already running the wake_up raced with the schedule in
	 * rt_pi_down(). In that case the task in rt_pi_down() will be
	 * scheduled immediately and own the lock. We must not wake up
	 * another task in any case.
	 */
	return 1;
}

/* Find the highest-priority (EDF) task waiting on the semaphore and
 * record it in sem->hp.task. Caller is responsible for locking.
 * Returns 1 if a new highest-priority waiter was found.
 */
int edf_set_hp_task(struct pi_semaphore *sem)
{
	struct list_head *tmp, *next;
	struct task_struct *queued;
	int ret = 0;

	sem->hp.task = NULL;
	list_for_each_safe(tmp, next, &sem->wait.task_list) {
		queued = ((struct wq_pair*)
			list_entry(tmp, wait_queue_t,
				   task_list)->private)->tsk;

		/* Compare task prios, find high prio task. */
		if (edf_higher_prio(queued, sem->hp.task)) {
			sem->hp.task = queued;
			ret = 1;
		}
	}
	return ret;
}

/* As above, but consider only waiters assigned to CPU partition 'cpu';
 * the result is recorded in sem->hp.cpu_task[cpu]. Caller is
 * responsible for locking.
 */
int edf_set_hp_cpu_task(struct pi_semaphore *sem, int cpu)
{
	struct list_head *tmp, *next;
	struct task_struct *queued;
	int ret = 0;

	sem->hp.cpu_task[cpu] = NULL;
	list_for_each_safe(tmp, next, &sem->wait.task_list) {
		queued = ((struct wq_pair*)
			list_entry(tmp, wait_queue_t,
				   task_list)->private)->tsk;

		/* Compare task prios, find high prio task. */
		if (get_partition(queued) == cpu &&
		    edf_higher_prio(queued, sem->hp.cpu_task[cpu])) {
			sem->hp.cpu_task[cpu] = queued;
			ret = 1;
		}
	}
	return ret;
}

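/* Acquire the semaphore. Under contention the caller is enqueued in
 * FIFO order and suspends; being woken then implies ownership, so the
 * lock state need not be rechecked. Returns 1 if the caller suspended,
 * 0 if the lock was acquired without contention.
 */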
static int do_fmlp_down(struct pi_semaphore* sem)
{
	unsigned long flags;
	struct task_struct *tsk = current;
	struct wq_pair pair;
	int suspended = 1;
	wait_queue_t wait = {
		.private = &pair,
		.func = rt_pi_wake_up,
		.task_list = {NULL, NULL}
	};

	pair.tsk = tsk;
	pair.sem = sem;
	spin_lock_irqsave(&sem->wait.lock, flags);

	if (atomic_dec_return(&sem->count) < 0 ||
	    waitqueue_active(&sem->wait)) {
		/* we need to suspend */
		tsk->state = TASK_UNINTERRUPTIBLE;
		add_wait_queue_exclusive_locked(&sem->wait, &wait);

		TRACE_CUR("suspends on PI lock %p\n", sem);
		litmus->pi_block(sem, tsk);

		/* release lock before sleeping */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		TS_PI_DOWN_END;
		preempt_enable_no_resched();

		/* We depend on the FIFO order: there is only one wake-up
		 * per release, so we don't need to recheck when we wake
		 * up; we are guaranteed to hold the lock.
		 */
		schedule();

		TRACE_CUR("woke up, now owns PI lock %p\n", sem);

		/* try_to_wake_up() set our state to TASK_RUNNING,
		 * all we need to do is to remove our wait queue entry
		 */
		remove_wait_queue(&sem->wait, &wait);
	} else {
		/* no priority inheritance necessary, since there are no
		 * queued tasks
		 */
		suspended = 0;
		TRACE_CUR("acquired PI lock %p, no contention\n", sem);
		sem->holder = tsk;
		sem->hp.task = tsk;
		litmus->inherit_priority(sem, tsk);
		spin_unlock_irqrestore(&sem->wait.lock, flags);
	}
	return suspended;
}

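/* Release the semaphore: return any inherited priority via the plugin,
 * clear the holder, and wake at most one FIFO successor if tasks are
 * still queued.
 */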
static void do_fmlp_up(struct pi_semaphore* sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait.lock, flags);

	TRACE_CUR("releases PI lock %p\n", sem);
	litmus->return_priority(sem);
	sem->holder = NULL;
	if (atomic_inc_return(&sem->count) < 1)
		/* there is a task queued */
		wake_up_locked(&sem->wait);

	spin_unlock_irqrestore(&sem->wait.lock, flags);
}

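/* System-call entry points: sem_od is an object descriptor previously
 * obtained through the fdso interface, and lookup_fmlp_sem() resolves
 * it to the in-kernel pi_semaphore. The TS_PI_* markers bracket the
 * operations for overhead tracing.
 */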
asmlinkage long sys_fmlp_down(int sem_od)
{
	long ret = 0;
	struct pi_semaphore * sem;
	int suspended = 0;

	preempt_disable();
	TS_PI_DOWN_START;

	sem = lookup_fmlp_sem(sem_od);
	if (sem)
		suspended = do_fmlp_down(sem);
	else
		ret = -EINVAL;

	if (!suspended) {
		TS_PI_DOWN_END;
		preempt_enable();
	}

	return ret;
}

asmlinkage long sys_fmlp_up(int sem_od)
{
	long ret = 0;
	struct pi_semaphore * sem;

	preempt_disable();
	TS_PI_UP_START;

	sem = lookup_fmlp_sem(sem_od);
	if (sem)
		do_fmlp_up(sem);
	else
		ret = -EINVAL;

	TS_PI_UP_END;
	preempt_enable();

	return ret;
}

#else

struct fdso_ops fmlp_sem_ops = {};

asmlinkage long sys_fmlp_down(int sem_od)
{
	return -ENOSYS;
}

asmlinkage long sys_fmlp_up(int sem_od)
{
	return -ENOSYS;
}

#endif
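
A minimal sketch of how user space might invoke these system calls; the
syscall numbers and wrapper names below are hypothetical stand-ins (the
real numbers and helpers come from the LITMUS^RT userspace library),
and obtaining a valid object descriptor (sem_od) through the fdso
interface is assumed to have happened already.

/* Hypothetical userspace sketch -- not part of this patch. */
#include <unistd.h>
#include <sys/syscall.h>

#define __NR_fmlp_down 9990	/* hypothetical syscall numbers */
#define __NR_fmlp_up   9991

static long fmlp_down(int sem_od)
{
	return syscall(__NR_fmlp_down, sem_od);
}

static long fmlp_up(int sem_od)
{
	return syscall(__NR_fmlp_up, sem_od);
}

void with_shared_resource(int sem_od)
{
	if (fmlp_down(sem_od) == 0) {	/* may suspend; FIFO order with PI */
		/* ... critical section on the shared resource ... */
		fmlp_up(sem_od);	/* wakes at most one successor */
	}
}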