author    Glenn Elliott <gelliott@cs.unc.edu>  2012-04-15 15:04:15 -0400
committer Glenn Elliott <gelliott@cs.unc.edu>  2012-04-15 15:04:15 -0400
commit    3f53a88be223f484db011f0f42e843aa57be8fca (patch)
tree      1cf92aeaf8ea8459bbe1e9af617c49be592bdf28
parent    b3ae67412531cbc583d5697d2366fc58d6dd07e7 (diff)

add kfmlp as separate file

-rw-r--r--  include/litmus/kfmlp_lock.h   39
-rw-r--r--  litmus/kfmlp_lock.c          401
2 files changed, 440 insertions, 0 deletions
diff --git a/include/litmus/kfmlp_lock.h b/include/litmus/kfmlp_lock.h
new file mode 100644
index 000000000000..49156a9ba4ea
--- /dev/null
+++ b/include/litmus/kfmlp_lock.h
@@ -0,0 +1,39 @@
#ifndef LITMUS_KFMLP_H
#define LITMUS_KFMLP_H

#include <litmus/litmus.h>
#include <litmus/locking.h>

/* struct for semaphore with priority inheritance */
struct kfmlp_queue
{
	wait_queue_head_t wait;
	struct task_struct* owner;
	struct task_struct* hp_waiter;
	int count; /* number of waiters + holder */
};

struct kfmlp_semaphore
{
	struct litmus_lock litmus_lock;

	spinlock_t lock;

	int num_resources; /* aka k */

	struct kfmlp_queue *queues; /* array */
	struct kfmlp_queue *shortest_queue; /* pointer to shortest queue */
};

static inline struct kfmlp_semaphore* kfmlp_from_lock(struct litmus_lock* lock)
{
	return container_of(lock, struct kfmlp_semaphore, litmus_lock);
}

int kfmlp_lock(struct litmus_lock* l);
int kfmlp_unlock(struct litmus_lock* l);
int kfmlp_close(struct litmus_lock* l);
void kfmlp_free(struct litmus_lock* l);
struct litmus_lock* kfmlp_new(struct litmus_lock_ops* ops, void* __user arg);

#endif
\ No newline at end of file
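
For reference, these entry points are meant to be wired into a litmus_lock_ops table by a scheduler plugin. A minimal sketch of such a table follows; the field names assume this LITMUS^RT version's struct litmus_lock_ops, and the table name is hypothetical:

/* Hypothetical glue a plugin might provide; not part of this patch. */
static struct litmus_lock_ops kfmlp_lock_ops = {
	.close      = kfmlp_close,
	.lock       = kfmlp_lock,
	.unlock     = kfmlp_unlock,
	.deallocate = kfmlp_free,
};
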
diff --git a/litmus/kfmlp_lock.c b/litmus/kfmlp_lock.c
new file mode 100644
index 000000000000..37302064bd8c
--- /dev/null
+++ b/litmus/kfmlp_lock.c
@@ -0,0 +1,401 @@
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <litmus/trace.h>
#include <litmus/sched_plugin.h>
#include <litmus/kfmlp_lock.h>

#include <litmus/edf_common.h>

static inline int kfmlp_get_idx(struct kfmlp_semaphore* sem,
				struct kfmlp_queue* queue)
{
	return (queue - &sem->queues[0]);
}

static inline struct kfmlp_queue* kfmlp_get_queue(struct kfmlp_semaphore* sem,
						  struct task_struct* holder)
{
	int i;
	for (i = 0; i < sem->num_resources; ++i)
		if (sem->queues[i].owner == holder)
			return &sem->queues[i];
	return NULL;
}

/* caller is responsible for locking */
static struct task_struct* kfmlp_find_hp_waiter(struct kfmlp_queue *kqueue,
						struct task_struct *skip)
{
	struct list_head *pos;
	struct task_struct *queued, *found = NULL;

	list_for_each(pos, &kqueue->wait.task_list) {
		queued = (struct task_struct*) list_entry(pos, wait_queue_t,
							  task_list)->private;

		/* Compare task prios, find high prio task. */
		if (queued != skip && edf_higher_prio(queued, found))
			found = queued;
	}
	return found;
}

static inline struct kfmlp_queue* kfmlp_find_shortest(struct kfmlp_semaphore* sem,
						      struct kfmlp_queue* search_start)
{
	/* We start the search at search_start rather than at the head of
	 * the queue array so that successive requests are load-balanced
	 * across all replicas. */
	struct kfmlp_queue* step = search_start;
	struct kfmlp_queue* shortest = sem->shortest_queue;

	do
	{
		step = (step+1 != &sem->queues[sem->num_resources]) ?
			step+1 : &sem->queues[0];

		if (step->count < shortest->count)
		{
			shortest = step;
			if (step->count == 0)
				break; /* can't get any shorter */
		}

	} while (step != search_start);

	return shortest;
}
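
To make the circular scan concrete (illustrative numbers only): with k = 4, queue lengths [2, 1, 3, 0], sem->shortest_queue pointing at queue 1, and search_start = queue 1, the loop first visits queue 2 (count 3, no improvement) and then queue 3, whose count of 0 ends the search immediately. Queue 3 is returned, so consecutive requests fan out across the replicas instead of piling onto the low-numbered queues.
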

/* Steal the highest-priority waiter from among all queues that have at
 * least one waiter (count > 1, i.e., holder plus waiters), remove it
 * from its wait queue, and return it. Returns NULL if there is nothing
 * to steal. Caller must hold sem->lock. */
static struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem)
{
	struct kfmlp_queue *my_queue = NULL;
	struct task_struct *max_hp = NULL;

	struct list_head *pos;
	struct task_struct *queued;
	int i;

	for (i = 0; i < sem->num_resources; ++i)
	{
		if ((sem->queues[i].count > 1) &&
		    ((my_queue == NULL) ||
		     (edf_higher_prio(sem->queues[i].hp_waiter, my_queue->hp_waiter))))
		{
			my_queue = &sem->queues[i];
		}
	}

	if (my_queue)
	{
		max_hp = my_queue->hp_waiter;

		BUG_ON(!max_hp);

		TRACE_CUR("stealing %s/%d from queue %d\n",
			  max_hp->comm, max_hp->pid,
			  kfmlp_get_idx(sem, my_queue));

		my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, max_hp);

		if (tsk_rt(my_queue->owner)->inh_task == max_hp)
		{
			litmus->decrease_prio(my_queue->owner, my_queue->hp_waiter);
		}

		list_for_each(pos, &my_queue->wait.task_list)
		{
			queued = (struct task_struct*) list_entry(pos, wait_queue_t,
								  task_list)->private;
			/* Find max_hp's wait-queue entry so we can remove it. */
			if (queued == max_hp)
			{
				/*
				TRACE_CUR("queue %d: found entry in wait queue. REMOVING!\n",
					  kfmlp_get_idx(sem, my_queue));
				*/
				__remove_wait_queue(&my_queue->wait,
						    list_entry(pos, wait_queue_t, task_list));
				break;
			}
		}
		--(my_queue->count);
	}

	return max_hp;
}

int kfmlp_lock(struct litmus_lock* l)
{
	struct task_struct* t = current;
	struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
	struct kfmlp_queue* my_queue;
	wait_queue_t wait;
	unsigned long flags;

	if (!is_realtime(t))
		return -EPERM;

	spin_lock_irqsave(&sem->lock, flags);

	my_queue = sem->shortest_queue;

	if (my_queue->owner) {
		/* resource is not free => must suspend and wait */
		TRACE_CUR("queue %d: Resource is not free => must suspend and wait.\n",
			  kfmlp_get_idx(sem, my_queue));

		init_waitqueue_entry(&wait, t);

		/* FIXME: interruptible would be nice some day */
		set_task_state(t, TASK_UNINTERRUPTIBLE);

		__add_wait_queue_tail_exclusive(&my_queue->wait, &wait);

		/* check if we need to activate priority inheritance */
		if (edf_higher_prio(t, my_queue->hp_waiter))
		{
			my_queue->hp_waiter = t;
			if (edf_higher_prio(t, my_queue->owner))
			{
				litmus->increase_prio(my_queue->owner, my_queue->hp_waiter);
			}
		}

		++(my_queue->count);
		sem->shortest_queue = kfmlp_find_shortest(sem, my_queue);

		/* release lock before sleeping */
		spin_unlock_irqrestore(&sem->lock, flags);

		/* We depend on the FIFO order. Thus, we don't need to recheck
		 * when we wake up; we are guaranteed to have the lock since
		 * there is only one wake-up per release (or steal).
		 */
		schedule();

		if (my_queue->owner == t)
		{
			TRACE_CUR("queue %d: acquired through waiting\n",
				  kfmlp_get_idx(sem, my_queue));
		}
		else
		{
			/* This can happen if our wait entry was stolen and
			 * moved to another queue; record where we ended up. */
			my_queue = kfmlp_get_queue(sem, t);

			BUG_ON(!my_queue);
			TRACE_CUR("queue %d: acquired through stealing\n",
				  kfmlp_get_idx(sem, my_queue));
		}
	}
	else
	{
		TRACE_CUR("queue %d: acquired immediately\n",
			  kfmlp_get_idx(sem, my_queue));

		my_queue->owner = t;

		++(my_queue->count);
		sem->shortest_queue = kfmlp_find_shortest(sem, my_queue);

		spin_unlock_irqrestore(&sem->lock, flags);
	}

	return kfmlp_get_idx(sem, my_queue);
}
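
Note that on success kfmlp_lock() returns the index of the queue (replica) that was acquired rather than zero; the caller can use this index to select among the k resource instances.
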


int kfmlp_unlock(struct litmus_lock* l)
{
	struct task_struct *t = current, *next;
	struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
	struct kfmlp_queue *my_queue;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&sem->lock, flags);

	my_queue = kfmlp_get_queue(sem, t);

	if (!my_queue) {
		err = -EINVAL;
		goto out;
	}

	/* check if there are jobs waiting for this resource */
	next = __waitqueue_remove_first(&my_queue->wait);
	if (next) {
		/*
		TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - next\n",
			  kfmlp_get_idx(sem, my_queue),
			  next->comm, next->pid);
		*/
		/* next becomes the resource holder */
		my_queue->owner = next;

		--(my_queue->count);
		/* The '=' in '<=' is a crude attempt to build affinity:
		 * ties break toward the queue a task just released, at
		 * least until tasks can tell us where they last ran. */
		if (my_queue->count <= sem->shortest_queue->count)
		{
			sem->shortest_queue = my_queue;
		}

		TRACE_CUR("queue %d: lock ownership passed to %s/%d\n",
			  kfmlp_get_idx(sem, my_queue), next->comm, next->pid);

		/* determine new hp_waiter if necessary */
		if (next == my_queue->hp_waiter) {
			TRACE_TASK(next, "was highest-prio waiter\n");
			/* next has the highest priority --- it doesn't need to
			 * inherit. However, we need to make sure that the
			 * next-highest priority in the queue is reflected in
			 * hp_waiter. */
			my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, next);
			if (my_queue->hp_waiter)
				TRACE_TASK(my_queue->hp_waiter, "queue %d: is new highest-prio waiter\n", kfmlp_get_idx(sem, my_queue));
			else
				TRACE("queue %d: no further waiters\n", kfmlp_get_idx(sem, my_queue));
		} else {
			/* Well, if next is not the highest-priority waiter,
			 * then it ought to inherit the highest-priority
			 * waiter's priority. */
			litmus->increase_prio(next, my_queue->hp_waiter);
		}

		/* wake up next */
		wake_up_process(next);
	}
	else
	{
		TRACE_CUR("queue %d: looking to steal someone...\n", kfmlp_get_idx(sem, my_queue));

		next = kfmlp_remove_hp_waiter(sem); /* returns NULL if nothing to steal */

		/*
		if (next)
			TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - steal\n",
				  kfmlp_get_idx(sem, my_queue),
				  next->comm, next->pid);
		*/

		my_queue->owner = next;

		if (next)
		{
			TRACE_CUR("queue %d: lock ownership passed to %s/%d (which was stolen)\n",
				  kfmlp_get_idx(sem, my_queue),
				  next->comm, next->pid);

			/* wake up next */
			wake_up_process(next);
		}
		else
		{
			TRACE_CUR("queue %d: no one to steal.\n", kfmlp_get_idx(sem, my_queue));

			--(my_queue->count);
			/* As above, '<=' (rather than '<') is a crude affinity
			 * heuristic until tasks can tell us where they last ran. */
			if (my_queue->count <= sem->shortest_queue->count)
			{
				sem->shortest_queue = my_queue;
			}
		}
	}

	/* we lose the benefit of priority inheritance (if any) */
	if (tsk_rt(t)->inh_task)
		litmus->decrease_prio(t, NULL);

out:
	spin_unlock_irqrestore(&sem->lock, flags);

	return err;
}

int kfmlp_close(struct litmus_lock* l)
{
	struct task_struct *t = current;
	struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
	struct kfmlp_queue *my_queue;
	unsigned long flags;

	int owner;

	spin_lock_irqsave(&sem->lock, flags);

	my_queue = kfmlp_get_queue(sem, t);
	owner = (my_queue) ? (my_queue->owner == t) : 0;

	spin_unlock_irqrestore(&sem->lock, flags);

	if (owner)
		kfmlp_unlock(l);

	return 0;
}

void kfmlp_free(struct litmus_lock* l)
{
	struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
	kfree(sem->queues);
	kfree(sem);
}

/* Construct a new KFMLP semaphore; the userspace argument is the number
 * of replicas (k). Returns NULL on bad input or allocation failure. */
struct litmus_lock* kfmlp_new(struct litmus_lock_ops* ops, void* __user args)
{
	struct kfmlp_semaphore* sem;
	int num_resources = 0;
	int i;

	/* read k, the number of replicas, from userspace */
	if (!access_ok(VERIFY_READ, args, sizeof(num_resources)))
	{
		return NULL;
	}
	if (__copy_from_user(&num_resources, args, sizeof(num_resources)))
	{
		return NULL;
	}
	if (num_resources < 1)
	{
		return NULL;
	}

	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
	if (!sem)
	{
		return NULL;
	}

	sem->queues = kmalloc(sizeof(struct kfmlp_queue)*num_resources, GFP_KERNEL);
	if (!sem->queues)
	{
		kfree(sem);
		return NULL;
	}

	sem->litmus_lock.ops = ops;
	spin_lock_init(&sem->lock);
	sem->num_resources = num_resources;

	for (i = 0; i < num_resources; ++i)
	{
		sem->queues[i].owner = NULL;
		sem->queues[i].hp_waiter = NULL;
		init_waitqueue_head(&sem->queues[i].wait);
		sem->queues[i].count = 0;
	}

	sem->shortest_queue = &sem->queues[0];

	return &sem->litmus_lock;
}
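
A scheduler plugin would typically construct the semaphore from its lock-allocation callback. A plausible, purely hypothetical call site (assuming a lock-type constant KFMLP_SEM and the kfmlp_lock_ops table sketched after the header above; the function name is invented for illustration):

/* Hypothetical plugin callback; KFMLP_SEM and kfmlp_lock_ops are assumed. */
static long my_plugin_allocate_lock(struct litmus_lock **lock, int type,
				    void* __user args)
{
	switch (type) {
	case KFMLP_SEM:
		*lock = kfmlp_new(&kfmlp_lock_ops, args);
		return (*lock) ? 0 : -ENOMEM;
	default:
		return -ENOSYS;
	}
}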