aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorGlenn Elliott <gelliott@cs.unc.edu>2010-08-03 12:33:01 -0400
committerGlenn Elliott <gelliott@cs.unc.edu>2010-08-03 12:43:47 -0400
commit93d94bce2b7a90d789cf0ab5e167f2c373484eb2 (patch)
tree915e49699880a5ccf879df9bc206f6307a12f437
parent2183cf547b67f7d59f23ed4322f1748867b3f7ea (diff)
First implementation of G-OMLP.wip-omlp-gedf
This patch implements G-OMLP (OMLP for G-EDF). It reuses (calls) much of the code from G-FMLP-Long. Note that the implementation here will have to change if/when P-OMLP is implemented. This is because P-OMLP uses separate priority queues (PQ) for each CPU. The OMLP code will have to be smarter to figure out when to use a global PQ or a CPU-bound PQ. Also, the PQ may be better implemented as a binary (or binomial) heap; it is a simple linked list in this implementation.
-rw-r--r--include/litmus/fdso.h4
-rw-r--r--include/litmus/sched_plugin.h34
-rw-r--r--include/litmus/unistd_32.h4
-rw-r--r--include/litmus/unistd_64.h6
-rw-r--r--litmus/Kconfig16
-rw-r--r--litmus/Makefile1
-rw-r--r--litmus/fdso.c2
-rw-r--r--litmus/omlp.c319
-rw-r--r--litmus/sched_gsn_edf.c31
-rw-r--r--litmus/sched_plugin.c10
10 files changed, 424 insertions, 3 deletions
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h
index b0e62a9098bd..4c67c87d0e1b 100644
--- a/include/litmus/fdso.h
+++ b/include/litmus/fdso.h
@@ -19,8 +19,9 @@ typedef enum {
19 19
20 FMLP_SEM = 0, 20 FMLP_SEM = 0,
21 SRP_SEM = 1, 21 SRP_SEM = 1,
22 OMLP_SEM = 2,
22 23
23 MAX_OBJ_TYPE = 1 24 MAX_OBJ_TYPE = 2
24} obj_type_t; 25} obj_type_t;
25 26
26struct inode_obj_id { 27struct inode_obj_id {
@@ -64,6 +65,7 @@ static inline void* od_lookup(int od, obj_type_t type)
64 65
65#define lookup_fmlp_sem(od)((struct fmlp_semaphore*) od_lookup(od, FMLP_SEM)) 66#define lookup_fmlp_sem(od)((struct fmlp_semaphore*) od_lookup(od, FMLP_SEM))
66#define lookup_srp_sem(od) ((struct srp_semaphore*) od_lookup(od, SRP_SEM)) 67#define lookup_srp_sem(od) ((struct srp_semaphore*) od_lookup(od, SRP_SEM))
68#define lookup_omlp_sem(od)((struct omlp_semaphore*) od_lookup(od, OMLP_SEM))
67#define lookup_ics(od) ((struct ics*) od_lookup(od, ICS_ID)) 69#define lookup_ics(od) ((struct ics*) od_lookup(od, ICS_ID))
68 70
69 71
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index 071e809564b2..186e99c6bb88 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -41,6 +41,25 @@ static inline struct fmlp_semaphore* to_fmlp(struct pi_semaphore* sem) {
41} 41}
42#endif 42#endif
43 43
44#ifdef CONFIG_OMLP
45struct omlp_semaphore {
46 /* NOTE: first part of struct matchs fmlp_semaphore, so
47 * omlp_struct can be cast to fmlp_struct
48 */
49 struct pi_semaphore pi; /* must always be first. */
50
51 /* current lock holder */
52 struct task_struct *holder;
53
54 /* PQ - just a single list since we only support G-EDF for now */
55 struct list_head pq_task_list;
56};
57
58static inline struct omlp_semaphore* to_omlp(struct pi_semaphore* sem) {
59 return (struct omlp_semaphore*)sem;
60}
61#endif
62
44/************************ setup/tear down ********************/ 63/************************ setup/tear down ********************/
45 64
46typedef long (*activate_plugin_t) (void); 65typedef long (*activate_plugin_t) (void);
@@ -149,6 +168,13 @@ struct sched_plugin {
149 return_priority_t fmlp_return_priority; 168 return_priority_t fmlp_return_priority;
150 pi_block_t fmlp_pi_block; 169 pi_block_t fmlp_pi_block;
151#endif 170#endif
171
172#ifdef CONFIG_OMLP
173 unsigned int omlp_active;
174 inherit_priority_t omlp_inherit_priority;
175 return_priority_t omlp_return_priority;
176 pi_block_t omlp_pi_block;
177#endif
152} __attribute__ ((__aligned__(SMP_CACHE_BYTES))); 178} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
153 179
154 180
@@ -177,6 +203,14 @@ static inline int fmlp_active(void)
177 return 0; 203 return 0;
178#endif 204#endif
179} 205}
206static inline int omlp_active(void)
207{
208#ifdef CONFIG_OMLP
209 return litmus->omlp_active;
210#else
211 return 0;
212#endif
213}
180 214
181extern struct sched_plugin linux_sched_plugin; 215extern struct sched_plugin linux_sched_plugin;
182 216
diff --git a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h
index dbddc6523f8e..3c55c8331e1b 100644
--- a/include/litmus/unistd_32.h
+++ b/include/litmus/unistd_32.h
@@ -19,5 +19,7 @@
19#define __NR_wait_for_ts_release __LSC(11) 19#define __NR_wait_for_ts_release __LSC(11)
20#define __NR_release_ts __LSC(12) 20#define __NR_release_ts __LSC(12)
21#define __NR_null_call __LSC(13) 21#define __NR_null_call __LSC(13)
22#define __NR_omlp_down __LSC(14)
23#define __NR_omlp_up __LSC(15)
22 24
23#define NR_litmus_syscalls 14 25#define NR_litmus_syscalls 16
diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h
index f0618e75348d..dd4b6c928b2c 100644
--- a/include/litmus/unistd_64.h
+++ b/include/litmus/unistd_64.h
@@ -33,5 +33,9 @@ __SYSCALL(__NR_wait_for_ts_release, sys_wait_for_ts_release)
33__SYSCALL(__NR_release_ts, sys_release_ts) 33__SYSCALL(__NR_release_ts, sys_release_ts)
34#define __NR_null_call __LSC(13) 34#define __NR_null_call __LSC(13)
35__SYSCALL(__NR_null_call, sys_null_call) 35__SYSCALL(__NR_null_call, sys_null_call)
36#define __NR_omlp_down __LSC(14)
37__SYSCALL(__NR_omlp_down, sys_omlp_down)
38#define __NR_omlp_up __LSC(15)
39__SYSCALL(__NR_omlp_up, sys_omlp_up)
36 40
37#define NR_litmus_syscalls 14 41#define NR_litmus_syscalls 16
diff --git a/litmus/Kconfig b/litmus/Kconfig
index e00887e672df..9549afbb5591 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -77,6 +77,22 @@ config FMLP
77 Say Yes if you want FMLP long critical section 77 Say Yes if you want FMLP long critical section
78 synchronization support. 78 synchronization support.
79 79
80config OMLP
81 bool "OMLP support"
82 depends on FMLP
83 default n
84 help
85 Include support for enhanced deterministic multiprocessor
86 real-time synchronization support. In contrast to FMLP, which
87 is O(n), OMLP is O(m), where n is the number of tasks sharing a
88 resource and m is the number of processors. OMLP always
89 lower-bounds FMLP-Long.
90
91 ONLY G-EDF CURRENTLY SUPPORTED!
92
93 Say Yes if you want OMLP critical section synchronization
94 support.
95
80endmenu 96endmenu
81 97
82menu "Tracing" 98menu "Tracing"
diff --git a/litmus/Makefile b/litmus/Makefile
index 1cf2b6267ae3..1d7f27ec568d 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -10,6 +10,7 @@ obj-y = sched_plugin.o litmus.o \
10 fdso.o \ 10 fdso.o \
11 srp.o \ 11 srp.o \
12 fmlp.o \ 12 fmlp.o \
13 omlp.o \
13 bheap.o \ 14 bheap.o \
14 ctrldev.o \ 15 ctrldev.o \
15 sched_gsn_edf.o \ 16 sched_gsn_edf.o \
diff --git a/litmus/fdso.c b/litmus/fdso.c
index 85be716941d8..0cd0235d1a7e 100644
--- a/litmus/fdso.c
+++ b/litmus/fdso.c
@@ -19,11 +19,13 @@
19#include <litmus/fdso.h> 19#include <litmus/fdso.h>
20 20
21extern struct fdso_ops fmlp_sem_ops; 21extern struct fdso_ops fmlp_sem_ops;
22extern struct fdso_ops omlp_sem_ops;
22extern struct fdso_ops srp_sem_ops; 23extern struct fdso_ops srp_sem_ops;
23 24
24static const struct fdso_ops* fdso_ops[] = { 25static const struct fdso_ops* fdso_ops[] = {
25 &fmlp_sem_ops, 26 &fmlp_sem_ops,
26 &srp_sem_ops, 27 &srp_sem_ops,
28 &omlp_sem_ops
27}; 29};
28 30
29static void* fdso_create(obj_type_t type) 31static void* fdso_create(obj_type_t type)
diff --git a/litmus/omlp.c b/litmus/omlp.c
new file mode 100644
index 000000000000..dbb39dc0c07c
--- /dev/null
+++ b/litmus/omlp.c
@@ -0,0 +1,319 @@
1/*
2 * OMLP implementation.
3 * Much of the code here is borrowed from include/asm-i386/semaphore.h
4 */
5
6#include <asm/atomic.h>
7
8#include <linux/semaphore.h>
9#include <linux/sched.h>
10#include <linux/wait.h>
11#include <linux/spinlock.h>
12
13#include <litmus/litmus.h>
14#include <litmus/sched_plugin.h>
15#include <litmus/edf_common.h>
16
17#include <litmus/fdso.h>
18
19#include <litmus/trace.h>
20
21#ifdef CONFIG_OMLP
22
23static void* create_omlp_semaphore(void)
24{
25 struct omlp_semaphore* sem;
26 int i;
27
28 sem = kmalloc(sizeof(*sem), GFP_KERNEL);
29 if (!sem)
30 return NULL;
31 atomic_set(&sem->pi.count, 1);
32 sem->pi.sleepers = 0;
33 init_waitqueue_head(&sem->pi.wait);
34 sem->pi.hp.task = NULL;
35 sem->holder = NULL;
36 for (i = 0; i < NR_CPUS; i++)
37 sem->pi.hp.cpu_task[i] = NULL;
38
39 sem->pi.stack_node.inh_task = NULL;
40 INIT_LIST_HEAD(&sem->pi.stack_node.list);
41
42
43 INIT_LIST_HEAD(&sem->pq_task_list);
44
45 TRACE_CUR("number of online CPUs: %d", num_online_cpus());
46
47 return sem;
48}
49
50static int open_omlp_semaphore(struct od_table_entry* entry, void* __user arg)
51{
52 if (!omlp_active())
53 return -EBUSY;
54 return 0;
55}
56
57static void destroy_omlp_semaphore(void* sem)
58{
59 /* XXX assert invariants */
60 kfree(sem);
61}
62
63struct fdso_ops omlp_sem_ops = {
64 .create = create_omlp_semaphore,
65 .open = open_omlp_semaphore,
66 .destroy = destroy_omlp_semaphore
67};
68
69struct wq_pair {
70 struct task_struct* tsk;
71 struct omlp_semaphore* sem;
72};
73
74static int rt_pi_wake_up(wait_queue_t *wait, unsigned mode, int sync,
75 void *key)
76{
77 struct wq_pair* wqp = (struct wq_pair*) wait->private;
78 set_rt_flags(wqp->tsk, RT_F_EXIT_SEM);
79 litmus->omlp_inherit_priority(to_pi(wqp->sem), wqp->tsk);
80 TRACE_TASK(wqp->tsk,
81 "woken up by rt_pi_wake_up() (RT_F_SEM_EXIT, PI)\n");
82 /* point to task for default_wake_function() */
83 wait->private = wqp->tsk;
84 default_wake_function(wait, mode, sync, key);
85
86 /* Always return true since we know that if we encountered a task
87 * that was already running the wake_up raced with the schedule in
88 * rt_pi_down(). In that case the task in rt_pi_down() will be scheduled
89 * immediately and own the lock. We must not wake up another task in
90 * any case.
91 */
92 return 1;
93}
94
95typedef int (*list_order_t)(struct wq_pair*, struct wq_pair*);
96static int edf_order(struct wq_pair *a, struct wq_pair *b)
97{
98 return(edf_higher_prio(a->tsk, b->tsk));
99}
100
101static inline void add_list_ordered(
102 struct list_head* pq,
103 wait_queue_t *wait,
104 list_order_t cmp)
105{
106 struct wq_pair *curr_queued;
107 wait_queue_t *tmp;
108 struct list_head *pos, *next;
109
110 struct wq_pair *to_insert =
111 (struct wq_pair*) list_entry(&wait->task_list, wait_queue_t, task_list)->private;
112
113 /* list is empty. belongs at tail. */
114 if(list_empty(pq)) {
115 list_add_tail(&wait->task_list, pq);
116 return;
117 }
118
119 /* new job has the least priority. belongs at tail. */
120 tmp = list_entry(pq->prev, wait_queue_t, task_list);
121 curr_queued = (struct wq_pair*)tmp->private;
122 if(curr_queued && !cmp(to_insert, curr_queued))
123 {
124 list_add_tail(&wait->task_list, pq);
125 return;
126 }
127
128 /* belongs between nodes. this is the hardest case
129 * and hence left for last.
130 */
131 list_for_each_safe(pos, next, pq) {
132 tmp = list_entry(pos, wait_queue_t, task_list);
133 curr_queued = (struct wq_pair*)tmp->private;
134 if (curr_queued && cmp(to_insert, curr_queued)) {
135 list_add(&wait->task_list, pos->prev);
136 return;
137 }
138 }
139
140 /* fail-safe?? */
141 list_add_tail(&wait->task_list, pq);
142 return;
143}
144
145static int do_omlp_down(struct omlp_semaphore* sem)
146{
147 unsigned long flags;
148 struct task_struct *tsk = current;
149 struct wq_pair pair;
150 int suspended = 1;
151 int count;
152
153 wait_queue_t wait = {
154 .flags = 0,
155 .private = &pair,
156 .func = rt_pi_wake_up,
157 .task_list = {NULL, NULL}
158 };
159
160 pair.tsk = tsk;
161 pair.sem = sem;
162
163 spin_lock_irqsave(&sem->pi.wait.lock, flags);
164
165 count = atomic_dec_return(&sem->pi.count);
166
167 if (count < 0 || waitqueue_active(&sem->pi.wait)) {
168
169 tsk->state = TASK_UNINTERRUPTIBLE;
170
171 if(count <= -(num_online_cpus()) || !list_empty(&sem->pq_task_list)) {
172 wait.flags |= WQ_FLAG_EXCLUSIVE;
173 /* G-EDF only, so there's one PQ. */
174 add_list_ordered(&sem->pq_task_list, &wait, edf_order);
175 TRACE_CUR("count = %d, suspends (%p) on PI lock (PQ) %p\n", count, tsk, sem);
176 } else {
177 /* we need to suspend on the wait queue */
178 add_wait_queue_exclusive_locked(&sem->pi.wait, &wait);
179
180 TRACE_CUR("count = %d, suspends (%p) on PI lock (FIFO) %p\n", count, tsk, sem);
181 litmus->omlp_pi_block(to_pi(sem), tsk);
182 }
183
184 /* release lock before sleeping */
185 spin_unlock_irqrestore(&sem->pi.wait.lock, flags);
186
187 TS_PI_DOWN_END;
188 preempt_enable_no_resched();
189
190
191 /* we depend on the FIFO order
192 * Thus, we don't need to recheck when we wake up, we
193 * are guaranteed to have the lock since there is only one
194 * wake up per release
195 */
196 schedule();
197
198 TRACE_CUR("woke up (%p), now owns PI lock %p\n", tsk, sem);
199
200 /* try_to_wake_up() set our state to TASK_RUNNING,
201 * all we need to do is to remove our wait queue entry
202 * We'll now be on pi.wait even if we enqueued on pqwait.
203 */
204 remove_wait_queue(&sem->pi.wait, &wait);
205 } else {
206 /* no priority inheritance necessary, since there are no queued
207 * tasks.
208 */
209 suspended = 0;
210 TRACE_CUR("(%p) acquired PI lock %p, no contention\n", tsk, sem);
211 sem->holder = tsk;
212
213 /* don't know if we're global or partitioned. */
214 sem->pi.hp.task = tsk;
215 sem->pi.hp.cpu_task[get_partition(tsk)] = tsk;
216
217 litmus->omlp_inherit_priority(to_pi(sem), tsk);
218 spin_unlock_irqrestore(&sem->pi.wait.lock, flags);
219 }
220 return suspended;
221}
222
223static void do_omlp_up(struct omlp_semaphore* sem)
224{
225 unsigned long flags;
226
227 spin_lock_irqsave(&sem->pi.wait.lock, flags);
228
229 TRACE_CUR("(%p) releases PI lock %p\n", current, sem);
230 litmus->omlp_return_priority(to_pi(sem));
231 sem->holder = NULL;
232
233 if (atomic_inc_return(&sem->pi.count) < 1) {
234 if (!list_empty(&sem->pq_task_list)) {
235 /* Move the head task from pq_task_list to pi.wait.
236 * Do this before waking up the next task so it gets
237 * the proper pi.
238 */
239 wait_queue_t *wait =
240 list_entry(sem->pq_task_list.next, wait_queue_t, task_list);
241 list_del(&wait->task_list);
242 INIT_LIST_HEAD(&wait->task_list);
243
244 /* we need to suspend on the wait queue */
245 add_wait_queue_exclusive_locked(&sem->pi.wait, wait);
246
247 TRACE_CUR("moving waiter (%p) to FIFO queue. %p\n", ((struct wq_pair*)(wait->private))->tsk, sem);
248 /* Safe since sem->holder == NULL and pi_block checks
249 * for the case.
250 */
251 litmus->omlp_pi_block(to_pi(sem), ((struct wq_pair*)(wait->private))->tsk);
252 }
253
254 /* there is a task queued */
255 wake_up_locked(&sem->pi.wait);
256 }
257
258 spin_unlock_irqrestore(&sem->pi.wait.lock, flags);
259}
260
261asmlinkage long sys_omlp_down(int sem_od)
262{
263 long ret = 0;
264 struct omlp_semaphore * sem;
265 int suspended = 0;
266
267 preempt_disable();
268 TS_PI_DOWN_START;
269
270 sem = lookup_omlp_sem(sem_od);
271 if (sem)
272 suspended = do_omlp_down(sem);
273 else
274 ret = -EINVAL;
275
276 if (!suspended) {
277 TS_PI_DOWN_END;
278 preempt_enable();
279 }
280
281 return ret;
282}
283
284asmlinkage long sys_omlp_up(int sem_od)
285{
286 long ret = 0;
287 struct omlp_semaphore * sem;
288
289 preempt_disable();
290 TS_PI_UP_START;
291
292 sem = lookup_omlp_sem(sem_od);
293 if (sem)
294 do_omlp_up(sem);
295 else
296 ret = -EINVAL;
297
298
299 TS_PI_UP_END;
300 preempt_enable();
301
302 return ret;
303}
304
305#else
306
307struct fdso_ops omlp_sem_ops = {};
308
309asmlinkage long sys_omlp_down(int sem_od)
310{
311 return -ENOSYS;
312}
313
314asmlinkage long sys_omlp_up(int sem_od)
315{
316 return -ENOSYS;
317}
318
319#endif
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 4c8a21705810..6b4e4d9aa099 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -775,9 +775,34 @@ static long gsnedf_fmlp_return_priority(struct pi_semaphore *_sem)
775 775
776 return ret; 776 return ret;
777} 777}
778#endif
779
780
781#ifdef CONFIG_OMLP
782/* Reuse FMLP hooks since OMLP implements the G-EDF stuff
783 * already. May have to do G-EDF specific work here in the
784 * future. Will probably need additional callback to move
785 * items from PQ to FIFO.
786 */
787static long gsnedf_omlp_pi_block(struct pi_semaphore *_sem,
788 struct task_struct *new_waiter)
789{
790 return gsnedf_fmlp_pi_block(_sem, new_waiter);
791}
778 792
793static long gsnedf_omlp_inherit_priority(struct pi_semaphore *_sem,
794 struct task_struct *new_owner)
795{
796 return gsnedf_fmlp_inherit_priority(_sem, new_owner);
797}
798
799static long gsnedf_omlp_return_priority(struct pi_semaphore *_sem)
800{
801 return gsnedf_fmlp_return_priority(_sem);
802}
779#endif 803#endif
780 804
805
781static long gsnedf_admit_task(struct task_struct* tsk) 806static long gsnedf_admit_task(struct task_struct* tsk)
782{ 807{
783 return 0; 808 return 0;
@@ -830,6 +855,12 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = {
830 .fmlp_inherit_priority = gsnedf_fmlp_inherit_priority, 855 .fmlp_inherit_priority = gsnedf_fmlp_inherit_priority,
831 .fmlp_return_priority = gsnedf_fmlp_return_priority, 856 .fmlp_return_priority = gsnedf_fmlp_return_priority,
832#endif 857#endif
858#ifdef CONFIG_OMLP
859 .omlp_active = 1,
860 .omlp_pi_block = gsnedf_omlp_pi_block,
861 .omlp_inherit_priority = gsnedf_omlp_inherit_priority,
862 .omlp_return_priority = gsnedf_omlp_return_priority,
863#endif
833 .admit_task = gsnedf_admit_task, 864 .admit_task = gsnedf_admit_task,
834 .activate_plugin = gsnedf_activate_plugin, 865 .activate_plugin = gsnedf_activate_plugin,
835}; 866};
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c
index 0ebb677198d5..c240f5236e81 100644
--- a/litmus/sched_plugin.c
+++ b/litmus/sched_plugin.c
@@ -167,6 +167,11 @@ struct sched_plugin linux_sched_plugin = {
167 .fmlp_return_priority = litmus_dummy_return_priority, 167 .fmlp_return_priority = litmus_dummy_return_priority,
168 .fmlp_pi_block = litmus_dummy_pi_block, 168 .fmlp_pi_block = litmus_dummy_pi_block,
169#endif 169#endif
170#ifdef CONFIG_OMLP
171 .omlp_inherit_priority = litmus_dummy_inherit_priority,
172 .omlp_return_priority = litmus_dummy_return_priority,
173 .omlp_pi_block = litmus_dummy_pi_block,
174#endif
170 .admit_task = litmus_dummy_admit_task 175 .admit_task = litmus_dummy_admit_task
171}; 176};
172 177
@@ -219,6 +224,11 @@ int register_sched_plugin(struct sched_plugin* plugin)
219 CHECK_PI(fmlp, return_priority); 224 CHECK_PI(fmlp, return_priority);
220 CHECK_PI(fmlp, pi_block); 225 CHECK_PI(fmlp, pi_block);
221#endif 226#endif
227#ifdef CONFIG_OMLP
228 CHECK_PI(omlp, inherit_priority);
229 CHECK_PI(omlp, return_priority);
230 CHECK_PI(omlp, pi_block);
231#endif
222 CHECK(admit_task); 232 CHECK(admit_task);
223 233
224 if (!plugin->release_at) 234 if (!plugin->release_at)