Diffstat (limited to 'litmus/omlp.c')
 -rw-r--r--  litmus/omlp.c  319
 1 file changed, 319 insertions, 0 deletions
diff --git a/litmus/omlp.c b/litmus/omlp.c
new file mode 100644
index 000000000000..dbb39dc0c07c
--- /dev/null
+++ b/litmus/omlp.c
@@ -0,0 +1,319 @@
/*
 * OMLP implementation.
 * Much of the code here is borrowed from include/asm-i386/semaphore.h.
 */
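
/* Structure, as implemented below (apparently the global OMLP variant):
 * each semaphore keeps a FIFO wait queue (pi.wait) that, together with
 * the lock holder, spans at most m = num_online_cpus() jobs, plus a
 * priority-ordered overflow queue (pq_task_list). Jobs arriving while
 * the FIFO is full wait in the priority queue and are promoted to the
 * FIFO tail on each release. The holder inherits priority through
 * litmus->omlp_inherit_priority().
 */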

#include <asm/atomic.h>

#include <linux/semaphore.h>
#include <linux/sched.h>
#include <linux/slab.h>	/* kmalloc()/kfree() */
#include <linux/wait.h>
#include <linux/spinlock.h>

#include <litmus/litmus.h>
#include <litmus/sched_plugin.h>
#include <litmus/edf_common.h>

#include <litmus/fdso.h>

#include <litmus/trace.h>

#ifdef CONFIG_OMLP

static void* create_omlp_semaphore(void)
{
	struct omlp_semaphore* sem;
	int i;

	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
	if (!sem)
		return NULL;
	atomic_set(&sem->pi.count, 1);
	sem->pi.sleepers = 0;
	init_waitqueue_head(&sem->pi.wait);
	sem->pi.hp.task = NULL;
	sem->holder = NULL;
	for (i = 0; i < NR_CPUS; i++)
		sem->pi.hp.cpu_task[i] = NULL;

	sem->pi.stack_node.inh_task = NULL;
	INIT_LIST_HEAD(&sem->pi.stack_node.list);

	INIT_LIST_HEAD(&sem->pq_task_list);

	TRACE_CUR("number of online CPUs: %d\n", num_online_cpus());

	return sem;
}

static int open_omlp_semaphore(struct od_table_entry* entry, void* __user arg)
{
	if (!omlp_active())
		return -EBUSY;
	return 0;
}

static void destroy_omlp_semaphore(void* sem)
{
	/* XXX assert invariants */
	kfree(sem);
}

struct fdso_ops omlp_sem_ops = {
	.create  = create_omlp_semaphore,
	.open    = open_omlp_semaphore,
	.destroy = destroy_omlp_semaphore
};
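
/* Note: omlp_sem_ops plugs the semaphore into LITMUS^RT's FDSO layer
 * (file-descriptor-attached shared objects), through which user space
 * obtains the sem_od descriptors passed to sys_omlp_down()/sys_omlp_up().
 */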

struct wq_pair {
	struct task_struct* tsk;
	struct omlp_semaphore* sem;
};

static int rt_pi_wake_up(wait_queue_t *wait, unsigned mode, int sync,
			 void *key)
{
	struct wq_pair* wqp = (struct wq_pair*) wait->private;
	set_rt_flags(wqp->tsk, RT_F_EXIT_SEM);
	litmus->omlp_inherit_priority(to_pi(wqp->sem), wqp->tsk);
	TRACE_TASK(wqp->tsk,
		   "woken up by rt_pi_wake_up() (RT_F_SEM_EXIT, PI)\n");
	/* point to task for default_wake_function() */
	wait->private = wqp->tsk;
	default_wake_function(wait, mode, sync, key);

	/* Always return true: if we encountered a task that was already
	 * running, the wake-up raced with the schedule() in do_omlp_down().
	 * In that case the task in do_omlp_down() will be scheduled
	 * immediately and own the lock. We must not wake up another task
	 * in any case.
	 */
	return 1;
}

typedef int (*list_order_t)(struct wq_pair*, struct wq_pair*);

static int edf_order(struct wq_pair *a, struct wq_pair *b)
{
	return edf_higher_prio(a->tsk, b->tsk);
}
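
/* Only EDF ordering is used below (the PQ is G-EDF-ordered), but the
 * list_order_t hook would admit other policies: a hypothetical rm_order()
 * comparing by period, say, could be passed to add_list_ordered() instead.
 */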

static inline void add_list_ordered(
	struct list_head* pq,
	wait_queue_t *wait,
	list_order_t cmp)
{
	struct wq_pair *curr_queued;
	wait_queue_t *tmp;
	struct list_head *pos, *next;

	/* wait is its own list_entry(); its private field holds the wq_pair */
	struct wq_pair *to_insert = (struct wq_pair*) wait->private;

	/* list is empty: belongs at tail */
	if (list_empty(pq)) {
		list_add_tail(&wait->task_list, pq);
		return;
	}

	/* new job has the lowest priority: belongs at tail */
	tmp = list_entry(pq->prev, wait_queue_t, task_list);
	curr_queued = (struct wq_pair*) tmp->private;
	if (curr_queued && !cmp(to_insert, curr_queued)) {
		list_add_tail(&wait->task_list, pq);
		return;
	}

	/* belongs between nodes: this is the hardest case
	 * and hence left for last
	 */
	list_for_each_safe(pos, next, pq) {
		tmp = list_entry(pos, wait_queue_t, task_list);
		curr_queued = (struct wq_pair*) tmp->private;
		if (curr_queued && cmp(to_insert, curr_queued)) {
			/* insert before the first lower-priority entry */
			list_add_tail(&wait->task_list, pos);
			return;
		}
	}

	/* unreachable given the tail check above; kept as a fail-safe */
	list_add_tail(&wait->task_list, pq);
}
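
/* The PQ is thus kept sorted from highest to lowest priority; its head is
 * the waiter that do_omlp_up() promotes to the FIFO queue on each release.
 */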

static int do_omlp_down(struct omlp_semaphore* sem)
{
	unsigned long flags;
	struct task_struct *tsk = current;
	struct wq_pair pair;
	int suspended = 1;
	int count;

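	/* The wait entry uses a custom wake function, rt_pi_wake_up(), which
	 * re-establishes priority inheritance before the task is woken; the
	 * wq_pair in .private lets that callback find both task and semaphore.
	 */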
	wait_queue_t wait = {
		.flags = 0,
		.private = &pair,
		.func = rt_pi_wake_up,
		.task_list = {NULL, NULL}
	};

	pair.tsk = tsk;
	pair.sem = sem;

	spin_lock_irqsave(&sem->pi.wait.lock, flags);

	count = atomic_dec_return(&sem->pi.count);

	if (count < 0 || waitqueue_active(&sem->pi.wait)) {

		tsk->state = TASK_UNINTERRUPTIBLE;

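		/* Counting convention: pi.count is 1 when free, 0 when held
		 * with no waiters, and -k when held with k waiters. With m =
		 * num_online_cpus(), count <= -m means the FIFO already spans
		 * the holder plus m - 1 waiters, so this job overflows into
		 * the priority queue, matching OMLP's O(m) blocking bound.
		 */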
		if (count <= -(num_online_cpus()) || !list_empty(&sem->pq_task_list)) {
			wait.flags |= WQ_FLAG_EXCLUSIVE;
			/* G-EDF only, so there is a single PQ. */
			add_list_ordered(&sem->pq_task_list, &wait, edf_order);
			TRACE_CUR("count = %d, suspends (%p) on PI lock (PQ) %p\n",
				  count, tsk, sem);
		} else {
			/* we need to suspend on the wait queue */
			add_wait_queue_exclusive_locked(&sem->pi.wait, &wait);

			TRACE_CUR("count = %d, suspends (%p) on PI lock (FIFO) %p\n",
				  count, tsk, sem);
			litmus->omlp_pi_block(to_pi(sem), tsk);
		}

		/* release lock before sleeping */
		spin_unlock_irqrestore(&sem->pi.wait.lock, flags);

		TS_PI_DOWN_END;
		preempt_enable_no_resched();

		/* We depend on the FIFO order: there is no need to recheck
		 * when we wake up, since we are guaranteed to hold the lock.
		 * There is only one wake-up per release.
		 */
		schedule();

		TRACE_CUR("woke up (%p), now owns PI lock %p\n", tsk, sem);

		/* try_to_wake_up() set our state to TASK_RUNNING; all we need
		 * to do is to remove our wait queue entry. By now we are on
		 * pi.wait even if we originally enqueued on pq_task_list.
		 */
		remove_wait_queue(&sem->pi.wait, &wait);
	} else {
		/* No priority inheritance necessary, since there are no
		 * queued tasks.
		 */
		suspended = 0;
		TRACE_CUR("(%p) acquired PI lock %p, no contention\n", tsk, sem);
		sem->holder = tsk;

		/* We don't know whether we are global or partitioned, so set
		 * both the global and the per-CPU highest-priority task.
		 */
		sem->pi.hp.task = tsk;
		sem->pi.hp.cpu_task[get_partition(tsk)] = tsk;

		litmus->omlp_inherit_priority(to_pi(sem), tsk);
		spin_unlock_irqrestore(&sem->pi.wait.lock, flags);
	}
	return suspended;
}

static void do_omlp_up(struct omlp_semaphore* sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->pi.wait.lock, flags);

	TRACE_CUR("(%p) releases PI lock %p\n", current, sem);
	litmus->omlp_return_priority(to_pi(sem));
	sem->holder = NULL;

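	/* atomic_inc_return() < 1 means at least one task is still waiting
	 * (see the counting convention in do_omlp_down()): promote the PQ
	 * head, if any, to the FIFO tail, then wake the FIFO head.
	 */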
	if (atomic_inc_return(&sem->pi.count) < 1) {
		if (!list_empty(&sem->pq_task_list)) {
			/* Move the head task from pq_task_list to pi.wait.
			 * Do this before waking up the next task so that it
			 * gets the proper priority inheritance.
			 */
			wait_queue_t *wait =
				list_entry(sem->pq_task_list.next, wait_queue_t, task_list);
			list_del(&wait->task_list);
			INIT_LIST_HEAD(&wait->task_list);

			/* the moved task now suspends on the wait queue */
			add_wait_queue_exclusive_locked(&sem->pi.wait, wait);

			TRACE_CUR("moving waiter (%p) to FIFO queue %p\n",
				  ((struct wq_pair*)(wait->private))->tsk, sem);
			/* Safe since sem->holder == NULL and pi_block checks
			 * for that case.
			 */
			litmus->omlp_pi_block(to_pi(sem),
					      ((struct wq_pair*)(wait->private))->tsk);
		}

		/* Wake up the task at the head of the FIFO queue; the waiter
		 * just moved from the PQ (if any) was added at the tail.
		 */
		wake_up_locked(&sem->pi.wait);
	}

	spin_unlock_irqrestore(&sem->pi.wait.lock, flags);
}

asmlinkage long sys_omlp_down(int sem_od)
{
	long ret = 0;
	struct omlp_semaphore *sem;
	int suspended = 0;

	preempt_disable();
	TS_PI_DOWN_START;

	sem = lookup_omlp_sem(sem_od);
	if (sem)
		suspended = do_omlp_down(sem);
	else
		ret = -EINVAL;

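	/* If we suspended, do_omlp_down() already ended the timestamp and
	 * called preempt_enable_no_resched() before schedule(); only the
	 * uncontended path re-enables preemption here.
	 */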
	if (!suspended) {
		TS_PI_DOWN_END;
		preempt_enable();
	}

	return ret;
}

asmlinkage long sys_omlp_up(int sem_od)
{
	long ret = 0;
	struct omlp_semaphore *sem;

	preempt_disable();
	TS_PI_UP_START;

	sem = lookup_omlp_sem(sem_od);
	if (sem)
		do_omlp_up(sem);
	else
		ret = -EINVAL;

	TS_PI_UP_END;
	preempt_enable();

	return ret;
}

#else

struct fdso_ops omlp_sem_ops = {};

asmlinkage long sys_omlp_down(int sem_od)
{
	return -ENOSYS;
}

asmlinkage long sys_omlp_up(int sem_od)
{
	return -ENOSYS;
}

#endif
