about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorBjoern B. Brandenburg <bbb@cs.unc.edu>2008-03-04 15:59:07 -0500
committerBjoern B. Brandenburg <bbb@cs.unc.edu>2008-05-01 11:35:49 -0400
commitf08db18a7b4195691010c7343b998f455c0c9038 (patch)
tree313cf77ecba39f620de120d3dc2eb928bd748c7f
parent67a8d367109c52518f754980d2b23100ca4fd051 (diff)
update SRP impl
This is the first step in cleaning up the SRP implementation.
-rw-r--r--include/litmus/litmus.h2
-rw-r--r--include/litmus/rt_param.h6
-rw-r--r--litmus/litmus.c4
-rw-r--r--litmus/litmus_sem.c79
4 files changed, 43 insertions, 48 deletions
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 7a27c987b6..512efef341 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -153,7 +153,7 @@ inline static int budget_exhausted(struct task_struct* t)
153 return get_exec_time(t) >= get_exec_cost(t); 153 return get_exec_time(t) >= get_exec_cost(t);
154} 154}
155 155
156#define is_subject_to_srp(t) ((t)->rt_param.subject_to_srp) 156
157#define is_hrt(t) \ 157#define is_hrt(t) \
158 ((t)->rt_param.task_params.class == RT_CLASS_HARD) 158 ((t)->rt_param.task_params.class == RT_CLASS_HARD)
159#define is_srt(t) \ 159#define is_srt(t) \
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 8ffbf300b0..118e8590fd 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -73,12 +73,6 @@ struct rt_param {
73 /* is the task sleeping? */ 73 /* is the task sleeping? */
74 unsigned int flags:8; 74 unsigned int flags:8;
75 75
76 /* Did this task register any SRP controlled resource accesses?
77 * This, of course, should only ever be true under partitioning.
78 * However, this limitation is not currently enforced.
79 */
80 unsigned int subject_to_srp:1;
81
82 /* user controlled parameters */ 76 /* user controlled parameters */
83 struct rt_task task_params; 77 struct rt_task task_params;
84 78
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 8ab96452e6..2909d5c04e 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -153,7 +153,7 @@ asmlinkage long sys_complete_job(void)
153 * appropriate queue and call schedule 153 * appropriate queue and call schedule
154 */ 154 */
155 retval = litmus->complete_job(); 155 retval = litmus->complete_job();
156 if (!retval && is_subject_to_srp(current)) 156 if (!retval)
157 srp_ceiling_block(); 157 srp_ceiling_block();
158 out: 158 out:
159 return retval; 159 return retval;
@@ -207,7 +207,7 @@ asmlinkage long sys_wait_for_job_release(unsigned int job)
207 207
208 /* We still have to honor the SRP after the actual release. 208 /* We still have to honor the SRP after the actual release.
209 */ 209 */
210 if (!retval && is_subject_to_srp(current)) 210 if (!retval)
211 srp_ceiling_block(); 211 srp_ceiling_block();
212 out: 212 out:
213 return retval; 213 return retval;
diff --git a/litmus/litmus_sem.c b/litmus/litmus_sem.c
index f52941c5ca..f7047058d1 100644
--- a/litmus/litmus_sem.c
+++ b/litmus/litmus_sem.c
@@ -313,6 +313,8 @@ struct srp {
313}; 313};
314 314
315 315
316atomic_t srp_objects_in_use = ATOMIC_INIT(0);
317
316DEFINE_PER_CPU(struct srp, srp); 318DEFINE_PER_CPU(struct srp, srp);
317 319
318#define system_ceiling(srp) list2prio(srp->ceiling.next) 320#define system_ceiling(srp) list2prio(srp->ceiling.next)
@@ -343,6 +345,9 @@ static void srp_add_prio(struct srp* srp, struct srp_priority* prio)
343 list_add_tail(&prio->list, &srp->ceiling); 345 list_add_tail(&prio->list, &srp->ceiling);
344} 346}
345 347
348#define UNDEF_SEM -2
349
350
346/* struct for uniprocessor SRP "semaphore" */ 351/* struct for uniprocessor SRP "semaphore" */
347struct srp_semaphore { 352struct srp_semaphore {
348 struct srp_priority ceiling; 353 struct srp_priority ceiling;
@@ -366,18 +371,47 @@ static void* create_srp_semaphore(void)
366 INIT_LIST_HEAD(&sem->ceiling.list); 371 INIT_LIST_HEAD(&sem->ceiling.list);
367 sem->ceiling.period = 0; 372 sem->ceiling.period = 0;
368 sem->claimed = 0; 373 sem->claimed = 0;
369 sem->cpu = get_partition(current); 374 sem->cpu = UNDEF_SEM;
375 atomic_inc(&srp_objects_in_use);
370 return sem; 376 return sem;
371} 377}
372 378
379static noinline int open_srp_semaphore(struct od_table_entry* entry, void* __user arg)
380{
381 struct srp_semaphore* sem = (struct srp_semaphore*) entry->obj->obj;
382 int ret = 0;
383 struct task_struct* t = current;
384 struct srp_priority t_prio;
385
386 TRACE("opening SRP semaphore %p, cpu=%d\n", sem, sem->cpu);
387
388 if (sem->cpu == UNDEF_SEM)
389 sem->cpu = get_partition(t);
390 else if (sem->cpu != get_partition(t))
391 ret = -EPERM;
392
393 if (ret == 0) {
394 t_prio.period = get_rt_period(t);
395 t_prio.pid = t->pid;
396 if (srp_higher_prio(&t_prio, &sem->ceiling)) {
397 sem->ceiling.period = t_prio.period;
398 sem->ceiling.pid = t_prio.pid;
399 }
400 }
401
402 return ret;
403}
404
373static void destroy_srp_semaphore(void* sem) 405static void destroy_srp_semaphore(void* sem)
374{ 406{
375 /* XXX invariants */ 407 /* XXX invariants */
408 atomic_dec(&srp_objects_in_use);
376 kfree(sem); 409 kfree(sem);
377} 410}
378 411
379struct fdso_ops srp_sem_ops = { 412struct fdso_ops srp_sem_ops = {
380 .create = create_srp_semaphore, 413 .create = create_srp_semaphore,
414 .open = open_srp_semaphore,
381 .destroy = destroy_srp_semaphore 415 .destroy = destroy_srp_semaphore
382}; 416};
383 417
@@ -470,46 +504,9 @@ asmlinkage long sys_srp_up(int sem_od)
470 return ret; 504 return ret;
471} 505}
472 506
473/* Indicate that task will use a resource associated with a given
474 * semaphore. Should be done *a priori* before RT task system is
475 * executed, so this does *not* update the system priority
476 * ceiling! (The ceiling would be meaningless anyway, as the SRP
477 * breaks without this a priori knowledge.)
478 */
479asmlinkage long sys_reg_task_srp_sem(int sem_od) 507asmlinkage long sys_reg_task_srp_sem(int sem_od)
480{ 508{
481 /* 509 /* unused */
482 * FIXME: This whole concept is rather brittle!
483 * There must be a better solution. Maybe register on
484 * first reference?
485 */
486
487 struct task_struct *t = current;
488 struct srp_priority t_prio;
489 struct srp_semaphore* sem;
490
491 sem = lookup_srp_sem(sem_od);
492
493 if (!sem)
494 return -EINVAL;
495
496 if (!is_realtime(t))
497 return -EPERM;
498
499 if (sem->cpu != get_partition(t))
500 return -EINVAL;
501
502 preempt_disable();
503 t->rt_param.subject_to_srp = 1;
504 t_prio.period = get_rt_period(t);
505 t_prio.pid = t->pid;
506 if (srp_higher_prio(&t_prio, &sem->ceiling)) {
507 sem->ceiling.period = t_prio.period;
508 sem->ceiling.pid = t_prio.pid;
509 }
510
511 preempt_enable();
512
513 return 0; 510 return 0;
514} 511}
515 512
@@ -539,6 +536,10 @@ void srp_ceiling_block(void)
539 .task_list = {NULL, NULL} 536 .task_list = {NULL, NULL}
540 }; 537 };
541 538
539 /* bail out early if there aren't any SRP resources around */
540 if (!atomic_read(&srp_objects_in_use))
541 return;
542
542 preempt_disable(); 543 preempt_disable();
543 if (!srp_exceeds_ceiling(tsk, &__get_cpu_var(srp))) { 544 if (!srp_exceeds_ceiling(tsk, &__get_cpu_var(srp))) {
544 tsk->state = TASK_UNINTERRUPTIBLE; 545 tsk->state = TASK_UNINTERRUPTIBLE;