author	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2007-05-14 16:13:28 -0400
committer	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2007-05-14 16:13:28 -0400
commit	1bd5e0b8bca4601c95f44d39813a535a8e0eb437 (patch)
tree	a9b82d51b0f934acbbf846594cdd65901ff89bae
parent	a2be7799b14ebc487496d1673720dbe337b964c9 (diff)
Reimplemented LITMUS standard FIFO semaphores + misc fixes
This version is much more straightforward than the last one and actually enforces FIFO order in all cases. Further, it removes the need for the sem_wake_up() calls by providing a custom wake-up function for the wait queue.
-rw-r--r--	include/linux/litmus.h	2
-rw-r--r--	include/linux/wait.h	2
-rw-r--r--	kernel/litmus_sem.c	85
-rw-r--r--	kernel/sched.c	43
-rw-r--r--	kernel/sched_gsn_edf.c	2
-rw-r--r--	lib/semaphore-sleepers.c	4
6 files changed, 79 insertions, 59 deletions
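
For context, the semaphores reimplemented below are driven from userspace through the sys_sema_init(), sys_down(), sys_up(), and sys_sema_free() syscalls touched by this patch. A minimal usage sketch follows; the __NR_* syscall numbers are hypothetical placeholders for illustration, since the real ones live in the LITMUS syscall table, which is not part of this diff:

/* Userspace sketch: allocate a FIFO semaphore, take it, release it,
 * and free it again. The __NR_* values are made up -- look up the
 * real numbers in the LITMUS tree. */
#include <unistd.h>
#include <sys/syscall.h>
#include <stdio.h>

#define __NR_sema_init 325	/* hypothetical */
#define __NR_down      326	/* hypothetical */
#define __NR_up        327	/* hypothetical */
#define __NR_sema_free 328	/* hypothetical */

int main(void)
{
	long sem = syscall(__NR_sema_init);	/* semaphore ID, or negative if none free */
	if (sem < 0)
		return 1;
	syscall(__NR_down, sem);	/* blocks in FIFO order if contended */
	printf("in critical section\n");
	syscall(__NR_up, sem);		/* wakes at most one waiter */
	syscall(__NR_sema_free, sem);	/* bad or unused IDs yield -EINVAL */
	return 0;
}
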
diff --git a/include/linux/litmus.h b/include/linux/litmus.h
index 414d1d28a9..73ea6435f6 100644
--- a/include/linux/litmus.h
+++ b/include/linux/litmus.h
@@ -101,7 +101,7 @@ extern spinlock_t litmus_task_set_lock;
 	sched_trace_log_message("%d: " fmt, raw_smp_processor_id(), ## args)
 
 #define TRACE_TASK(t, fmt, args...) \
-	TRACE("(%s/%d)" fmt, (t)->comm, (t)->pid, ##args)
+	TRACE("(%s/%d) " fmt, (t)->comm, (t)->pid, ##args)
 
 #define TRACE_CUR(fmt, args...) \
 	TRACE_TASK(current, fmt, ## args)
diff --git a/include/linux/wait.h b/include/linux/wait.h
index bcc8698da6..c7e96b6b59 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -161,8 +161,6 @@ wait_queue_head_t *FASTCALL(bit_waitqueue(void *, int));
 #define wake_up_locked(x)		__wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
 #define wake_up_interruptible_sync(x)	__wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
 
-#define sem_wake_up(x)			__sem_wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
-#define sem_wake_up_locked(x)		__sem_wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
 #define pi_wake_up(x)			__pi_wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
 
 #define __wait_event(wq, condition)	\
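
With sem_wake_up() and sem_wake_up_locked() gone, callers fall back to the stock wake_up()/wake_up_locked() macros (see the lib/semaphore-sleepers.c hunk below); the semaphore-specific side effect moves into a per-entry wake function instead, as rt_fifo_wake_up() does in the next file. A sketch of the underlying pattern, with illustrative names (my_wake_fn, my_wait_on) that are not part of the patch:

/* A wait_queue_t entry carries its own wake function, so a plain
 * wake_up() on the queue runs the custom side effect; no special
 * __sem_wake_up() path is needed. Sketch only, for a 2.6.20-era
 * kernel. */
#include <linux/wait.h>
#include <linux/sched.h>

static int my_wake_fn(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct task_struct *t = (struct task_struct *) wait->private;
	printk(KERN_DEBUG "waking %s/%d\n", t->comm, t->pid);	/* side effect */
	return default_wake_function(wait, mode, sync, key);
}

static void my_wait_on(wait_queue_head_t *q)
{
	wait_queue_t my_wait = {
		.private   = current,
		.func      = my_wake_fn,	/* invoked by __wake_up_common() */
		.task_list = {NULL, NULL}
	};

	current->state = TASK_UNINTERRUPTIBLE;
	add_wait_queue_exclusive(q, &my_wait);
	schedule();				/* a plain wake_up(q) resumes us */
	remove_wait_queue(q, &my_wait);
}

(The real rt_fifo_down() below performs the state change and enqueue under sem->wait.lock to close the race between checking the count and sleeping.)
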
diff --git a/kernel/litmus_sem.c b/kernel/litmus_sem.c
index 14de7e714b..cdcb6bd51f 100644
--- a/kernel/litmus_sem.c
+++ b/kernel/litmus_sem.c
@@ -13,7 +13,7 @@
 #include <linux/wait.h>
 #include <linux/sched_plugin.h>
 #include <linux/queuelock.h>
-
+#include <linux/litmus.h>
 
 /* ************************************************************************** */
 /*                          STANDARD FIFO SEMAPHORES                          */
@@ -24,6 +24,69 @@
 struct semaphore sems[MAX_SEMAPHORES]; /* all sems */
 typedef int sema_id;                   /* Userspace ID of a semaphore */
 
+static int rt_fifo_wake_up(wait_queue_t *wait, unsigned mode, int sync,
+			   void *key)
+{
+	struct task_struct* t = (struct task_struct*) wait->private;
+	set_rt_flags(t, RT_F_EXIT_SEM);
+	TRACE_TASK(t, "woken up by rt_fifo_wake_up(), set RT_F_EXIT_SEM\n");
+	return default_wake_function(wait, mode, sync, key);
+}
+
+static fastcall void rt_fifo_up(struct semaphore * sem)
+{
+	TRACE_CUR("releases lock %p\n", sem);
+	if (atomic_inc_return(&sem->count) < 1)
+		/* there is a task queued */
+		wake_up(&sem->wait);
+}
+
+/* not optimized like the Linux down() implementation, but then
+ * again we incur the cost of a syscall anyway, so this hardly matters
+ */
+static fastcall void rt_fifo_down(struct semaphore * sem)
+{
+	struct task_struct *tsk = current;
+	wait_queue_t wait = {
+		.private   = tsk,
+		.func      = rt_fifo_wake_up,
+		.task_list = {NULL, NULL}
+	};
+	unsigned long flags;
+
+
+	spin_lock_irqsave(&sem->wait.lock, flags);
+	if (atomic_dec_return(&sem->count) < 0 || waitqueue_active(&sem->wait)) {
+		/* we need to suspend */
+		tsk->state = TASK_UNINTERRUPTIBLE;
+		add_wait_queue_exclusive_locked(&sem->wait, &wait);
+
+		TRACE_CUR("suspends on lock %p\n", sem);
+
+		/* release lock before sleeping */
+		spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+		/* We depend on the FIFO order here: we don't need to
+		 * recheck the semaphore state when we wake up, since we
+		 * are guaranteed to hold the lock -- there is only one
+		 * wake-up per release.
+		 */
+		schedule();
+
+		TRACE_CUR("woke up, now owns lock %p\n", sem);
+
+		/* try_to_wake_up() set our state to TASK_RUNNING;
+		 * all we need to do is to remove our wait queue entry.
+		 */
+		remove_wait_queue(&sem->wait, &wait);
+	} else {
+		TRACE_CUR("acquired lock %p, no contention\n", sem);
+		spin_unlock_irqrestore(&sem->wait.lock, flags);
+	}
+}
+
+
+
 /* Initialize semaphores at boot time. */
 static int __init sema_boot_init(void)
 {
@@ -43,8 +106,8 @@ asmlinkage long sys_sema_init (void)
 {
 	sema_id sem_id;
 
-	for (sem_id = 0; sem_id < MAX_SEMAPHORES; sem_id++) {
-		if (!cmpxchg(&sems[sem_id].used, 0, 1)) {
+	for (sem_id = 0; sem_id < MAX_SEMAPHORES; sem_id++) {
+		if (!cmpxchg(&sems[sem_id].used, 0, 1)) {
 			sema_init(&sems[sem_id], 1);
 			return sem_id;
 		}
@@ -57,11 +120,10 @@ asmlinkage long sys_down(sema_id sem_id)
 	if (sem_id < 0 || sem_id >= MAX_SEMAPHORES)
 		return -EINVAL;
 
+	if (!sems[sem_id].used)
+		return -EINVAL;
 	/* This allows for FIFO sems and gives others a chance... */
-	if (waitqueue_active(&sems[sem_id].wait))
-		__down(&sems[sem_id]);
-	else
-		down(&sems[sem_id]);
+	rt_fifo_down(sems + sem_id);
 	return 0;
 }
 
@@ -70,7 +132,9 @@ asmlinkage long sys_up(sema_id sem_id)
 	if (sem_id < 0 || sem_id >= MAX_SEMAPHORES)
 		return -EINVAL;
 
-	up(&sems[sem_id]);
+	if (!sems[sem_id].used)
+		return -EINVAL;
+	rt_fifo_up(sems + sem_id);
 	return 0;
 }
 
@@ -82,6 +146,9 @@ asmlinkage long sys_sema_free(sema_id sem_id)
 	if (sem_id < 0 || sem_id >= MAX_SEMAPHORES)
 		return -EINVAL;
 
+	if (!sems[sem_id].used)
+		return -EINVAL;
+
 	spin_lock_irqsave(&sems[sem_id].wait.lock, flags);
 	if (waitqueue_active(&sems[sem_id].wait)) {
 		list_for_each_safe(tmp, next, &sems[sem_id].wait.task_list) {
@@ -105,8 +172,6 @@ asmlinkage long sys_sema_free(sema_id sem_id)
 
 
 
-
-
 /* ************************************************************************** */
 /*                            PRIORITY INHERITANCE                            */
 /* ************************************************************************** */
diff --git a/kernel/sched.c b/kernel/sched.c
index b04322ca10..74ad4500df 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3809,25 +3809,6 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
 	}
 }
 
-/* The core wakeup function, this time exclusively for semaphores. In LITMUS,
- * semaphores need to set a flag for tasks that they wakeup, and thus needed
- * their own wakeup functions.
- */
-static void __sem_wake_up_common(wait_queue_head_t *q, unsigned int mode,
-				 int nr_exclusive, int sync, void *key)
-{
-	struct list_head *tmp, *next;
-
-	list_for_each_safe(tmp, next, &q->task_list) {
-		wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);
-		unsigned flags = curr->flags;
-
-		set_rt_flags((struct task_struct*)curr->private, RT_F_EXIT_SEM);
-		if (curr->func(curr, mode, sync, key) &&
-		    (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
-			break;
-	}
-}
 
 /*
  * The core wakeup function, this time including priority inheritance. As a result,
@@ -3877,23 +3858,6 @@ void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
 }
 EXPORT_SYMBOL(__wake_up);
 
-/**
- * __sem_wake_up - wake up threads blocked on a waitqueue,
- *                 and set RT_F_EXIT_SEM flag.
- * @q: the waitqueue
- * @mode: which threads
- * @nr_exclusive: how many wake-one or wake-many threads to wake up
- * @key: is directly passed to the wakeup function
- */
-void fastcall __sem_wake_up(wait_queue_head_t *q, unsigned int mode,
-			    int nr_exclusive, void *key)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&q->lock, flags);
-	__sem_wake_up_common(q, mode, nr_exclusive, 0, key);
-	spin_unlock_irqrestore(&q->lock, flags);
-}
 
 /**
  * __pi_wake_up - wake up threads blocked on a waitqueue,
@@ -3924,13 +3888,6 @@ void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
 	__wake_up_common(q, mode, 1, 0, NULL);
 }
 
-/*
- * Same as __sem_wake_up but called with the spinlock in wait_queue_head_t held.
- */
-void fastcall __sem_wake_up_locked(wait_queue_head_t *q, unsigned int mode)
-{
-	__sem_wake_up_common(q, mode, 1, 0, NULL);
-}
 
 /**
  * __wake_up_sync - wake up threads blocked on a waitqueue.
diff --git a/kernel/sched_gsn_edf.c b/kernel/sched_gsn_edf.c
index d007eb82c7..046fe41e09 100644
--- a/kernel/sched_gsn_edf.c
+++ b/kernel/sched_gsn_edf.c
@@ -267,7 +267,7 @@ static reschedule_check_t gsnedf_scheduler_tick(void)
 	 * no task "runs away forever".
 	 */
 	if (is_realtime(t))
-		TRACE_TASK(t, "scheduler tick");
+		TRACE_TASK(t, "scheduler tick\n");
 
 	if (is_realtime(t) && t->time_slice && !--t->time_slice) {
 		if (!is_np(t)) { /* np tasks will be preempted when they become
diff --git a/lib/semaphore-sleepers.c b/lib/semaphore-sleepers.c
index 60e4526c5f..e500018f51 100644
--- a/lib/semaphore-sleepers.c
+++ b/lib/semaphore-sleepers.c
@@ -52,7 +52,7 @@
 
 fastcall void __up(struct semaphore *sem)
 {
-	sem_wake_up(&sem->wait);
+	wake_up(&sem->wait);
 }
 
 fastcall void __sched __down(struct semaphore * sem)
@@ -87,7 +87,7 @@ fastcall void __sched __down(struct semaphore * sem)
 		tsk->state = TASK_UNINTERRUPTIBLE;
 	}
 	remove_wait_queue_locked(&sem->wait, &wait);
-	sem_wake_up_locked(&sem->wait);
+	wake_up_locked(&sem->wait);
 	spin_unlock_irqrestore(&sem->wait.lock, flags);
 	tsk->state = TASK_RUNNING;
 }