author    John M. Calandrino <jmc@jupiter-cs.cs.unc.edu>    2007-05-10 00:20:00 -0400
committer John M. Calandrino <jmc@jupiter-cs.cs.unc.edu>    2007-05-10 00:20:00 -0400
commit    5621897b6614128e91621a8bd1fb1c03f8b5112b (patch)
tree      fd6b8d5d71ef8df17e8ba772c0fa0e553d7d0cb7
parent    a799fe60722267fcdd52c1fbf10e8f704fb53a9a (diff)
First attempt at FIFO semaphores and PI sems. This may not work...
Before FIFO, everything seemed to be (finally) working ok.
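
For quick reference, this is roughly what the two semaphore entry points look like after the patch, reconstructed from the hunks below (the commented-out inline asm at the top of __sys_pi_down is omitted); treat it as a readable sketch of the resulting code rather than an authoritative copy of the tree. The rule in both paths is the same: if the wait queue is non-empty, fall through to the slow path so that earlier waiters are served first, which is what makes the semaphores FIFO.

/* arch/i386/kernel/pi_sem_syscalls.c -- PI semaphore fast path */
void __sys_pi_down(struct pi_semaphore *sem)
{
	/* Checking for active waitqueue gives others a chance... */
	if (atomic_dec_return(&sem->count) < 0 || waitqueue_active(&sem->wait))
		__pi_down(sem);
}

/* arch/i386/kernel/sem_syscalls.c -- semaphore down() syscall */
asmlinkage long sys_down(sema_id sem_id)
{
	if (sem_id < 0 || sem_id >= MAX_SEMAPHORES)
		return -EINVAL;

	/* This allows for FIFO sems and gives others a chance... */
	if (waitqueue_active(&sems[sem_id].wait))
		__down(&sems[sem_id]);	/* queue behind the current waiters */
	else
		down(&sems[sem_id]);	/* uncontended: normal fast path */
	return 0;
}
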
-rw-r--r--    arch/i386/kernel/pi_sem_syscalls.c     4
-rw-r--r--    arch/i386/kernel/sem_syscalls.c        6
-rw-r--r--    kernel/sched_gsn_edf.c                10
-rw-r--r--    lib/semaphore-sleepers.c               5
4 files changed, 13 insertions, 12 deletions
diff --git a/arch/i386/kernel/pi_sem_syscalls.c b/arch/i386/kernel/pi_sem_syscalls.c
index 1034c1ccbe..0d1753495e 100644
--- a/arch/i386/kernel/pi_sem_syscalls.c
+++ b/arch/i386/kernel/pi_sem_syscalls.c
@@ -68,7 +68,9 @@ void __sys_pi_down(struct pi_semaphore * sem)
 //		:"+m" (sem->count)
 //		:
 //		:"memory","ax");
-	if (atomic_dec_return(&sem->count) < 0)
+//
+	/* Checking for active waitqueue gives others a chance... */
+	if (atomic_dec_return(&sem->count) < 0 || waitqueue_active(&sem->wait))
 		__pi_down(sem);
 }
 
diff --git a/arch/i386/kernel/sem_syscalls.c b/arch/i386/kernel/sem_syscalls.c
index 274687d21c..4ca92c9be5 100644
--- a/arch/i386/kernel/sem_syscalls.c
+++ b/arch/i386/kernel/sem_syscalls.c
@@ -45,7 +45,11 @@ asmlinkage long sys_down(sema_id sem_id)
 	if (sem_id < 0 || sem_id >= MAX_SEMAPHORES)
 		return -EINVAL;
 
-	down(&sems[sem_id]);
+	/* This allows for FIFO sems and gives others a chance... */
+	if (waitqueue_active(&sems[sem_id].wait))
+		__down(&sems[sem_id]);
+	else
+		down(&sems[sem_id]);
 	return 0;
 }
 
diff --git a/kernel/sched_gsn_edf.c b/kernel/sched_gsn_edf.c
index c8d0f9910a..d2d77f98cc 100644
--- a/kernel/sched_gsn_edf.c
+++ b/kernel/sched_gsn_edf.c
@@ -120,7 +120,6 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 	/* Currently linked task is set to be unlinked. */
 	if (entry->linked) {
 		entry->linked->rt_param.linked_on = NO_CPU;
-		TRACE("unlinked %d from CPU%d\n", entry->linked->pid, entry->cpu);
 	}
 
 	/* Link new task to CPU. */
@@ -129,10 +128,8 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 		/* handle task is already scheduled somewhere! */
 		on_cpu = linked->rt_param.scheduled_on;
 		if (on_cpu != NO_CPU) {
-			TRACE("to be linked task is already running!\n");
-			TRACE("to be linked = %d, sched = %d\n", entry->cpu, on_cpu);
 			sched = &per_cpu(gsnedf_cpu_entries, on_cpu);
-			/* this should only happen if it is not linked already */
+			/* this should only happen if not linked already */
 			BUG_ON(sched->linked == linked);
 
 			/* If we are already scheduled on the CPU to which we
@@ -145,7 +142,6 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 				linked->rt_param.linked_on = sched->cpu;
 				sched->linked = linked;
 				update_cpu_position(sched);
-				TRACE("task %d linked to CPU%d\n", linked->pid, linked->rt_param.linked_on);
 				linked = tmp;
 			}
 		}
@@ -154,10 +150,6 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 	}
 	entry->linked = linked;
 	update_cpu_position(entry);
-
-	if (linked) {
-		TRACE("task %d linked to CPU%d\n", linked?linked->pid:0, linked?linked->rt_param.linked_on:0);
-	}
 }
 
 /* unlink - Make sure a task is not linked any longer to an entry
diff --git a/lib/semaphore-sleepers.c b/lib/semaphore-sleepers.c
index 07acc899d3..24c6680fd0 100644
--- a/lib/semaphore-sleepers.c
+++ b/lib/semaphore-sleepers.c
@@ -68,6 +68,8 @@ fastcall void __sched __down(struct semaphore * sem)
 
 	tsk->state = TASK_UNINTERRUPTIBLE;
 	spin_lock_irqsave(&sem->wait.lock, flags);
+	if (is_realtime(tsk)) /* these sems should be FIFO */
+		wait.flags = WQ_FLAG_EXCLUSIVE;
 	add_wait_queue_exclusive_locked(&sem->wait, &wait);
 
 	sem->sleepers++;
@@ -115,7 +117,7 @@ fastcall int __sched __down_interruptible(struct semaphore * sem)
 			/*
 			 * With signals pending, this turns into
 			 * the trylock failure case - we won't be
-			 * sleeping, and we* can't get the lock as
+			 * sleeping, and we can't get the lock as
 			 * it has contention. Just correct the count
 			 * and exit.
 			 */
@@ -189,6 +191,7 @@ fastcall void __sched __pi_down(struct pi_semaphore * sem)
 	unsigned long flags;
 
 	spin_lock_irqsave(&sem->wait.lock, flags);
+	wait.flags = WQ_FLAG_EXCLUSIVE;
 	add_wait_queue_exclusive_locked(&sem->wait, &wait);
 
 	/* Change semaphore priority here, if applicable. */
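
A note on why the WQ_FLAG_EXCLUSIVE changes above yield FIFO wake-ups: add_wait_queue_exclusive_locked() appends the entry at the tail of the wait queue, and the generic wake-up walk stops after waking the requested number of exclusive waiters, so with one exclusive wake per up() the waiter that has been queued longest runs first. A simplified sketch of that walk, loosely modeled on the 2.6-era __wake_up_common() (the real code in kernel/sched.c differs in detail):

/*
 * Simplified sketch only (assumes <linux/wait.h> and <linux/list.h>);
 * not the actual kernel implementation.  Entries are visited from the
 * head of the list, and every new exclusive waiter was appended at the
 * tail, so the first exclusive waiter woken is the oldest one -- FIFO.
 */
static void wake_up_sketch(wait_queue_head_t *q, unsigned int mode,
			   int nr_exclusive)
{
	wait_queue_t *curr, *next;

	list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
		unsigned flags = curr->flags;

		/* curr->func() wakes the task (e.g. default_wake_function) */
		if (curr->func(curr, mode, 0, NULL) &&
		    (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;	/* woke enough exclusive waiters, stop */
	}
}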