author     John M. Calandrino <jmc@jupiter-cs.cs.unc.edu>  2007-05-10 14:55:28 -0400
committer  John M. Calandrino <jmc@jupiter-cs.cs.unc.edu>  2007-05-10 14:55:28 -0400
commit     f0fa6cd306de161a41e56382b1a59000f16938f5 (patch)
tree       8d37500b4e1899c64714a7bcad939d1c8d9ffb8a /arch
parent     48f446412e365345f530b93449ba237269f07512 (diff)
Modified SRP semaphores so that waiting for a task's priority to exceed the
system priority ceiling (SPC) works correctly, mostly by adding per-CPU wait
queues and rewriting wait_until_exceed_spc.
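
In outline, the patch pairs the existing per-CPU ceiling pointer (spc_tasks)
with a per-CPU wait queue (spc_waitqueues): a task whose priority does not yet
exceed the ceiling parks itself on its CPU's queue and sleeps, and whoever
lowers the ceiling (sys_srp_up or sys_srp_sema_free) walks that queue and
wakes any task that now exceeds it. The following is a condensed reading aid
distilled from the hunks below, not code from the commit; it reuses the
patch's own identifiers (srp_lock, spc_tasks, spc_waitqueues, srp_higher_prio,
set_rt_flags) and elides the ceiling recomputation.

/*
 * Condensed sketch of the sleep/wake protocol this patch adds
 * (a reading aid, not part of the commit). All identifiers are
 * the patch's own; the ceiling recomputation is elided.
 */

/* Waiter side (wait_until_exceed_spc): block until this task's
 * priority exceeds the CPU's system priority ceiling. */
void waiter_side(void)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;

	queue_lock_irqsave(&srp_lock, flags);
	add_wait_queue_exclusive_locked(&__get_cpu_var(spc_waitqueues), &wait);
	while (!srp_higher_prio(current, __get_cpu_var(spc_tasks))) {
		/* Mark ourselves asleep *before* dropping the lock, so a
		 * wake-up issued between unlock and schedule() is not lost. */
		current->state = TASK_UNINTERRUPTIBLE;
		queue_unlock_irqrestore(&srp_lock, flags);
		schedule();
		queue_lock_irqsave(&srp_lock, flags);
	}
	remove_wait_queue_locked(&__get_cpu_var(spc_waitqueues), &wait);
	queue_unlock_irqrestore(&srp_lock, flags);
}

/* Waker side (shared shape of sys_srp_up and sys_srp_sema_free):
 * called with srp_lock held, after spc_tasks has been recomputed. */
static void waker_side(void)
{
	struct list_head *tmp, *next;
	wait_queue_head_t *wq = &__get_cpu_var(spc_waitqueues);

	if (!waitqueue_active(wq))
		return;
	list_for_each_safe(tmp, next, &wq->task_list) {
		wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);
		struct task_struct *t = (struct task_struct *) curr->private;

		set_rt_flags(t, RT_F_EXIT_SEM);
		if (srp_higher_prio(t, __get_cpu_var(spc_tasks)))
			/* For a DECLARE_WAITQUEUE entry, curr->func is
			 * default_wake_function, i.e. try_to_wake_up(). */
			curr->func(curr, TASK_UNINTERRUPTIBLE |
				   TASK_INTERRUPTIBLE, 0, NULL);
	}
}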
Diffstat (limited to 'arch')
-rw-r--r--  arch/i386/kernel/srp_sem_syscalls.c  60
1 file changed, 54 insertions(+), 6 deletions(-)
diff --git a/arch/i386/kernel/srp_sem_syscalls.c b/arch/i386/kernel/srp_sem_syscalls.c
index cf9ef72a3f..a07c36c7bf 100644
--- a/arch/i386/kernel/srp_sem_syscalls.c
+++ b/arch/i386/kernel/srp_sem_syscalls.c
@@ -23,6 +23,7 @@ typedef int srp_sema_id; /* Userspace ID of a srp_semaphore */
 
 /* System-wide priority ceiling, represented as a pointer to a task. */
 DEFINE_PER_CPU(struct task_struct *, spc_tasks);
+DEFINE_PER_CPU(wait_queue_head_t, spc_waitqueues);
 
 /* Used to serialize access to SRP semaphores and system priority ceiling. */
 static queuelock_t srp_lock;
@@ -39,8 +40,10 @@ static int __init srp_sema_boot_init(void)
 		srp_sems[sem_id].claimed = 0;
 		srp_sems[sem_id].cpu = -1;
 	}
-	for (i = 0; i < NR_CPUS; i++)
+	for (i = 0; i < NR_CPUS; i++) {
 		per_cpu(spc_tasks, i) = NULL;
+		init_waitqueue_head(&per_cpu(spc_waitqueues, i));
+	}
 	queue_lock_init(&srp_lock);
 	printk(" done!\n");
 
@@ -98,14 +101,17 @@ asmlinkage long sys_srp_down(srp_sema_id sem_id)
 asmlinkage long sys_srp_up(srp_sema_id sem_id)
 {
 	srp_sema_id sem_ctr;
+	struct list_head *tmp, *next;
 	unsigned long flags;
 	int cpu = smp_processor_id();
 
 	if (sem_id < 0 || sem_id >= MAX_SRP_SEMAPHORES ||
 	    srp_sems[sem_id].cpu != cpu)
 		return -EINVAL;
 
 	queue_lock_irqsave(&srp_lock, flags);
+
+	/* Determine new system priority ceiling for this CPU. */
 	srp_sems[sem_id].claimed = 0;
 	__get_cpu_var(spc_tasks) = NULL;
 	for (sem_ctr = 0; sem_ctr < MAX_SRP_SEMAPHORES; sem_ctr++)
@@ -114,8 +120,25 @@ asmlinkage long sys_srp_up(srp_sema_id sem_id)
 		    srp_higher_prio(srp_sems[sem_ctr].pc_task,
 				    __get_cpu_var(spc_tasks)))
 			__get_cpu_var(spc_tasks) = srp_sems[sem_ctr].pc_task;
-	queue_unlock_irqrestore(&srp_lock, flags);
 
+	/* Wake tasks on this CPU, if they exceed current ceiling. */
+	if (waitqueue_active(&__get_cpu_var(spc_waitqueues))) {
+		list_for_each_safe(tmp, next,
+				   &__get_cpu_var(spc_waitqueues).task_list) {
+			wait_queue_t *curr = list_entry(tmp, wait_queue_t,
+							task_list);
+			set_rt_flags((struct task_struct*)curr->private,
+				     RT_F_EXIT_SEM);
+
+			if (srp_higher_prio((struct task_struct*)curr->private,
+					    __get_cpu_var(spc_tasks)))
+				curr->func(curr,
+					   TASK_UNINTERRUPTIBLE |
+					   TASK_INTERRUPTIBLE, 0, NULL);
+		}
+	}
+
+	queue_unlock_irqrestore(&srp_lock, flags);
 	return 0;
 }
 
@@ -161,14 +184,19 @@ asmlinkage long sys_reg_task_srp_sem(srp_sema_id sem_id, pid_t t_pid)
  */
 void wait_until_exceed_spc(void)
 {
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
 	unsigned long flags;
 
 	queue_lock_irqsave(&srp_lock, flags);
-	while(!srp_higher_prio(current, __get_cpu_var(spc_tasks))) {
+	add_wait_queue_exclusive_locked(&__get_cpu_var(spc_waitqueues), &wait);
+	while(!srp_higher_prio(tsk, __get_cpu_var(spc_tasks))) {
+		tsk->state = TASK_UNINTERRUPTIBLE;
 		queue_unlock_irqrestore(&srp_lock, flags);
 		schedule();
 		queue_lock_irqsave(&srp_lock, flags);
 	}
+	remove_wait_queue_locked(&__get_cpu_var(spc_waitqueues), &wait);
 	queue_unlock_irqrestore(&srp_lock, flags);
 }
 
@@ -176,7 +204,8 @@ void wait_until_exceed_spc(void)
 asmlinkage long sys_srp_sema_free(srp_sema_id sem_id)
 {
 	srp_sema_id sem_ctr;
-	unsigned long flags;
+	struct list_head *tmp, *next;
+	unsigned long flags;
 	int cpu = smp_processor_id();
 
 	if (sem_id < 0 || sem_id >= MAX_SRP_SEMAPHORES ||
@@ -186,6 +215,8 @@ asmlinkage long sys_srp_sema_free(srp_sema_id sem_id)
 	queue_lock_irqsave(&srp_lock, flags);
 	srp_sems[sem_id].claimed = 0;
 	srp_sems[sem_id].used = 0;
+
+	/* Determine new system priority ceiling for this CPU. */
 	__get_cpu_var(spc_tasks) = NULL;
 	for (sem_ctr = 0; sem_ctr < MAX_SRP_SEMAPHORES; sem_ctr++)
 		if (srp_sems[sem_ctr].used && srp_sems[sem_ctr].claimed &&
@@ -194,8 +225,25 @@ asmlinkage long sys_srp_sema_free(srp_sema_id sem_id)
 				    __get_cpu_var(spc_tasks)))
 			__get_cpu_var(spc_tasks) = srp_sems[sem_ctr].pc_task;
 	srp_sems[sem_id].cpu = -1;
-	queue_unlock_irqrestore(&srp_lock, flags);
 
+	/* Wake tasks on this CPU, if they exceed current ceiling. */
+	if (waitqueue_active(&__get_cpu_var(spc_waitqueues))) {
+		list_for_each_safe(tmp, next,
+				   &__get_cpu_var(spc_waitqueues).task_list) {
+			wait_queue_t *curr = list_entry(tmp, wait_queue_t,
+							task_list);
+			set_rt_flags((struct task_struct*)curr->private,
+				     RT_F_EXIT_SEM);
+
+			if (srp_higher_prio((struct task_struct*)curr->private,
+					    __get_cpu_var(spc_tasks)))
+				curr->func(curr,
+					   TASK_UNINTERRUPTIBLE |
+					   TASK_INTERRUPTIBLE, 0, NULL);
+		}
+	}
+
+	queue_unlock_irqrestore(&srp_lock, flags);
 	return 0;
 }
 
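A note on the wake-up call in the two loops above: for a wait-queue entry set
up with DECLARE_WAITQUEUE, curr->func is the kernel's default_wake_function,
so the call amounts to try_to_wake_up() on the sleeping task. In the 2.6
kernels this patch targets, it reads roughly:

	int default_wake_function(wait_queue_t *curr, unsigned mode,
				  int sync, void *key)
	{
		/* curr->private is the task that parked itself on the queue */
		return try_to_wake_up(curr->private, mode, sync);
	}

Passing TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE as the mode mask wakes the
waiter regardless of which sleep state it chose, and because
wait_until_exceed_spc re-checks the ceiling in its while loop under srp_lock,
a wake-up delivered before the ceiling has actually dropped below the task's
priority is harmless.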