about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorJohn M. Calandrino <jmc@jupiter-cs.cs.unc.edu>2007-03-29 11:02:35 -0400
committerJohn M. Calandrino <jmc@jupiter-cs.cs.unc.edu>2007-03-29 11:02:35 -0400
commit0cc29463179562db081b1eaf2849bab61e0e2d5a (patch)
tree50882b5c940b2701ea99f8b9d00558aed8b66a14 /kernel
parent7cd5a1504de6aeb281cf3d6722c6e29f1a485d97 (diff)
sys_wait_for_zone_exit system call has been implemented.
sys_wait_for_zone_exit waits on a flag which is cleared during the local timer interrupt. Yet more race conditions have been avoided by performing zone checks before waiting for the flag, and by setting the flag *before* performing the zone check, so that if we enter the loop immediately after leaving the blocking zone, we are still okay.
Diffstat (limited to 'kernel')
-rw-r--r--kernel/litmus.c45
1 file changed, 24 insertions, 21 deletions
diff --git a/kernel/litmus.c b/kernel/litmus.c
index 6de1dbe060..dbeca929a7 100644
--- a/kernel/litmus.c
+++ b/kernel/litmus.c
@@ -297,11 +297,9 @@ asmlinkage int sys_in_blocking_zone(void)
297 int quantum_start_time; 297 int quantum_start_time;
298 /* quantum_start_time = 298 /* quantum_start_time =
299 * Time of last quantum boundary = Time of last local timer intr */ 299 * Time of last quantum boundary = Time of last local timer intr */
300 int zone_size; 300 int zone_size = ZONE_SIZE;
301 /* zone_size = 301
302 * 302 /* Assuming us granularity... */
303 * * Size of blocking zone */
304 /* Assuming us granularity... */
305 int quantum_length = jiffies_to_usecs(1); 303 int quantum_length = jiffies_to_usecs(1);
306 do_gettimeofday(&current_time); 304 do_gettimeofday(&current_time);
307 /* Is current_time > quantum_start_time+quantum_length-zone_size? */ 305 /* Is current_time > quantum_start_time+quantum_length-zone_size? */
@@ -319,24 +317,29 @@ asmlinkage int sys_in_blocking_zone(void)
319 */ 317 */
320asmlinkage int sys_wait_for_zone_exit(void) 318asmlinkage int sys_wait_for_zone_exit(void)
321{ 319{
322 /* 320 /* Get task struct for current task. */
323 * Set flag. FIX: race condition, what if we enter this 321 struct task_struct *p = current;
324 * function and interrupt handler clears flags before 322
325 * this function sets the flag? Similar problems could occur 323 /* Set flag. */
326 * if we slept instead. 324 p->rt_param.waiting_to_exit_zone = 1;
327 * CURRENT FIX: If the interrupt handler ran, then we would not
328 * be in the zone anymore, and could return immediately. Otherwise
329 * we would still be in the zone, and would have to wait.
330 */
331 flag = 1; 325 flag = 1;
332 326
333 while (flag) { 327 /*
334 /* 328 * To avoid race condition, perform one final check if we are
335 * Delay, otherwise loop is too tight and flag cannot 329 * in the zone before spinning on the flag. If we are not in
336 * be cleared. 330 * the zone, then the local timer interrupt has already occurred,
337 */ 331 * and we should clear the flag and immediately return.
338 udelay(1); 332 */
339 } 333 if (sys_in_blocking_zone()) {
334 while (p->rt_param.waiting_to_exit_zone)
335 udelay(1); /* delay, otherwise tight loop
336 * causes problems. if we want this
337 * to work for EDF, maybe change this
338 * to sched_yield()?
339 */
340 } else {
341 p->rt_param.waiting_to_exit_zone = 0;
342 }
340 343
341 return 0; // everything went okay 344 return 0; // everything went okay
342} 345}