author    John M. Calandrino <jmc@jupiter-cs.cs.unc.edu>  2007-04-25 23:58:40 -0400
committer John M. Calandrino <jmc@jupiter-cs.cs.unc.edu>  2007-04-25 23:58:40 -0400
commit    a57fb73d6bcfd35774dc69ad5856a9c470b22df7 (patch)
tree      bd2a33b738a3455afef5202b94b6f5f0cd66ec80 /arch
parent    e1b1448bd002a7120090575fe9e1154759bde919 (diff)
Added kernel SRP "semaphore" support, to be included in PSN-EDF, and fixed
a small bug in the semaphore free methods for all user-level semaphores.
Diffstat (limited to 'arch')
-rw-r--r--  arch/i386/kernel/pi_sem_syscalls.c   |  3
-rw-r--r--  arch/i386/kernel/sem_syscalls.c      |  5
-rw-r--r--  arch/i386/kernel/srp_sem_syscalls.c  | 56
3 files changed, 44 insertions, 20 deletions
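
The centerpiece of the patch is the new srp_higher_prio() comparison and the
recomputation of the system priority ceiling (spc_task) over all claimed
semaphores. That logic can be exercised outside the kernel; the program below
is a minimal user-space model of it. The struct task, is_realtime(), and
MAX_SRP_SEMAPHORES stand-ins are simplified assumptions for illustration, not
the kernel's definitions.

#include <stdio.h>

/* Simplified stand-ins for the kernel's task state (assumptions, not the
 * kernel's definitions). A period of 0 marks a non-real-time task. */
struct task { int pid; unsigned long rt_period; };

static int is_realtime(const struct task *t) { return t->rt_period > 0; }

/* Mirrors the patch's srp_higher_prio(): anything beats a missing or
 * non-real-time ceiling holder, otherwise a shorter period wins, and
 * PID breaks ties. */
static int srp_higher_prio(const struct task *first, const struct task *second)
{
	return !second || !is_realtime(second) ||
	       first->rt_period < second->rt_period ||
	       (first->rt_period == second->rt_period &&
	        first->pid < second->pid);
}

#define MAX_SRP_SEMAPHORES 4	/* placeholder table size */

struct srp_sem { int used, claimed; struct task *pc_task; };

/* The ceiling recomputation that sys_srp_up() (and, after this patch,
 * sys_srp_sema_free()) performs: scan every claimed semaphore and keep
 * the highest-priority ceiling task. */
static struct task *recompute_ceiling(struct srp_sem sems[])
{
	struct task *spc_task = NULL;
	int i;

	for (i = 0; i < MAX_SRP_SEMAPHORES; i++)
		if (sems[i].used && sems[i].claimed &&
		    srp_higher_prio(sems[i].pc_task, spc_task))
			spc_task = sems[i].pc_task;
	return spc_task;
}

int main(void)
{
	struct task a = { 10, 100 };	/* pid 10, period 100 */
	struct task b = { 20, 50 };	/* pid 20, period 50: higher priority */
	struct srp_sem sems[MAX_SRP_SEMAPHORES] = {
		{ 1, 1, &a }, { 1, 1, &b },
	};
	struct task *spc_task = recompute_ceiling(sems);

	printf("ceiling holder: pid %d\n", spc_task ? spc_task->pid : -1);
	return 0;	/* prints "ceiling holder: pid 20" */
}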
diff --git a/arch/i386/kernel/pi_sem_syscalls.c b/arch/i386/kernel/pi_sem_syscalls.c
index 6aa66d2081..7c4758c42a 100644
--- a/arch/i386/kernel/pi_sem_syscalls.c
+++ b/arch/i386/kernel/pi_sem_syscalls.c
@@ -126,6 +126,9 @@ asmlinkage long sys_pi_sema_free(pi_sema_id sem_id)
 	struct list_head *tmp, *next;
 	unsigned long flags;
 
+	if (sem_id < 0 || sem_id >= MAX_PI_SEMAPHORES)
+		return -EINVAL;
+
 	spin_lock_irqsave(&pi_sems[sem_id].wait.lock, flags);
 	list_for_each_safe(tmp, next, &pi_sems[sem_id].wait.task_list) {
 		wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);
diff --git a/arch/i386/kernel/sem_syscalls.c b/arch/i386/kernel/sem_syscalls.c
index 1a70e0dbbc..bc7c162e0b 100644
--- a/arch/i386/kernel/sem_syscalls.c
+++ b/arch/i386/kernel/sem_syscalls.c
@@ -63,7 +63,10 @@ asmlinkage long sys_sema_free(sema_id sem_id)
 	struct list_head *tmp, *next;
 	unsigned long flags;
 
+	if (sem_id < 0 || sem_id >= MAX_SEMAPHORES)
+		return -EINVAL;
+
 	spin_lock_irqsave(&sems[sem_id].wait.lock, flags);
 	list_for_each_safe(tmp, next, &sems[sem_id].wait.task_list) {
 		wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);
 		list_del(tmp);
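
The "small bug" from the commit message is the same in both free paths above
(and in the SRP variant further down): sem_id arrives straight from user space
and was used to index the semaphore table without validation, so an
out-of-range ID would read and write past the array. The added range check
rejects bad IDs with -EINVAL before any access. A standalone sketch of the
pattern, with a placeholder table size and names:

#include <stdio.h>
#include <errno.h>

#define MAX_SEMAPHORES 8	/* placeholder table size */

static int sem_used[MAX_SEMAPHORES];

/* Validate-then-index, as in the patched sys_sema_free(): an unchecked
 * sem_id from user space could index past sem_used[]. */
static long sema_free(int sem_id)
{
	if (sem_id < 0 || sem_id >= MAX_SEMAPHORES)
		return -EINVAL;
	sem_used[sem_id] = 0;
	return 0;
}

int main(void)
{
	printf("%ld\n", sema_free(3));	/* 0: valid id accepted */
	printf("%ld\n", sema_free(99));	/* -22: out-of-range id rejected */
	return 0;
}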
diff --git a/arch/i386/kernel/srp_sem_syscalls.c b/arch/i386/kernel/srp_sem_syscalls.c
index 838df95202..639f5821eb 100644
--- a/arch/i386/kernel/srp_sem_syscalls.c
+++ b/arch/i386/kernel/srp_sem_syscalls.c
@@ -1,7 +1,5 @@
 #ifdef __KERNEL__
 
-// comparison by period and pid!
-
 /*
  * Uniprocessor SRP "semaphores".
  */
@@ -34,8 +32,10 @@ static int __init srp_sema_boot_init(void)
 	srp_sema_id sem_id;
 
 	printk("Initializing SRP semaphores...");
-	for (sem_id = 0; sem_id < MAX_SRP_SEMAPHORES; sem_id++)
+	for (sem_id = 0; sem_id < MAX_SRP_SEMAPHORES; sem_id++) {
 		srp_sems[sem_id].used = 0;
+		srp_sems[sem_id].claimed = 0;
+	}
 	queue_lock_init(&srp_lock);
 	printk(" complete!\n");
 
@@ -51,13 +51,23 @@ asmlinkage long sys_srp_sema_init (void)
 	for (sem_id = 0; sem_id < MAX_SRP_SEMAPHORES; sem_id++) {
 		if (!cmpxchg(&srp_sems[sem_id].used, 0, 1)) {
 			srp_sems[sem_id].pc_task = NULL;
-			srp_sems[sem_id].claimed = 0;
 			return sem_id;
 		}
 	}
 	return -ENOMEM;
 }
 
+/* SRP task priority comparison: a smaller period means higher priority;
+ * ties are broken by PID.
+ */
+int srp_higher_prio(struct task_struct* first, struct task_struct* second)
+{
+	return !second || !is_realtime(second) ||
+	       get_rt_period(first) < get_rt_period(second) ||
+	       (get_rt_period(first) == get_rt_period(second) &&
+	        first->pid < second->pid);
+}
+
 /* Adjust the system-wide priority ceiling if resource is claimed. */
 asmlinkage long sys_srp_down(srp_sema_id sem_id)
 {
@@ -68,7 +78,7 @@ asmlinkage long sys_srp_down(srp_sema_id sem_id)
 
 	queue_lock_irqsave(&srp_lock, flags);
 	srp_sems[sem_id].claimed = 1;
-	if (/* srp_sems[sem_id].pc_task higher prio than spc_task */1) {
+	if (srp_higher_prio(srp_sems[sem_id].pc_task, spc_task)) {
 		spc_task = srp_sems[sem_id].pc_task;
 	}
 	queue_unlock_irqrestore(&srp_lock, flags);
@@ -88,12 +98,10 @@ asmlinkage long sys_srp_up(srp_sema_id sem_id)
 	queue_lock_irqsave(&srp_lock, flags);
 	srp_sems[sem_id].claimed = 0;
 	spc_task = NULL;
-	for (sem_ctr = 0; sem_ctr < MAX_SRP_SEMAPHORES; sem_ctr++) {
-		if (srp_sems[sem_ctr].used && srp_sems[sem_ctr].claimed /* && */
-		    /* srp_sems[sem_ctr].pc_task higher prio than spc_task */) {
+	for (sem_ctr = 0; sem_ctr < MAX_SRP_SEMAPHORES; sem_ctr++)
+		if (srp_sems[sem_ctr].used && srp_sems[sem_ctr].claimed &&
+		    srp_higher_prio(srp_sems[sem_ctr].pc_task, spc_task))
 			spc_task = srp_sems[sem_ctr].pc_task;
-		}
-	}
 	queue_unlock_irqrestore(&srp_lock, flags);
 
 	return 0;
@@ -114,9 +122,8 @@ asmlinkage long sys_reg_task_srp_sem(srp_sema_id sem_id,
 		return -EINVAL;
 
 	queue_lock_irqsave(&srp_lock, flags);
-	if (/* t higher prio than srp_sems[sem_id].pc_task */1) {
+	if (srp_higher_prio(t, srp_sems[sem_id].pc_task))
 		srp_sems[sem_id].pc_task = t;
-	}
 	queue_unlock_irqrestore(&srp_lock, flags);
 
 	return 0;
@@ -130,21 +137,32 @@ void wait_until_exceed_spc(void)
 	unsigned long flags;
 
 	queue_lock_irqsave(&srp_lock, flags);
-	for (;;) { /* use callbacks here... */
-		if (/* current higher prio than spc_task */1) {
-			break;
-		}
-		queue_unlock_irqrestore(&srp_lock, flags);
-		schedule();
+	while (!srp_higher_prio(current, spc_task)) {
+		queue_unlock_irqrestore(&srp_lock, flags); schedule();
 		queue_lock_irqsave(&srp_lock, flags);
 	}
 	queue_unlock_irqrestore(&srp_lock, flags);
 }
 
-/* Free semaphore. */
+/* Free semaphore, adjusting the system-wide priority ceiling if necessary. */
 asmlinkage long sys_srp_sema_free(srp_sema_id sem_id)
 {
+	srp_sema_id sem_ctr;
+	unsigned long flags;
+
+	if (sem_id < 0 || sem_id >= MAX_SRP_SEMAPHORES)
+		return -EINVAL;
+
+	queue_lock_irqsave(&srp_lock, flags);
+	srp_sems[sem_id].claimed = 0;
 	srp_sems[sem_id].used = 0;
+	spc_task = NULL;
+	for (sem_ctr = 0; sem_ctr < MAX_SRP_SEMAPHORES; sem_ctr++)
+		if (srp_sems[sem_ctr].used && srp_sems[sem_ctr].claimed &&
+		    srp_higher_prio(srp_sems[sem_ctr].pc_task, spc_task))
+			spc_task = srp_sems[sem_ctr].pc_task;
+	queue_unlock_irqrestore(&srp_lock, flags);
+
 	return 0;
 }
 
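
One last detail worth noting: wait_until_exceed_spc() now polls, dropping
srp_lock around schedule() and retesting srp_higher_prio(current, spc_task)
after reacquiring it (the removed "use callbacks here" comment suggests
polling is an interim choice). The pthread program below is a rough
user-space model of that unlock/yield/relock shape only; the period-based
ceiling test and every name in it are simplified assumptions, not the
kernel's API.

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t srp_lock = PTHREAD_MUTEX_INITIALIZER;
static long ceiling_period = 50;	/* ceiling holder's period; 0 = no ceiling */

/* Model of the patched wait_until_exceed_spc(): retest under the lock,
 * but drop it around the yield so the ceiling holder can run and
 * eventually release its claim. */
static void wait_until_exceed_spc(long my_period)
{
	pthread_mutex_lock(&srp_lock);
	while (!(ceiling_period == 0 || my_period < ceiling_period)) {
		pthread_mutex_unlock(&srp_lock);
		sched_yield();		/* stands in for schedule() */
		pthread_mutex_lock(&srp_lock);
	}
	pthread_mutex_unlock(&srp_lock);
}

/* Stands in for the ceiling holder calling sys_srp_up(). */
static void *release_claim(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&srp_lock);
	ceiling_period = 0;	/* ceiling recomputed to "none" */
	pthread_mutex_unlock(&srp_lock);
	return NULL;
}

int main(void)
{
	pthread_t holder;

	pthread_create(&holder, NULL, release_claim, NULL);
	wait_until_exceed_spc(100);	/* period 100 does not beat ceiling 50 */
	pthread_join(holder, NULL);
	printf("past the priority ceiling\n");
	return 0;
}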