author     Bjoern B. Brandenburg <bbb@cs.unc.edu>    2007-05-14 10:01:10 -0400
committer  Bjoern B. Brandenburg <bbb@cs.unc.edu>    2007-05-14 10:01:10 -0400
commit     49b22590d881d19a7fed73401de148122431671e (patch)
tree       53898ca69005e1d1f3f147c68bc8b3e4ebd94a02 /arch
parent     f5c781fccc56aae85f4232df5ee16cd3b27f8ccd (diff)
Start of LITMUS semaphore, PI, and SRP rewrite.
Rearrange the code to move it out of the architecture-dependent areas of
the kernel and have it live in a central file, kernel/litmus_sem.c.
Diffstat (limited to 'arch')
-rw-r--r--  arch/i386/kernel/Makefile            |   3
-rw-r--r--  arch/i386/kernel/pi_sem_syscalls.c   | 132
-rw-r--r--  arch/i386/kernel/sem_syscalls.c      |  92
-rw-r--r--  arch/i386/kernel/srp_sem_syscalls.c  | 250
4 files changed, 1 insertion(+), 476 deletions(-)
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index f77d3e4bc7..1e8988e558 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -7,8 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds
 obj-y := process.o signal.o entry.o traps.o irq.o \
 		ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \
 		pci-dma.o i386_ksyms.o i387.o bootflag.o e820.o\
-		quirks.o i8237.o topology.o alternative.o i8253.o tsc.o \
-		pi_sem_syscalls.o sem_syscalls.o srp_sem_syscalls.o
+		quirks.o i8237.o topology.o alternative.o i8253.o tsc.o
 
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 obj-y				+= cpu/
diff --git a/arch/i386/kernel/pi_sem_syscalls.c b/arch/i386/kernel/pi_sem_syscalls.c
deleted file mode 100644
index 66f79aa88a..0000000000
--- a/arch/i386/kernel/pi_sem_syscalls.c
+++ /dev/null
@@ -1,132 +0,0 @@
-#ifdef __KERNEL__
-
-/*
- * SMP- and interrupt-safe semaphores, with priority inheritance.
- * Much of the code here is borrowed from include/asm-i386/semaphore.h.
- */
-
-#include <asm/atomic.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/sched_plugin.h>
-
-#define MAX_PI_SEMAPHORES 256
-
-struct pi_semaphore pi_sems[MAX_PI_SEMAPHORES]; /* all PI sems */
-typedef int pi_sema_id;	/* Userspace ID of a pi_semaphore */
-
-/* Initialize PI semaphores at boot time. */
-static int __init pi_sema_boot_init(void)
-{
-	pi_sema_id sem_id;
-
-	printk("Initializing PI semaphores...");
-	for (sem_id = 0; sem_id < MAX_PI_SEMAPHORES; sem_id++)
-		pi_sems[sem_id].used = 0;
-	printk(" done!\n");
-
-	return 0;
-}
-__initcall(pi_sema_boot_init);
-
-/* Find a free semaphore and return. */
-asmlinkage long sys_pi_sema_init (void)
-{
-	pi_sema_id sem_id;
-
-	for (sem_id = 0; sem_id < MAX_PI_SEMAPHORES; sem_id++) {
-		if (!cmpxchg(&pi_sems[sem_id].used, 0, 1)) {
-			atomic_set(&pi_sems[sem_id].count, 1);
-			pi_sems[sem_id].sleepers = 0;
-			init_waitqueue_head(&pi_sems[sem_id].wait);
-			pi_sems[sem_id].hp_sem_task = NULL;
-			pi_sems[sem_id].holder = NULL;
-			return sem_id;
-		}
-	}
-	return -ENOMEM;
-}
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down_failed" is a special asm handler that calls the C
- * routine that actually waits. See arch/i386/kernel/semaphore.c
- */
-void __sys_pi_down(struct pi_semaphore * sem)
-{
-	might_sleep();
-	/* Checking for active waitqueue gives others a chance... */
-	if (atomic_dec_return(&sem->count) < 0 || waitqueue_active(&sem->wait))
-		__pi_down(sem);
-}
-
-asmlinkage long sys_pi_down(pi_sema_id sem_id)
-{
-	unsigned long flags;
-
-	if (sem_id < 0 || sem_id >= MAX_PI_SEMAPHORES)
-		return -EINVAL;
-
-	__sys_pi_down(&pi_sems[sem_id]);
-
-	/* Update inherited priority if required. */
-	spin_lock_irqsave(&pi_sems[sem_id].wait.lock, flags);
-	if (!pi_sems[sem_id].holder) {
-		pi_sems[sem_id].holder = current;
-		curr_sched_plugin->inherit_priority(&pi_sems[sem_id], NULL);
-	}
-	spin_unlock_irqrestore(&pi_sems[sem_id].wait.lock, flags);
-	return 0;
-}
-
-/*
- * We always jump to wake people and update semaphore priority.
- * If the wait queue is empty, semaphore and priority update will
- * still operate correctly.
- */
-void __sys_pi_up(struct pi_semaphore * sem)
-{
-	atomic_inc(&sem->count);
-	__pi_up(sem);
-}
-
-asmlinkage long sys_pi_up(pi_sema_id sem_id)
-{
-	if (sem_id < 0 || sem_id >= MAX_PI_SEMAPHORES)
-		return -EINVAL;
-
-	__sys_pi_up(&pi_sems[sem_id]);
-	return 0;
-}
-
-/* Clear wait queue and wakeup waiting tasks, and free semaphore. */
-asmlinkage long sys_pi_sema_free(pi_sema_id sem_id)
-{
-	struct list_head *tmp, *next;
-	unsigned long flags;
-
-	if (sem_id < 0 || sem_id >= MAX_PI_SEMAPHORES)
-		return -EINVAL;
-
-	spin_lock_irqsave(&pi_sems[sem_id].wait.lock, flags);
-	if (waitqueue_active(&pi_sems[sem_id].wait)) {
-		list_for_each_safe(tmp, next,
-				   &pi_sems[sem_id].wait.task_list) {
-			wait_queue_t *curr = list_entry(tmp, wait_queue_t,
-							task_list);
-			list_del(tmp);
-			set_rt_flags((struct task_struct*)curr->private,
-				     RT_F_EXIT_SEM);
-			curr->func(curr,
-				   TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-				   0, NULL);
-		}
-	}
-
-	spin_unlock_irqrestore(&pi_sems[sem_id].wait.lock, flags);
-	pi_sems[sem_id].used = 0;
-
-	return 0;
-}
-
-#endif
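
For context, the entry points deleted above (sys_pi_sema_init, sys_pi_down, sys_pi_up, sys_pi_sema_free) were reached from user space as plain system calls, with the returned table index serving as the semaphore handle. The following is a minimal userspace sketch of that lifecycle; the __NR_* numbers and the raw syscall(2) wrappers are placeholders chosen for illustration, not the actual LITMUS syscall ABI, which is defined elsewhere in the tree.

/* Hypothetical userspace sketch: lifecycle of a LITMUS PI semaphore.
 * The syscall numbers below are placeholders (assumptions), not the
 * real entries from the LITMUS syscall table. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define __NR_pi_sema_init  400  /* placeholder number, assumption */
#define __NR_pi_down       401  /* placeholder */
#define __NR_pi_up         402  /* placeholder */
#define __NR_pi_sema_free  403  /* placeholder */

int main(void)
{
	/* sys_pi_sema_init() scans pi_sems[] for a free slot and returns
	 * its index, which user space passes to all later calls. */
	long sem = syscall(__NR_pi_sema_init);
	if (sem < 0) {
		perror("pi_sema_init");
		return 1;
	}

	syscall(__NR_pi_down, sem);      /* block until the semaphore is held */
	/* ... critical section; the holder may inherit a waiter's priority ... */
	syscall(__NR_pi_up, sem);        /* release and wake waiters */

	syscall(__NR_pi_sema_free, sem); /* return the slot to the table */
	return 0;
}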
diff --git a/arch/i386/kernel/sem_syscalls.c b/arch/i386/kernel/sem_syscalls.c
deleted file mode 100644
index 4ca92c9be5..0000000000
--- a/arch/i386/kernel/sem_syscalls.c
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * SMP- and interrupt-safe semaphores, using system calls from user space,
- * so that they work with the LSO. Uses semaphores as described in
- * include/asm-i386/semaphore.h.
- */
-
-#include <asm/semaphore.h>
-#include <linux/sched.h>
-
-#define MAX_SEMAPHORES 256
-
-struct semaphore sems[MAX_SEMAPHORES]; /* all sems */
-typedef int sema_id;	/* Userspace ID of a semaphore */
-
-/* Initialize semaphores at boot time. */
-static int __init sema_boot_init(void)
-{
-	sema_id sem_id;
-
-	printk("Initializing semaphores...");
-	for (sem_id = 0; sem_id < MAX_SEMAPHORES; sem_id++)
-		sems[sem_id].used = 0;
-	printk(" done!\n");
-
-	return 0;
-}
-__initcall(sema_boot_init);
-
-/* Find a free semaphore and return. */
-asmlinkage long sys_sema_init (void)
-{
-	sema_id sem_id;
-
-	for (sem_id = 0; sem_id < MAX_SEMAPHORES; sem_id++) {
-		if (!cmpxchg(&sems[sem_id].used, 0, 1)) {
-			sema_init(&sems[sem_id], 1);
-			return sem_id;
-		}
-	}
-	return -ENOMEM;
-}
-
-asmlinkage long sys_down(sema_id sem_id)
-{
-	if (sem_id < 0 || sem_id >= MAX_SEMAPHORES)
-		return -EINVAL;
-
-	/* This allows for FIFO sems and gives others a chance... */
-	if (waitqueue_active(&sems[sem_id].wait))
-		__down(&sems[sem_id]);
-	else
-		down(&sems[sem_id]);
-	return 0;
-}
-
-asmlinkage long sys_up(sema_id sem_id)
-{
-	if (sem_id < 0 || sem_id >= MAX_SEMAPHORES)
-		return -EINVAL;
-
-	up(&sems[sem_id]);
-	return 0;
-}
-
-asmlinkage long sys_sema_free(sema_id sem_id)
-{
-	struct list_head *tmp, *next;
-	unsigned long flags;
-
-	if (sem_id < 0 || sem_id >= MAX_SEMAPHORES)
-		return -EINVAL;
-
-	spin_lock_irqsave(&sems[sem_id].wait.lock, flags);
-	if (waitqueue_active(&sems[sem_id].wait)) {
-		list_for_each_safe(tmp, next, &sems[sem_id].wait.task_list) {
-			wait_queue_t *curr = list_entry(tmp, wait_queue_t,
-							task_list);
-			list_del(tmp);
-			set_rt_flags((struct task_struct*)curr->private,
-				     RT_F_EXIT_SEM);
-			curr->func(curr,
-				   TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-				   0, NULL);
-		}
-	}
-
-	spin_unlock_irqrestore(&sems[sem_id].wait.lock, flags);
-	sems[sem_id].used = 0;
-
-	return 0;
-}
-
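
The allocator in sys_sema_init() above claims a free slot in a fixed-size table with the kernel's cmpxchg() and hands the table index back to user space as the handle; sys_sema_free() simply clears the flag again. Below is a standalone sketch of that allocation pattern; GCC's __atomic compare-exchange builtin stands in for the kernel's cmpxchg() purely so the snippet compiles outside the kernel (an assumption made for illustration).

/* Sketch of the fixed-table handle allocation used by sys_sema_init():
 * atomically flip a slot's 'used' flag from 0 to 1 and return its index.
 * __atomic_compare_exchange_n replaces the kernel's cmpxchg(). */
#include <stdio.h>

#define MAX_SEMAPHORES 256

struct sem_slot {
	int used;   /* 0 = free, 1 = claimed */
	int count;  /* semaphore value, initialized to 1 on allocation */
};

static struct sem_slot sems[MAX_SEMAPHORES];

static long sema_alloc(void)
{
	long id;

	for (id = 0; id < MAX_SEMAPHORES; id++) {
		int expected = 0;
		/* Succeeds for exactly one caller racing on this slot. */
		if (__atomic_compare_exchange_n(&sems[id].used, &expected, 1,
						0, __ATOMIC_SEQ_CST,
						__ATOMIC_SEQ_CST)) {
			sems[id].count = 1;
			return id;          /* index doubles as the handle */
		}
	}
	return -1;                          /* table exhausted (-ENOMEM) */
}

int main(void)
{
	printf("allocated handle %ld\n", sema_alloc());
	return 0;
}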
diff --git a/arch/i386/kernel/srp_sem_syscalls.c b/arch/i386/kernel/srp_sem_syscalls.c
deleted file mode 100644
index a07c36c7bf..0000000000
--- a/arch/i386/kernel/srp_sem_syscalls.c
+++ /dev/null
@@ -1,250 +0,0 @@
-#ifdef __KERNEL__
-
-/*
- * Uniprocessor SRP "semaphores".
- */
-
-#include <linux/sched.h>
-#include <linux/queuelock.h>
-#include <linux/sched_plugin.h>
-
-#define MAX_SRP_SEMAPHORES 256
-
-/* struct for uniprocessor SRP "semaphore" */
-struct srp_semaphore {
-	struct task_struct *pc_task; /* task representing prio ceil of sem */
-	int cpu;     /* cpu associated with this "semaphore" and resource */
-	int claimed; /* is the resource claimed (ceiling should be used)? */
-	int used;    /* is the semaphore being used? */
-};
-
-struct srp_semaphore srp_sems[MAX_SRP_SEMAPHORES]; /* all SRP sems */
-typedef int srp_sema_id;	/* Userspace ID of a srp_semaphore */
-
-/* System-wide priority ceiling, represented as a pointer to a task. */
-DEFINE_PER_CPU(struct task_struct *, spc_tasks);
-DEFINE_PER_CPU(wait_queue_head_t, spc_waitqueues);
-
-/* Used to serialize access to SRP semaphores and system priority ceiling. */
-static queuelock_t srp_lock;
-
-/* Initialize SRP semaphores at boot time. */
-static int __init srp_sema_boot_init(void)
-{
-	srp_sema_id sem_id;
-	int i;
-
-	printk("Initializing SRP semaphores...");
-	for (sem_id = 0; sem_id < MAX_SRP_SEMAPHORES; sem_id++) {
-		srp_sems[sem_id].used = 0;
-		srp_sems[sem_id].claimed = 0;
-		srp_sems[sem_id].cpu = -1;
-	}
-	for (i = 0; i < NR_CPUS; i++) {
-		per_cpu(spc_tasks, i) = NULL;
-		init_waitqueue_head(&per_cpu(spc_waitqueues, i));
-	}
-	queue_lock_init(&srp_lock);
-	printk(" done!\n");
-
-	return 0;
-}
-__initcall(srp_sema_boot_init);
-
-/* Find a free semaphore and return. */
-asmlinkage long sys_srp_sema_init (void)
-{
-	srp_sema_id sem_id;
-
-	for (sem_id = 0; sem_id < MAX_SRP_SEMAPHORES; sem_id++) {
-		if (!cmpxchg(&srp_sems[sem_id].used, 0, 1)) {
-			srp_sems[sem_id].pc_task = NULL;
-			return sem_id;
-		}
-	}
-	return -ENOMEM;
-}
-
-/* SRP task priority comparison function. Smaller periods have highest
- * priority, tie-break is PID.
- */
-int srp_higher_prio(struct task_struct* first, struct task_struct* second)
-{
-	return !second || !is_realtime(second) ||
-	       get_rt_period(first) < get_rt_period(second) ||
-	       (get_rt_period(first) == get_rt_period(second) &&
-	        first->pid < second->pid);
-}
-
-/* Adjust the system-wide priority ceiling if resource is claimed. */
-asmlinkage long sys_srp_down(srp_sema_id sem_id)
-{
-	unsigned long flags;
-	int cpu = smp_processor_id();
-
-	if (sem_id < 0 || sem_id >= MAX_SRP_SEMAPHORES ||
-	    srp_sems[sem_id].cpu != cpu)
-		return -EINVAL;
-
-	queue_lock_irqsave(&srp_lock, flags);
-	srp_sems[sem_id].claimed = 1;
-	if (srp_higher_prio(srp_sems[sem_id].pc_task,
-			    __get_cpu_var(spc_tasks))) {
-		__get_cpu_var(spc_tasks) = srp_sems[sem_id].pc_task;
-	}
-	queue_unlock_irqrestore(&srp_lock, flags);
-
-	return 0;
-}
-
-/* Adjust the system-wide priority ceiling if resource is freed. */
-asmlinkage long sys_srp_up(srp_sema_id sem_id)
-{
-	srp_sema_id sem_ctr;
-	struct list_head *tmp, *next;
-	unsigned long flags;
-	int cpu = smp_processor_id();
-
-	if (sem_id < 0 || sem_id >= MAX_SRP_SEMAPHORES ||
-	    srp_sems[sem_id].cpu != cpu)
-		return -EINVAL;
-
-	queue_lock_irqsave(&srp_lock, flags);
-
-	/* Determine new system priority ceiling for this CPU. */
-	srp_sems[sem_id].claimed = 0;
-	__get_cpu_var(spc_tasks) = NULL;
-	for (sem_ctr = 0; sem_ctr < MAX_SRP_SEMAPHORES; sem_ctr++)
-		if (srp_sems[sem_ctr].used && srp_sems[sem_ctr].claimed &&
-		    srp_sems[sem_ctr].cpu == cpu &&
-		    srp_higher_prio(srp_sems[sem_ctr].pc_task,
-				    __get_cpu_var(spc_tasks)))
-			__get_cpu_var(spc_tasks) = srp_sems[sem_ctr].pc_task;
-
-	/* Wake tasks on this CPU, if they exceed current ceiling. */
-	if (waitqueue_active(&__get_cpu_var(spc_waitqueues))) {
-		list_for_each_safe(tmp, next,
-				   &__get_cpu_var(spc_waitqueues).task_list) {
-			wait_queue_t *curr = list_entry(tmp, wait_queue_t,
-							task_list);
-			set_rt_flags((struct task_struct*)curr->private,
-				     RT_F_EXIT_SEM);
-
-			if (srp_higher_prio((struct task_struct*)curr->private,
-					    __get_cpu_var(spc_tasks)))
-				curr->func(curr,
-					   TASK_UNINTERRUPTIBLE |
-					   TASK_INTERRUPTIBLE, 0, NULL);
-		}
-	}
-
-	queue_unlock_irqrestore(&srp_lock, flags);
-	return 0;
-}
-
-/* Indicate that task will use a resource associated with a given
- * semaphore. Should be done *a priori* before RT task system is
- * executed, so this does *not* update the system priority
- * ceiling! (The ceiling would be meaningless anyway, as the SRP
- * breaks without this a priori knowledge.)
- */
-asmlinkage long sys_reg_task_srp_sem(srp_sema_id sem_id, pid_t t_pid)
-{
-	unsigned long flags;
-	struct pid *task_pid;
-	struct task_struct *t;
-
-	if (sem_id < 0 || sem_id >= MAX_SRP_SEMAPHORES)
-		return -EINVAL;
-
-	task_pid = find_get_pid(t_pid);
-	if (!task_pid)
-		return -EINVAL;
-
-	t = get_pid_task(task_pid, PIDTYPE_PID);
-	if (!t)
-		return -EINVAL;
-
-	queue_lock_irqsave(&srp_lock, flags);
-	if (srp_sems[sem_id].cpu == -1)
-		srp_sems[sem_id].cpu = get_partition(t);
-	else if (srp_sems[sem_id].cpu != get_partition(t)) {
-		queue_unlock_irqrestore(&srp_lock, flags);
-		return -EINVAL;
-	}
-	if (srp_higher_prio(t, srp_sems[sem_id].pc_task))
-		srp_sems[sem_id].pc_task = t;
-	queue_unlock_irqrestore(&srp_lock, flags);
-
-	return 0;
-}
-
-/* Wait for current task priority to exceed system-wide priority ceiling.
- * Can be used to determine when it is safe to run a job after its release.
- */
-void wait_until_exceed_spc(void)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	queue_lock_irqsave(&srp_lock, flags);
-	add_wait_queue_exclusive_locked(&__get_cpu_var(spc_waitqueues), &wait);
-	while(!srp_higher_prio(tsk, __get_cpu_var(spc_tasks))) {
-		tsk->state = TASK_UNINTERRUPTIBLE;
-		queue_unlock_irqrestore(&srp_lock, flags);
-		schedule();
-		queue_lock_irqsave(&srp_lock, flags);
-	}
-	remove_wait_queue_locked(&__get_cpu_var(spc_waitqueues), &wait);
-	queue_unlock_irqrestore(&srp_lock, flags);
-}
-
-/* Free semaphore, adjusting the system-wide priority ceiling if necessary. */
-asmlinkage long sys_srp_sema_free(srp_sema_id sem_id)
-{
-	srp_sema_id sem_ctr;
-	struct list_head *tmp, *next;
-	unsigned long flags;
-	int cpu = smp_processor_id();
-
-	if (sem_id < 0 || sem_id >= MAX_SRP_SEMAPHORES ||
-	    srp_sems[sem_id].cpu != cpu)
-		return -EINVAL;
-
-	queue_lock_irqsave(&srp_lock, flags);
-	srp_sems[sem_id].claimed = 0;
-	srp_sems[sem_id].used = 0;
-
-	/* Determine new system priority ceiling for this CPU. */
-	__get_cpu_var(spc_tasks) = NULL;
-	for (sem_ctr = 0; sem_ctr < MAX_SRP_SEMAPHORES; sem_ctr++)
-		if (srp_sems[sem_ctr].used && srp_sems[sem_ctr].claimed &&
-		    srp_sems[sem_ctr].cpu == cpu &&
-		    srp_higher_prio(srp_sems[sem_ctr].pc_task,
-				    __get_cpu_var(spc_tasks)))
-			__get_cpu_var(spc_tasks) = srp_sems[sem_ctr].pc_task;
-	srp_sems[sem_id].cpu = -1;
-
-	/* Wake tasks on this CPU, if they exceed current ceiling. */
-	if (waitqueue_active(&__get_cpu_var(spc_waitqueues))) {
-		list_for_each_safe(tmp, next,
-				   &__get_cpu_var(spc_waitqueues).task_list) {
-			wait_queue_t *curr = list_entry(tmp, wait_queue_t,
-							task_list);
-			set_rt_flags((struct task_struct*)curr->private,
-				     RT_F_EXIT_SEM);
-
-			if (srp_higher_prio((struct task_struct*)curr->private,
-					    __get_cpu_var(spc_tasks)))
-				curr->func(curr,
-					   TASK_UNINTERRUPTIBLE |
-					   TASK_INTERRUPTIBLE, 0, NULL);
-		}
-	}
-
-	queue_unlock_irqrestore(&srp_lock, flags);
-	return 0;
-}
-
-#endif
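
The ceiling logic being deleted above boils down to srp_higher_prio(): a task beats the current ceiling if no ceiling task is set, if the ceiling task is not a real-time task, or if the task has a shorter period, with PIDs breaking ties; sys_srp_up() and sys_srp_sema_free() then recompute the per-CPU ceiling as the highest-priority pc_task among the still-claimed semaphores on that CPU. A small standalone sketch of that comparison and recomputation follows; the simplified task record and the omission of the is_realtime() check are assumptions made so the snippet is self-contained.

/* Sketch of the SRP ceiling rules: shorter period means higher priority,
 * ties broken by smaller PID; the per-CPU ceiling is the highest-priority
 * pc_task among claimed semaphores on that CPU. Simplified task records
 * replace struct task_struct for illustration. */
#include <stdio.h>

struct fake_task { int pid; unsigned long period; };

#define MAX_SRP 4

struct fake_srp_sem {
	struct fake_task *pc_task;  /* priority ceiling of this semaphore */
	int cpu;
	int claimed;
	int used;
};

static struct fake_srp_sem srp_sems[MAX_SRP];

/* Mirrors srp_higher_prio(): first wins if there is no ceiling task,
 * if it has a shorter period, or on the PID tie-break. */
static int higher_prio(struct fake_task *first, struct fake_task *second)
{
	return !second ||
	       first->period < second->period ||
	       (first->period == second->period && first->pid < second->pid);
}

/* Mirrors the recomputation loop in sys_srp_up()/sys_srp_sema_free(). */
static struct fake_task *recompute_ceiling(int cpu)
{
	struct fake_task *ceiling = NULL;
	int i;

	for (i = 0; i < MAX_SRP; i++)
		if (srp_sems[i].used && srp_sems[i].claimed &&
		    srp_sems[i].cpu == cpu &&
		    higher_prio(srp_sems[i].pc_task, ceiling))
			ceiling = srp_sems[i].pc_task;
	return ceiling;
}

int main(void)
{
	struct fake_task a = { .pid = 10, .period = 50 };
	struct fake_task b = { .pid = 20, .period = 25 };

	srp_sems[0] = (struct fake_srp_sem){ .pc_task = &a, .cpu = 0,
					     .claimed = 1, .used = 1 };
	srp_sems[1] = (struct fake_srp_sem){ .pc_task = &b, .cpu = 0,
					     .claimed = 1, .used = 1 };

	/* b has the shorter period, so it becomes the ceiling. */
	printf("ceiling pid = %d\n", recompute_ceiling(0)->pid);
	return 0;
}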