author     John M. Calandrino <jmc@jupiter-cs.cs.unc.edu>    2007-05-02 15:18:16 -0400
committer  John M. Calandrino <jmc@jupiter-cs.cs.unc.edu>    2007-05-02 15:18:16 -0400
commit     86afe70125df4ee62cef85cbb035c8c9ab353885 (patch)
tree       6e4bed7e5611c091bd2c507218410a6de9ed0217 /arch
parent     af47b4772ce86d29d941cff94dfd56526118e9a2 (diff)
Fixed SRP so that it uses per-processor priority ceilings.
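In effect, the single system-wide ceiling pointer spc_task becomes the per-CPU variable spc_tasks, each semaphore is bound to one CPU (partition), and only the claimed semaphores of a task's own partition contribute to that partition's ceiling. A minimal user-space model of this bookkeeping, as a sketch with illustrative names and plain integer priorities where the kernel compares task pointers via srp_higher_prio():

#include <stdio.h>

#define NR_CPUS_MODEL 2
#define MAX_SEMS      4

struct srp_sem {
	int prio_ceiling; /* highest priority of any task registered on the sem */
	int cpu;          /* partition (CPU) the protected resource lives on */
	int claimed;
	int used;
};

static struct srp_sem sems[MAX_SEMS];
static int spc[NR_CPUS_MODEL]; /* per-CPU system priority ceiling */

/* down: claim the semaphore and raise only its own CPU's ceiling */
static void model_down(int id)
{
	sems[id].claimed = 1;
	if (sems[id].prio_ceiling > spc[sems[id].cpu])
		spc[sems[id].cpu] = sems[id].prio_ceiling;
}

/* up: release, then recompute the ceiling from the semaphores still
 * claimed on the same CPU; other CPUs' ceilings are untouched */
static void model_up(int id)
{
	int cpu = sems[id].cpu, i;

	sems[id].claimed = 0;
	spc[cpu] = 0;
	for (i = 0; i < MAX_SEMS; i++)
		if (sems[i].used && sems[i].claimed && sems[i].cpu == cpu &&
		    sems[i].prio_ceiling > spc[cpu])
			spc[cpu] = sems[i].prio_ceiling;
}

int main(void)
{
	sems[0] = (struct srp_sem){ .prio_ceiling = 5, .cpu = 0, .used = 1 };
	sems[1] = (struct srp_sem){ .prio_ceiling = 9, .cpu = 1, .used = 1 };

	model_down(0);
	model_down(1);
	printf("ceilings: cpu0=%d cpu1=%d\n", spc[0], spc[1]); /* 5 9 */
	model_up(0);
	printf("ceilings: cpu0=%d cpu1=%d\n", spc[0], spc[1]); /* 0 9 */
	return 0;
}

The patch performs the same recomputation in sys_srp_up() and sys_srp_sema_free(), but under srp_lock and through the per_cpu()/__get_cpu_var() accessors.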
Diffstat (limited to 'arch')
-rw-r--r--  arch/i386/kernel/srp_sem_syscalls.c  52
1 file changed, 38 insertions, 14 deletions
diff --git a/arch/i386/kernel/srp_sem_syscalls.c b/arch/i386/kernel/srp_sem_syscalls.c
index 24436a9bc8..9c73325cf8 100644
--- a/arch/i386/kernel/srp_sem_syscalls.c
+++ b/arch/i386/kernel/srp_sem_syscalls.c
@@ -13,6 +13,7 @@
 /* struct for uniprocessor SRP "semaphore" */
 struct srp_semaphore {
 	struct task_struct *pc_task; /* task representing prio ceil of sem */
+	int cpu; /* cpu associated with this "semaphore" and resource */
 	int claimed; /* is the resource claimed (ceiling should be used)? */
 	int used; /* is the semaphore being used? */
 };
@@ -21,7 +22,7 @@ struct srp_semaphore srp_sems[MAX_SRP_SEMAPHORES]; /* all SRP sems */
 typedef int srp_sema_id; /* Userspace ID of a srp_semaphore */
 
 /* System-wide priority ceiling, represented as a pointer to a task. */
-struct task_struct *spc_task = NULL;
+DEFINE_PER_CPU(struct task_struct *, spc_tasks);
 
 /* Used to serialize access to SRP semaphores and system priority ceiling. */
 static queuelock_t srp_lock;
@@ -30,12 +31,16 @@ static queuelock_t srp_lock;
 static int __init srp_sema_boot_init(void)
 {
 	srp_sema_id sem_id;
+	int i;
 
 	printk("Initializing SRP semaphores...");
 	for (sem_id = 0; sem_id < MAX_SRP_SEMAPHORES; sem_id++) {
 		srp_sems[sem_id].used = 0;
 		srp_sems[sem_id].claimed = 0;
+		srp_sems[sem_id].cpu = -1;
 	}
+	for (i = 0; i < NR_CPUS; i++)
+		per_cpu(spc_tasks, i) = NULL;
 	queue_lock_init(&srp_lock);
 	printk(" complete!\n");
 
@@ -72,14 +77,17 @@ int srp_higher_prio(struct task_struct* first, struct task_struct* second)
 asmlinkage long sys_srp_down(srp_sema_id sem_id)
 {
 	unsigned long flags;
+	int cpu = smp_processor_id();
 
-	if (sem_id < 0 || sem_id >= MAX_SRP_SEMAPHORES)
+	if (sem_id < 0 || sem_id >= MAX_SRP_SEMAPHORES ||
+	    srp_sems[sem_id].cpu != cpu)
 		return -EINVAL;
 
 	queue_lock_irqsave(&srp_lock, flags);
 	srp_sems[sem_id].claimed = 1;
-	if (srp_higher_prio(srp_sems[sem_id].pc_task, spc_task)) {
-		spc_task = srp_sems[sem_id].pc_task;
+	if (srp_higher_prio(srp_sems[sem_id].pc_task,
+			    __get_cpu_var(spc_tasks))) {
+		__get_cpu_var(spc_tasks) = srp_sems[sem_id].pc_task;
 	}
 	queue_unlock_irqrestore(&srp_lock, flags);
 
@@ -91,17 +99,21 @@ asmlinkage long sys_srp_up(srp_sema_id sem_id)
 {
 	srp_sema_id sem_ctr;
 	unsigned long flags;
+	int cpu = smp_processor_id();
 
-	if (sem_id < 0 || sem_id >= MAX_SRP_SEMAPHORES)
+	if (sem_id < 0 || sem_id >= MAX_SRP_SEMAPHORES ||
+	    srp_sems[sem_id].cpu != cpu)
 		return -EINVAL;
 
 	queue_lock_irqsave(&srp_lock, flags);
 	srp_sems[sem_id].claimed = 0;
-	spc_task = NULL;
+	__get_cpu_var(spc_tasks) = NULL;
 	for (sem_ctr = 0; sem_ctr < MAX_SRP_SEMAPHORES; sem_ctr++)
 		if (srp_sems[sem_ctr].used && srp_sems[sem_ctr].claimed &&
-		    srp_higher_prio(srp_sems[sem_ctr].pc_task, spc_task))
-			spc_task = srp_sems[sem_ctr].pc_task;
+		    srp_sems[sem_ctr].cpu == cpu &&
+		    srp_higher_prio(srp_sems[sem_ctr].pc_task,
+				    __get_cpu_var(spc_tasks)))
+			__get_cpu_var(spc_tasks) = srp_sems[sem_ctr].pc_task;
 	queue_unlock_irqrestore(&srp_lock, flags);
 
 	return 0;
@@ -131,6 +143,12 @@ asmlinkage long sys_reg_task_srp_sem(srp_sema_id sem_id, pid_t t_pid)
 		return -EINVAL;
 
 	queue_lock_irqsave(&srp_lock, flags);
+	if (srp_sems[sem_id].cpu == -1)
+		srp_sems[sem_id].cpu = get_partition(t);
+	else if (srp_sems[sem_id].cpu != get_partition(t)) {
+		queue_unlock_irqrestore(&srp_lock, flags);
+		return -EINVAL;
+	}
 	if (srp_higher_prio(t, srp_sems[sem_id].pc_task))
 		srp_sems[sem_id].pc_task = t;
 	queue_unlock_irqrestore(&srp_lock, flags);
@@ -146,8 +164,9 @@ void wait_until_exceed_spc(void)
 	unsigned long flags;
 
 	queue_lock_irqsave(&srp_lock, flags);
-	while(!srp_higher_prio(current, spc_task)) {
-		queue_unlock_irqrestore(&srp_lock, flags); schedule();
+	while(!srp_higher_prio(current, __get_cpu_var(spc_tasks))) {
+		queue_unlock_irqrestore(&srp_lock, flags);
+		schedule();
 		queue_lock_irqsave(&srp_lock, flags);
 	}
 	queue_unlock_irqrestore(&srp_lock, flags);
@@ -158,18 +177,23 @@ asmlinkage long sys_srp_sema_free(srp_sema_id sem_id)
 {
 	srp_sema_id sem_ctr;
 	unsigned long flags;
+	int cpu = smp_processor_id();
 
-	if (sem_id < 0 || sem_id >= MAX_SRP_SEMAPHORES)
+	if (sem_id < 0 || sem_id >= MAX_SRP_SEMAPHORES ||
+	    srp_sems[sem_id].cpu != cpu)
 		return -EINVAL;
 
 	queue_lock_irqsave(&srp_lock, flags);
 	srp_sems[sem_id].claimed = 0;
 	srp_sems[sem_id].used = 0;
-	spc_task = NULL;
+	__get_cpu_var(spc_tasks) = NULL;
 	for (sem_ctr = 0; sem_ctr < MAX_SRP_SEMAPHORES; sem_ctr++)
 		if (srp_sems[sem_ctr].used && srp_sems[sem_ctr].claimed &&
-		    srp_higher_prio(srp_sems[sem_ctr].pc_task, spc_task))
-			spc_task = srp_sems[sem_ctr].pc_task;
+		    srp_sems[sem_ctr].cpu == cpu &&
+		    srp_higher_prio(srp_sems[sem_ctr].pc_task,
+				    __get_cpu_var(spc_tasks)))
+			__get_cpu_var(spc_tasks) = srp_sems[sem_ctr].pc_task;
+	srp_sems[sem_id].cpu = -1;
 	queue_unlock_irqrestore(&srp_lock, flags);
 
 	return 0;
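For context, the calling sequence from user space keeps the same shape after this change: a task registers against a semaphore with sys_reg_task_srp_sem() (which now also binds the semaphore to the first registrant's partition and rejects registrations from other partitions), then brackets each critical section with sys_srp_down()/sys_srp_up(). A hypothetical wrapper, assuming the calls are exposed as ordinary syscalls; the __NR_* numbers below are placeholders, not part of this patch:

/* Hypothetical user-space wrappers around the SRP syscalls above. */
#include <sys/types.h>
#include <sys/syscall.h>
#include <unistd.h>

#define __NR_reg_task_srp_sem 400 /* placeholder syscall numbers */
#define __NR_srp_down         401
#define __NR_srp_up           402

static long reg_task_srp_sem(int sem_id, pid_t pid)
{
	return syscall(__NR_reg_task_srp_sem, sem_id, pid);
}

static long srp_down(int sem_id) { return syscall(__NR_srp_down, sem_id); }
static long srp_up(int sem_id)   { return syscall(__NR_srp_up, sem_id); }

int main(void)
{
	int sem_id = 0; /* assume the semaphore was already allocated */

	if (reg_task_srp_sem(sem_id, getpid()) < 0)
		return 1; /* e.g. the semaphore belongs to another partition */

	srp_down(sem_id); /* raises this CPU's priority ceiling */
	/* ... critical section on the shared resource ... */
	srp_up(sem_id);   /* recomputes the ceiling for this CPU only */
	return 0;
}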