-rw-r--r--  include/litmus/litmus.h |   2
-rw-r--r--  include/litmus/srp.h    |  28
-rw-r--r--  litmus/fdso.c           |   4
-rw-r--r--  litmus/sched_psn_edf.c  |  45
-rw-r--r--  litmus/srp.c            | 236
5 files changed, 211 insertions(+), 104 deletions(-)
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 4a774a9e7acc..8971b25f23e6 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -115,7 +115,7 @@ static inline lt_t litmus_clock(void)
 
 void preempt_if_preemptable(struct task_struct* t, int on_cpu);
 
-#ifdef CONFIG_SRP
+#ifdef CONFIG_LITMUS_LOCKING
 void srp_ceiling_block(void);
 #else
 #define srp_ceiling_block() /* nothing */
diff --git a/include/litmus/srp.h b/include/litmus/srp.h
new file mode 100644
index 000000000000..c9a4552b2bf3
--- /dev/null
+++ b/include/litmus/srp.h
@@ -0,0 +1,28 @@
+#ifndef LITMUS_SRP_H
+#define LITMUS_SRP_H
+
+struct srp_semaphore;
+
+struct srp_priority {
+	struct list_head	list;
+	unsigned int		priority;
+	pid_t			pid;
+};
+#define list2prio(l) list_entry(l, struct srp_priority, list)
+
+/* struct for uniprocessor SRP "semaphore" */
+struct srp_semaphore {
+	struct litmus_lock litmus_lock;
+	struct srp_priority ceiling;
+	struct task_struct* owner;
+	int cpu; /* cpu associated with this "semaphore" and resource */
+};
+
+/* map a task to its SRP preemption level priority */
+typedef unsigned int (*srp_prioritization_t)(struct task_struct* t);
+/* Must be updated by each plugin that uses SRP.*/
+extern srp_prioritization_t get_srp_prio;
+
+struct srp_semaphore* allocate_srp_semaphore(void);
+
+#endif
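The header above embeds a struct litmus_lock inside struct srp_semaphore; the lock callbacks in litmus/srp.c later recover the enclosing semaphore via container_of(). A minimal standalone sketch of that embedding pattern (toy userspace types, not code from this patch):

/* Standalone sketch of the embedding pattern above: a generic lock member
 * sits inside the SRP-specific semaphore, and a callback that only receives
 * the generic part recovers the full object with container_of().
 * Toy userspace types only; not code from this patch. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_lock {			/* stands in for struct litmus_lock */
	int id;
};

struct toy_srp_semaphore {
	struct toy_lock litmus_lock;	/* embedded generic part */
	unsigned int ceiling_priority;	/* SRP-specific state */
};

static void toy_lock_callback(struct toy_lock *l)
{
	/* recover the enclosing semaphore from the generic handle */
	struct toy_srp_semaphore *sem =
		container_of(l, struct toy_srp_semaphore, litmus_lock);
	printf("ceiling priority = %u\n", sem->ceiling_priority);
}

int main(void)
{
	struct toy_srp_semaphore sem = {
		.litmus_lock = { .id = 1 },
		.ceiling_priority = 10,
	};
	toy_lock_callback(&sem.litmus_lock);	/* prints: ceiling priority = 10 */
	return 0;
}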
diff --git a/litmus/fdso.c b/litmus/fdso.c
index faede9bcbbbf..209431f3ce11 100644
--- a/litmus/fdso.c
+++ b/litmus/fdso.c
@@ -19,11 +19,11 @@
 #include <litmus/fdso.h>
 
 extern struct fdso_ops fmlp_sem_ops;
-extern struct fdso_ops srp_sem_ops;
+extern struct fdso_ops generic_lock_ops;
 
 static const struct fdso_ops* fdso_ops[] = {
 	&fmlp_sem_ops,
-	&srp_sem_ops,
+	&generic_lock_ops, /* SRP_SEM */
 };
 
 static void* fdso_create(obj_type_t type)
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 01f31e407082..c1e27960576b 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -435,6 +435,45 @@ static long psnedf_return_priority(struct pi_semaphore *sem)
 
 #endif
 
+#ifdef CONFIG_LITMUS_LOCKING
+
+#include <litmus/fdso.h>
+#include <litmus/srp.h>
+
+static unsigned int psnedf_get_srp_prio(struct task_struct* t)
+{
+	/* assumes implicit deadlines */
+	return get_rt_period(t);
+}
+
+static long psnedf_activate_plugin(void)
+{
+	get_srp_prio = psnedf_get_srp_prio;
+	return 0;
+}
+
+static long psnedf_allocate_lock(struct litmus_lock **lock, int type)
+{
+	int err = -ENXIO;
+	struct srp_semaphore* srp;
+
+	switch (type) {
+	case SRP_SEM:
+		/* Baker's SRP */
+		srp = allocate_srp_semaphore();
+		if (srp) {
+			*lock = &srp->litmus_lock;
+			err = 0;
+		} else
+			err = -ENOMEM;
+		break;
+	};
+
+	return err;
+}
+
+#endif
+
 static long psnedf_admit_task(struct task_struct* tsk)
 {
 	return task_cpu(tsk) == tsk->rt_param.task_params.cpu ? 0 : -EINVAL;
@@ -450,7 +489,11 @@ static struct sched_plugin psn_edf_plugin __cacheline_aligned_in_smp = {
 	.schedule		= psnedf_schedule,
 	.task_wake_up		= psnedf_task_wake_up,
 	.task_block		= psnedf_task_block,
-	.admit_task		= psnedf_admit_task
+	.admit_task		= psnedf_admit_task,
+#ifdef CONFIG_LITMUS_LOCKING
+	.allocate_lock		= psnedf_allocate_lock,
+	.activate_plugin	= psnedf_activate_plugin,
+#endif
 };
 
 
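psnedf_get_srp_prio() above defines a task's SRP preemption level as its period, relying on implicit deadlines: a numerically smaller value means a higher preemption level. A minimal standalone sketch of that mapping (made-up task type and values, not code from the patch):

/* Toy illustration of the preemption-level mapping used above by PSN-EDF:
 * with implicit deadlines the period doubles as the SRP preemption level,
 * and a numerically smaller value means a higher level.
 * The task type and values are made up for this example. */
#include <stdio.h>

struct toy_task {
	const char *name;
	unsigned int period;	/* implicit deadline == period */
};

/* counterpart of psnedf_get_srp_prio() in this sketch */
static unsigned int toy_get_srp_prio(const struct toy_task *t)
{
	return t->period;
}

int main(void)
{
	struct toy_task a = { "A", 10 };	/* short period, high level */
	struct toy_task b = { "B", 25 };	/* long period, low level */

	/* smaller value wins: A's level (10) is higher than B's (25) */
	printf("A outranks B: %d\n",
	       toy_get_srp_prio(&a) < toy_get_srp_prio(&b));	/* prints 1 */
	return 0;
}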
diff --git a/litmus/srp.c b/litmus/srp.c
index b4c171e79fd4..2ed4ec12a9d3 100644
--- a/litmus/srp.c
+++ b/litmus/srp.c
@@ -12,42 +12,25 @@
 #include <litmus/trace.h>
 
 
-#ifdef CONFIG_SRP
+#ifdef CONFIG_LITMUS_LOCKING
 
-struct srp_priority {
-	struct list_head	list;
-	unsigned int		period;
-	pid_t			pid;
-};
+#include <litmus/srp.h>
 
-#define list2prio(l) list_entry(l, struct srp_priority, list)
-
-/* SRP task priority comparison function. Smaller periods have highest
- * priority, tie-break is PID. Special case: period == 0 <=> no priority
- */
-static int srp_higher_prio(struct srp_priority* first,
-			   struct srp_priority* second)
-{
-	if (!first->period)
-		return 0;
-	else
-		return !second->period ||
-			first->period < second->period || (
-			first->period == second->period &&
-			first->pid < second->pid);
-}
+srp_prioritization_t get_srp_prio;
 
 struct srp {
 	struct list_head ceiling;
 	wait_queue_head_t ceiling_blocked;
 };
+#define system_ceiling(srp) list2prio(srp->ceiling.next)
+#define ceiling2sem(c) container_of(c, struct srp_semaphore, ceiling)
 
+#define UNDEF_SEM -2
 
 atomic_t srp_objects_in_use = ATOMIC_INIT(0);
 
 DEFINE_PER_CPU(struct srp, srp);
 
-
 /* Initialize SRP semaphores at boot time. */
 static int __init srp_init(void)
 {
@@ -64,30 +47,35 @@ static int __init srp_init(void)
 }
 module_init(srp_init);
 
+/* SRP task priority comparison function. Smaller numeric values have higher
+ * priority, tie-break is PID. Special case: priority == 0 <=> no priority
+ */
+static int srp_higher_prio(struct srp_priority* first,
+			   struct srp_priority* second)
+{
+	if (!first->priority)
+		return 0;
+	else
+		return !second->priority ||
+			first->priority < second->priority || (
+			first->priority == second->priority &&
+			first->pid < second->pid);
+}
 
-#define system_ceiling(srp) list2prio(srp->ceiling.next)
-
-
-#define UNDEF_SEM -2
-
-
-/* struct for uniprocessor SRP "semaphore" */
-struct srp_semaphore {
-	struct srp_priority ceiling;
-	struct task_struct* owner;
-	int cpu; /* cpu associated with this "semaphore" and resource */
-};
-
-#define ceiling2sem(c) container_of(c, struct srp_semaphore, ceiling)
 
 static int srp_exceeds_ceiling(struct task_struct* first,
 			       struct srp* srp)
 {
-	return list_empty(&srp->ceiling) ||
-		get_rt_period(first) < system_ceiling(srp)->period ||
-		(get_rt_period(first) == system_ceiling(srp)->period &&
-		 first->pid < system_ceiling(srp)->pid) ||
-		ceiling2sem(system_ceiling(srp))->owner == first;
+	struct srp_priority prio;
+
+	if (list_empty(&srp->ceiling))
+		return 1;
+	else {
+		prio.pid = first->pid;
+		prio.priority = get_srp_prio(first);
+		return srp_higher_prio(&prio, system_ceiling(srp)) ||
+			ceiling2sem(system_ceiling(srp))->owner == first;
+	}
 }
 
 static void srp_add_prio(struct srp* srp, struct srp_priority* prio)
@@ -108,85 +96,139 @@ static void srp_add_prio(struct srp* srp, struct srp_priority* prio)
 }
 
 
-static void* create_srp_semaphore(obj_type_t type)
+static int lock_srp_semaphore(struct litmus_lock* l)
 {
-	struct srp_semaphore* sem;
+	struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
 
-	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
-	if (!sem)
-		return NULL;
+	if (!is_realtime(current))
+		return -EPERM;
 
-	INIT_LIST_HEAD(&sem->ceiling.list);
-	sem->ceiling.period = 0;
-	sem->cpu = UNDEF_SEM;
-	sem->owner = NULL;
-	atomic_inc(&srp_objects_in_use);
-	return sem;
+	preempt_disable();
+
+	/* Update ceiling. */
+	srp_add_prio(&__get_cpu_var(srp), &sem->ceiling);
+
+	/* SRP invariant: all resources available */
+	BUG_ON(sem->owner != NULL);
+
+	sem->owner = current;
+	TRACE_CUR("acquired srp 0x%p\n", sem);
+
+	preempt_enable();
+
+	return 0;
+}
+
+static int unlock_srp_semaphore(struct litmus_lock* l)
+{
+	struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
+	int err = 0;
+
+	preempt_disable();
+
+	if (sem->owner != current) {
+		err = -EINVAL;
+	} else {
+		/* Determine new system priority ceiling for this CPU. */
+		BUG_ON(!in_list(&sem->ceiling.list));
+
+		list_del(&sem->ceiling.list);
+		sem->owner = NULL;
+
+		/* Wake tasks on this CPU, if they exceed current ceiling. */
+		TRACE_CUR("released srp 0x%p\n", sem);
+		wake_up_all(&__get_cpu_var(srp).ceiling_blocked);
+	}
+
+	preempt_enable();
+	return err;
 }
 
-static noinline int open_srp_semaphore(struct od_table_entry* entry, void* __user arg)
+static int open_srp_semaphore(struct litmus_lock* l, void* __user arg)
 {
-	struct srp_semaphore* sem = (struct srp_semaphore*) entry->obj->obj;
-	int ret = 0;
+	struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
+	int err = 0;
 	struct task_struct* t = current;
 	struct srp_priority t_prio;
 
-	TRACE("opening SRP semaphore %p, cpu=%d\n", sem, sem->cpu);
-	if (!srp_active())
-		return -EBUSY;
+	if (!is_realtime(t))
+		return -EPERM;
 
-	if (sem->cpu == UNDEF_SEM)
-		sem->cpu = get_partition(t);
-	else if (sem->cpu != get_partition(t))
-		ret = -EPERM;
+	TRACE_CUR("opening SRP semaphore %p, cpu=%d\n", sem, sem->cpu);
 
-	if (ret == 0) {
-		t_prio.period = get_rt_period(t);
-		t_prio.pid = t->pid;
+	preempt_disable();
+
+	if (sem->owner != NULL)
+		err = -EBUSY;
+
+	if (err == 0) {
+		if (sem->cpu == UNDEF_SEM)
+			sem->cpu = get_partition(t);
+		else if (sem->cpu != get_partition(t))
+			err = -EPERM;
+	}
+
+	if (err == 0) {
+		t_prio.priority = get_srp_prio(t);
+		t_prio.pid = t->pid;
 		if (srp_higher_prio(&t_prio, &sem->ceiling)) {
-			sem->ceiling.period = t_prio.period;
+			sem->ceiling.priority = t_prio.priority;
 			sem->ceiling.pid = t_prio.pid;
 		}
 	}
 
-	return ret;
+	preempt_enable();
+
+	return err;
+}
+
+static int close_srp_semaphore(struct litmus_lock* l)
+{
+	struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
+	int err = 0;
+
+	preempt_disable();
+
+	if (sem->owner == current)
+		unlock_srp_semaphore(l);
+
+	preempt_enable();
+
+	return err;
 }
 
-static void destroy_srp_semaphore(obj_type_t type, void* sem)
+static void deallocate_srp_semaphore(struct litmus_lock* l)
 {
-	/* XXX invariants */
+	struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
 	atomic_dec(&srp_objects_in_use);
 	kfree(sem);
 }
 
-struct fdso_ops srp_sem_ops = {
-	.create  = create_srp_semaphore,
-	.open    = open_srp_semaphore,
-	.destroy = destroy_srp_semaphore
+static struct litmus_lock_ops srp_lock_ops = {
+	.open   = open_srp_semaphore,
+	.close  = close_srp_semaphore,
+	.lock   = lock_srp_semaphore,
+	.unlock = unlock_srp_semaphore,
+	.deallocate = deallocate_srp_semaphore,
 };
 
-
-static void do_srp_down(struct srp_semaphore* sem)
+struct srp_semaphore* allocate_srp_semaphore(void)
 {
-	/* Update ceiling. */
-	srp_add_prio(&__get_cpu_var(srp), &sem->ceiling);
-	WARN_ON(sem->owner != NULL);
-	sem->owner = current;
-	TRACE_CUR("acquired srp 0x%p\n", sem);
-}
+	struct srp_semaphore* sem;
 
-static void do_srp_up(struct srp_semaphore* sem)
-{
-	/* Determine new system priority ceiling for this CPU. */
-	WARN_ON(!in_list(&sem->ceiling.list));
-	if (in_list(&sem->ceiling.list))
-		list_del(&sem->ceiling.list);
+	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
+	if (!sem)
+		return NULL;
 
-	sem->owner = NULL;
+	INIT_LIST_HEAD(&sem->ceiling.list);
+	sem->ceiling.priority = 0;
+	sem->cpu = UNDEF_SEM;
+	sem->owner = NULL;
+
+	sem->litmus_lock.ops = &srp_lock_ops;
 
-	/* Wake tasks on this CPU, if they exceed current ceiling. */
-	TRACE_CUR("released srp 0x%p\n", sem);
-	wake_up_all(&__get_cpu_var(srp).ceiling_blocked);
+	atomic_inc(&srp_objects_in_use);
+	return sem;
 }
 
 static int srp_wake_up(wait_queue_t *wait, unsigned mode, int sync,
@@ -202,8 +244,6 @@ static int srp_wake_up(wait_queue_t *wait, unsigned mode, int sync,
 	return 0;
 }
 
-
-
 static void do_ceiling_block(struct task_struct *tsk)
 {
 	wait_queue_t wait = {
@@ -223,6 +263,7 @@ static void do_ceiling_block(struct task_struct *tsk)
 }
 
 /* Wait for current task priority to exceed system-wide priority ceiling.
+ * FIXME: the hotpath should be inline.
  */
 void srp_ceiling_block(void)
 {
@@ -251,9 +292,4 @@ void srp_ceiling_block(void)
 	preempt_enable();
 }
 
-
-#else
-
-struct fdso_ops srp_sem_ops = {};
-
 #endif
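The rewritten srp_exceeds_ceiling() builds an srp_priority on the stack and reuses srp_higher_prio() against the head of the per-CPU ceiling list: a task may proceed if no ceiling is set, if it beats the current system ceiling, or if it already owns the resource that defines that ceiling. A self-contained sketch of that decision rule (toy types, a PID stands in for the owner-pointer check, not kernel code):

/* Standalone sketch of the SRP ceiling test above: the system ceiling is
 * the highest-priority (numerically smallest, non-zero) ceiling among the
 * currently held semaphores on a CPU; a task passes if it beats that
 * ceiling or owns the resource behind it. Toy types for illustration only. */
#include <stdio.h>
#include <sys/types.h>

struct toy_prio {
	unsigned int priority;	/* 0 means "no priority" */
	pid_t pid;
};

/* mirrors srp_higher_prio(): smaller value wins, ties broken by PID */
static int higher_prio(const struct toy_prio *a, const struct toy_prio *b)
{
	if (!a->priority)
		return 0;
	return !b->priority ||
	       a->priority < b->priority ||
	       (a->priority == b->priority && a->pid < b->pid);
}

/* mirrors srp_exceeds_ceiling(); owner_pid stands in for the owner pointer */
static int exceeds_ceiling(const struct toy_prio *task,
			   const struct toy_prio *ceiling,
			   pid_t owner_pid, int have_ceiling)
{
	if (!have_ceiling)
		return 1;			/* no resource currently held */
	return higher_prio(task, ceiling) ||	/* beats the system ceiling */
	       owner_pid == task->pid;		/* or already owns the resource */
}

int main(void)
{
	struct toy_prio ceiling = { .priority = 20, .pid = 42 }; /* set by task 42 */
	struct toy_prio hi      = { .priority = 10, .pid = 7 };
	struct toy_prio lo      = { .priority = 30, .pid = 9 };
	struct toy_prio owner   = { .priority = 40, .pid = 42 };

	printf("hi passes: %d\n", exceeds_ceiling(&hi, &ceiling, 42, 1));       /* 1 */
	printf("lo passes: %d\n", exceeds_ceiling(&lo, &ceiling, 42, 1));       /* 0 */
	printf("owner passes: %d\n", exceeds_ceiling(&owner, &ceiling, 42, 1)); /* 1 */
	return 0;
}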