Diffstat (limited to 'litmus/srp.c')
-rw-r--r--	litmus/srp.c	236
1 file changed, 136 insertions, 100 deletions
diff --git a/litmus/srp.c b/litmus/srp.c
index b4c171e79fd4..2ed4ec12a9d3 100644
--- a/litmus/srp.c
+++ b/litmus/srp.c
@@ -12,42 +12,25 @@
 #include <litmus/trace.h>
 
 
-#ifdef CONFIG_SRP
+#ifdef CONFIG_LITMUS_LOCKING
 
-struct srp_priority {
-	struct list_head list;
-	unsigned int period;
-	pid_t pid;
-};
+#include <litmus/srp.h>
 
-#define list2prio(l) list_entry(l, struct srp_priority, list)
-
-/* SRP task priority comparison function. Smaller periods have highest
- * priority, tie-break is PID. Special case: period == 0 <=> no priority
- */
-static int srp_higher_prio(struct srp_priority* first,
-			   struct srp_priority* second)
-{
-	if (!first->period)
-		return 0;
-	else
-		return !second->period ||
-			first->period < second->period || (
-			first->period == second->period &&
-			first->pid < second->pid);
-}
+srp_prioritization_t get_srp_prio;
 
 struct srp {
 	struct list_head ceiling;
 	wait_queue_head_t ceiling_blocked;
 };
+#define system_ceiling(srp) list2prio(srp->ceiling.next)
+#define ceiling2sem(c) container_of(c, struct srp_semaphore, ceiling)
 
+#define UNDEF_SEM -2
 
 atomic_t srp_objects_in_use = ATOMIC_INIT(0);
 
 DEFINE_PER_CPU(struct srp, srp);
 
-
 /* Initialize SRP semaphores at boot time. */
 static int __init srp_init(void)
 {
@@ -64,30 +47,35 @@ static int __init srp_init(void)
 }
 module_init(srp_init);
 
+/* SRP task priority comparison function. Smaller numeric values have higher
+ * priority, tie-break is PID. Special case: priority == 0 <=> no priority
+ */
+static int srp_higher_prio(struct srp_priority* first,
+			   struct srp_priority* second)
+{
+	if (!first->priority)
+		return 0;
+	else
+		return !second->priority ||
+			first->priority < second->priority || (
+			first->priority == second->priority &&
+			first->pid < second->pid);
+}
 
-#define system_ceiling(srp) list2prio(srp->ceiling.next)
-
-
-#define UNDEF_SEM -2
-
-
-/* struct for uniprocessor SRP "semaphore" */
-struct srp_semaphore {
-	struct srp_priority ceiling;
-	struct task_struct* owner;
-	int cpu; /* cpu associated with this "semaphore" and resource */
-};
-
-#define ceiling2sem(c) container_of(c, struct srp_semaphore, ceiling)
 
 static int srp_exceeds_ceiling(struct task_struct* first,
 			       struct srp* srp)
 {
-	return list_empty(&srp->ceiling) ||
-		get_rt_period(first) < system_ceiling(srp)->period ||
-		(get_rt_period(first) == system_ceiling(srp)->period &&
-		 first->pid < system_ceiling(srp)->pid) ||
-		ceiling2sem(system_ceiling(srp))->owner == first;
+	struct srp_priority prio;
+
+	if (list_empty(&srp->ceiling))
+		return 1;
+	else {
+		prio.pid = first->pid;
+		prio.priority = get_srp_prio(first);
+		return srp_higher_prio(&prio, system_ceiling(srp)) ||
+			ceiling2sem(system_ceiling(srp))->owner == first;
+	}
 }
 
 static void srp_add_prio(struct srp* srp, struct srp_priority* prio)
@@ -108,85 +96,139 @@ static void srp_add_prio(struct srp* srp, struct srp_priority* prio)
 }
 
 
-static void* create_srp_semaphore(obj_type_t type)
+static int lock_srp_semaphore(struct litmus_lock* l)
 {
-	struct srp_semaphore* sem;
+	struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
 
-	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
-	if (!sem)
-		return NULL;
+	if (!is_realtime(current))
+		return -EPERM;
 
-	INIT_LIST_HEAD(&sem->ceiling.list);
-	sem->ceiling.period = 0;
-	sem->cpu = UNDEF_SEM;
-	sem->owner = NULL;
-	atomic_inc(&srp_objects_in_use);
-	return sem;
+	preempt_disable();
+
+	/* Update ceiling. */
+	srp_add_prio(&__get_cpu_var(srp), &sem->ceiling);
+
+	/* SRP invariant: all resources available */
+	BUG_ON(sem->owner != NULL);
+
+	sem->owner = current;
+	TRACE_CUR("acquired srp 0x%p\n", sem);
+
+	preempt_enable();
+
+	return 0;
+}
+
+static int unlock_srp_semaphore(struct litmus_lock* l)
+{
+	struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
+	int err = 0;
+
+	preempt_disable();
+
+	if (sem->owner != current) {
+		err = -EINVAL;
+	} else {
+		/* Determine new system priority ceiling for this CPU. */
+		BUG_ON(!in_list(&sem->ceiling.list));
+
+		list_del(&sem->ceiling.list);
+		sem->owner = NULL;
+
+		/* Wake tasks on this CPU, if they exceed current ceiling. */
+		TRACE_CUR("released srp 0x%p\n", sem);
+		wake_up_all(&__get_cpu_var(srp).ceiling_blocked);
+	}
+
+	preempt_enable();
+	return err;
 }
 
-static noinline int open_srp_semaphore(struct od_table_entry* entry, void* __user arg)
+static int open_srp_semaphore(struct litmus_lock* l, void* __user arg)
 {
-	struct srp_semaphore* sem = (struct srp_semaphore*) entry->obj->obj;
-	int ret = 0;
+	struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
+	int err = 0;
 	struct task_struct* t = current;
 	struct srp_priority t_prio;
 
-	TRACE("opening SRP semaphore %p, cpu=%d\n", sem, sem->cpu);
-	if (!srp_active())
-		return -EBUSY;
+	if (!is_realtime(t))
+		return -EPERM;
 
-	if (sem->cpu == UNDEF_SEM)
-		sem->cpu = get_partition(t);
-	else if (sem->cpu != get_partition(t))
-		ret = -EPERM;
+	TRACE_CUR("opening SRP semaphore %p, cpu=%d\n", sem, sem->cpu);
 
-	if (ret == 0) {
-		t_prio.period = get_rt_period(t);
-		t_prio.pid = t->pid;
+	preempt_disable();
+
+	if (sem->owner != NULL)
+		err = -EBUSY;
+
+	if (err == 0) {
+		if (sem->cpu == UNDEF_SEM)
+			sem->cpu = get_partition(t);
+		else if (sem->cpu != get_partition(t))
+			err = -EPERM;
+	}
+
+	if (err == 0) {
+		t_prio.priority = get_srp_prio(t);
+		t_prio.pid = t->pid;
 		if (srp_higher_prio(&t_prio, &sem->ceiling)) {
-			sem->ceiling.period = t_prio.period;
+			sem->ceiling.priority = t_prio.priority;
 			sem->ceiling.pid = t_prio.pid;
 		}
 	}
 
-	return ret;
+	preempt_enable();
+
+	return err;
+}
+
+static int close_srp_semaphore(struct litmus_lock* l)
+{
+	struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
+	int err = 0;
+
+	preempt_disable();
+
+	if (sem->owner == current)
+		unlock_srp_semaphore(l);
+
+	preempt_enable();
+
+	return err;
 }
 
-static void destroy_srp_semaphore(obj_type_t type, void* sem)
+static void deallocate_srp_semaphore(struct litmus_lock* l)
 {
-	/* XXX invariants */
+	struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
 	atomic_dec(&srp_objects_in_use);
 	kfree(sem);
 }
 
-struct fdso_ops srp_sem_ops = {
-	.create = create_srp_semaphore,
-	.open = open_srp_semaphore,
-	.destroy = destroy_srp_semaphore
+static struct litmus_lock_ops srp_lock_ops = {
+	.open = open_srp_semaphore,
+	.close = close_srp_semaphore,
+	.lock = lock_srp_semaphore,
+	.unlock = unlock_srp_semaphore,
+	.deallocate = deallocate_srp_semaphore,
 };
 
-
-static void do_srp_down(struct srp_semaphore* sem)
+struct srp_semaphore* allocate_srp_semaphore(void)
 {
-	/* Update ceiling. */
-	srp_add_prio(&__get_cpu_var(srp), &sem->ceiling);
-	WARN_ON(sem->owner != NULL);
-	sem->owner = current;
-	TRACE_CUR("acquired srp 0x%p\n", sem);
-}
+	struct srp_semaphore* sem;
 
-static void do_srp_up(struct srp_semaphore* sem)
-{
-	/* Determine new system priority ceiling for this CPU. */
-	WARN_ON(!in_list(&sem->ceiling.list));
-	if (in_list(&sem->ceiling.list))
-		list_del(&sem->ceiling.list);
+	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
+	if (!sem)
+		return NULL;
 
-	sem->owner = NULL;
+	INIT_LIST_HEAD(&sem->ceiling.list);
+	sem->ceiling.priority = 0;
+	sem->cpu = UNDEF_SEM;
+	sem->owner = NULL;
+
+	sem->litmus_lock.ops = &srp_lock_ops;
 
-	/* Wake tasks on this CPU, if they exceed current ceiling. */
-	TRACE_CUR("released srp 0x%p\n", sem);
-	wake_up_all(&__get_cpu_var(srp).ceiling_blocked);
+	atomic_inc(&srp_objects_in_use);
+	return sem;
 }
 
 static int srp_wake_up(wait_queue_t *wait, unsigned mode, int sync,
@@ -202,8 +244,6 @@ static int srp_wake_up(wait_queue_t *wait, unsigned mode, int sync,
 	return 0;
 }
 
-
-
 static void do_ceiling_block(struct task_struct *tsk)
 {
 	wait_queue_t wait = {
@@ -223,6 +263,7 @@ static void do_ceiling_block(struct task_struct *tsk)
 }
 
 /* Wait for current task priority to exceed system-wide priority ceiling.
+ * FIXME: the hotpath should be inline.
  */
 void srp_ceiling_block(void)
 {
@@ -251,9 +292,4 @@ void srp_ceiling_block(void)
 	preempt_enable();
 }
 
-
-#else
-
-struct fdso_ops srp_sem_ops = {};
-
 #endif
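
Note: the refactored code above assumes that the active scheduler plugin has installed a prioritization function in the global get_srp_prio hook before any SRP semaphore is locked; the SRP layer no longer hard-codes task periods as preemption levels. A minimal sketch of what such a hook might look like for a partitioned, implicit-deadline EDF plugin (the function name and the registration site are illustrative assumptions, not part of this diff):

	/* Illustrative only: map a task to its SRP preemption level.
	 * srp_higher_prio() treats smaller numeric values as higher
	 * priority, so returning the period orders tasks by relative
	 * deadline under implicit deadlines. */
	static unsigned int example_get_srp_prio(struct task_struct* t)
	{
		return get_rt_period(t);
	}

	/* ...installed when the plugin is switched in: */
	get_srp_prio = example_get_srp_prio;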