| author | Andrea Bastoni <bastoni@cs.unc.edu> | 2009-12-17 21:34:09 -0500 |
|---|---|---|
| committer | Andrea Bastoni <bastoni@cs.unc.edu> | 2009-12-17 21:34:09 -0500 |
| commit | a2ac69aab6a363f3e450e4f54d72026dfcd2c72d | |
| tree | 4c3fffe16789f60da5584b511f90b6f1a732d34a | |
| parent | a18a4da0233492c15bb7b62a329061cf7dcce7a2 | |
Add Stack Resource Policy (SRP) support
Diffstat (limited to 'litmus')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | litmus/Kconfig | 13 |
| -rw-r--r-- | litmus/Makefile | 1 |
| -rw-r--r-- | litmus/fdso.c | 3 |
| -rw-r--r-- | litmus/srp.c | 318 |

4 files changed, 334 insertions, 1 deletion
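The patch touches four places: a new `CONFIG_SRP` Kconfig option, a Makefile rule that builds `litmus/srp.c`, registration of the SRP semaphore type with the FDSO (file-descriptor-attached shared objects) layer, and the SRP implementation itself. Under Baker's Stack Resource Policy, each processor tracks a system ceiling; a job may begin executing only once its preemption level (here derived from its period, with shorter periods corresponding to higher levels) exceeds that ceiling. This bounds the blocking any job incurs to at most one outermost critical section, and that blocking happens before the job starts running rather than at lock-acquisition time.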
```diff
diff --git a/litmus/Kconfig b/litmus/Kconfig
index f8c642658a2f..31cd527c25ef 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -1,5 +1,18 @@
 menu "LITMUS^RT"
 
+menu "Real-Time Synchronization"
+
+config SRP
+        bool "Stack Resource Policy (SRP)"
+        default n
+        help
+          Include support for Baker's Stack Resource Policy.
+
+          Say Yes if you want synchronization support for
+          FMLP local long critical sections.
+
+endmenu
+
 menu "Tracing"
 
 config FEATHER_TRACE
```
```diff
diff --git a/litmus/Makefile b/litmus/Makefile
index 3d18cff62cee..612457fa773e 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -8,6 +8,7 @@ obj-y = sched_plugin.o litmus.o \
         rt_domain.o \
         edf_common.o \
         fdso.o \
+        srp.o \
         heap.o
 
 obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
```
```diff
diff --git a/litmus/fdso.c b/litmus/fdso.c
index 323efac17a47..16e3a43a67d1 100644
--- a/litmus/fdso.c
+++ b/litmus/fdso.c
@@ -18,6 +18,7 @@
 
 #include <litmus/fdso.h>
 
+extern struct fdso_ops srp_sem_ops;
 
 static struct fdso_ops dummy_ops = {
         .create = NULL
@@ -25,7 +26,7 @@ static struct fdso_ops dummy_ops = {
 
 static const struct fdso_ops* fdso_ops[] = {
         &dummy_ops,
-        &dummy_ops,
+        &srp_sem_ops,
 };
 
 static void* fdso_create(obj_type_t type)
```
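The second slot of the `fdso_ops[]` dispatch table, previously a placeholder, now routes to the SRP operations. The table is indexed by `obj_type_t`, which lives in `litmus/fdso.h` and is not shown in this patch; based on the slot positions above, the mapping presumably looks roughly like the following sketch (names and values are assumptions):

```c
/* Assumed object-type mapping (defined in litmus/fdso.h, not shown in
 * this patch): slot 0 keeps dummy_ops here, slot 1 is now SRP. */
typedef enum {
        FMLP_SEM     = 0,  /* still &dummy_ops in this commit */
        SRP_SEM      = 1,  /* now dispatches to &srp_sem_ops  */
        MAX_OBJ_TYPE = 1
} obj_type_t;
```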
```diff
diff --git a/litmus/srp.c b/litmus/srp.c
new file mode 100644
index 000000000000..71639b991630
--- /dev/null
+++ b/litmus/srp.c
@@ -0,0 +1,318 @@
+/* ************************************************************************** */
+/*                           STACK RESOURCE POLICY                            */
+/* ************************************************************************** */
+
+#include <asm/atomic.h>
+#include <linux/wait.h>
+#include <litmus/litmus.h>
+#include <litmus/sched_plugin.h>
+
+#include <litmus/fdso.h>
+
+#include <litmus/trace.h>
+
+
+#ifdef CONFIG_SRP
+
+struct srp_priority {
+        struct list_head    list;
+        unsigned int        period;
+        pid_t               pid;
+};
+
+#define list2prio(l) list_entry(l, struct srp_priority, list)
+
+/* SRP task priority comparison function. Smaller periods have higher
+ * priority, the tie-break is PID. Special case: period == 0 <=> no priority.
+ */
+static int srp_higher_prio(struct srp_priority* first,
+                           struct srp_priority* second)
+{
+        if (!first->period)
+                return 0;
+        else
+                return !second->period ||
+                        first->period < second->period || (
+                        first->period == second->period &&
+                        first->pid < second->pid);
+}
+
+struct srp {
+        struct list_head    ceiling;
+        wait_queue_head_t   ceiling_blocked;
+};
```
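To make the comparison concrete, a few hypothetical data points (the values are invented for illustration; `srp_higher_prio()` is the function above):

```c
struct srp_priority a = { .period = 10, .pid = 42 };
struct srp_priority b = { .period = 20, .pid =  7 };
struct srp_priority c = { .period = 10, .pid =  7 };
struct srp_priority z = { .period =  0, .pid =  1 }; /* period == 0: no priority */

srp_higher_prio(&a, &b); /* 1: the shorter period wins               */
srp_higher_prio(&c, &a); /* 1: equal periods, the lower PID wins     */
srp_higher_prio(&z, &a); /* 0: "no priority" never wins...           */
srp_higher_prio(&a, &z); /* 1: ...and is beaten by any real priority */
```

The last rule is what allows a freshly created semaphore ceiling, initialized to `period = 0` in `create_srp_semaphore()` below, to be raised by the first task that opens it.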
```diff
+
+
+atomic_t srp_objects_in_use = ATOMIC_INIT(0);
+
+DEFINE_PER_CPU(struct srp, srp);
+
+
+/* Initialize SRP semaphores at boot time. */
+static int __init srp_init(void)
+{
+        int i;
+
+        printk("Initializing SRP per-CPU ceilings...");
+        for (i = 0; i < NR_CPUS; i++) {
+                init_waitqueue_head(&per_cpu(srp, i).ceiling_blocked);
+                INIT_LIST_HEAD(&per_cpu(srp, i).ceiling);
+        }
+        printk(" done!\n");
+
+        return 0;
+}
+module_init(srp_init);
+
```
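Since `srp.c` is linked into the kernel via `obj-y`, the `module_init()` above simply registers a boot-time initcall. The loop initializes all `NR_CPUS` slots rather than only the online CPUs, which is harmless for statically allocated per-CPU data.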
```diff
+
+#define system_ceiling(srp) list2prio(srp->ceiling.next)
+
+
+#define UNDEF_SEM -2
+
+
+/* struct for uniprocessor SRP "semaphore" */
+struct srp_semaphore {
+        struct srp_priority ceiling;
+        struct task_struct* owner;
+        int                 cpu; /* cpu associated with this "semaphore" and resource */
+};
+
+#define ceiling2sem(c) container_of(c, struct srp_semaphore, ceiling)
+
+static int srp_exceeds_ceiling(struct task_struct* first,
+                               struct srp* srp)
+{
+        return list_empty(&srp->ceiling) ||
+               get_rt_period(first) < system_ceiling(srp)->period ||
+               (get_rt_period(first) == system_ceiling(srp)->period &&
+                first->pid < system_ceiling(srp)->pid) ||
+               ceiling2sem(system_ceiling(srp))->owner == first;
+}
+
+static void srp_add_prio(struct srp* srp, struct srp_priority* prio)
+{
+        struct list_head *pos;
+        if (in_list(&prio->list)) {
+                printk(KERN_CRIT "WARNING: SRP violation detected, prio is already in "
+                       "ceiling list! cpu=%d, srp=%p\n", smp_processor_id(), ceiling2sem(prio));
+                return;
+        }
+        list_for_each(pos, &srp->ceiling)
+                if (unlikely(srp_higher_prio(prio, list2prio(pos)))) {
+                        __list_add(&prio->list, pos->prev, pos);
+                        return;
+                }
+
+        list_add_tail(&prio->list, &srp->ceiling);
+}
```
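`srp_add_prio()` keeps each per-CPU `ceiling` list sorted from highest to lowest priority, so `system_ceiling()`, the head of the list, is always the currently binding ceiling, and releasing a resource (see `do_srp_up()` below) only needs a `list_del()` to re-expose the previous ceiling. The final `owner == first` clause in `srp_exceeds_ceiling()` implements SRP's standard exception: the task that itself holds the ceiling-defining resource is never blocked by that ceiling.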
```diff
+
+
+static void* create_srp_semaphore(void)
+{
+        struct srp_semaphore* sem;
+
+        sem = kmalloc(sizeof(*sem), GFP_KERNEL);
+        if (!sem)
+                return NULL;
+
+        INIT_LIST_HEAD(&sem->ceiling.list);
+        sem->ceiling.period = 0;
+        sem->cpu   = UNDEF_SEM;
+        sem->owner = NULL;
+        atomic_inc(&srp_objects_in_use);
+        return sem;
+}
+
+static noinline int open_srp_semaphore(struct od_table_entry* entry, void* __user arg)
+{
+        struct srp_semaphore* sem = (struct srp_semaphore*) entry->obj->obj;
+        int ret = 0;
+        struct task_struct* t = current;
+        struct srp_priority t_prio;
+
+        TRACE("opening SRP semaphore %p, cpu=%d\n", sem, sem->cpu);
+        if (!srp_active())
+                return -EBUSY;
+
+        if (sem->cpu == UNDEF_SEM)
+                sem->cpu = get_partition(t);
+        else if (sem->cpu != get_partition(t))
+                ret = -EPERM;
+
+        if (ret == 0) {
+                t_prio.period = get_rt_period(t);
+                t_prio.pid    = t->pid;
+                if (srp_higher_prio(&t_prio, &sem->ceiling)) {
+                        sem->ceiling.period = t_prio.period;
+                        sem->ceiling.pid    = t_prio.pid;
+                }
+        }
+
+        return ret;
+}
+
+static void destroy_srp_semaphore(void* sem)
+{
+        /* XXX invariants */
+        atomic_dec(&srp_objects_in_use);
+        kfree(sem);
+}
+
+struct fdso_ops srp_sem_ops = {
+        .create  = create_srp_semaphore,
+        .open    = open_srp_semaphore,
+        .destroy = destroy_srp_semaphore
+};
```
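In SRP terms, a semaphore's ceiling is the highest preemption level of any task that may use it. Here that maximum is accumulated lazily: each `open()` raises `sem->ceiling` to the opener's priority if needed, which implies the protocol is only sound once every task in the partition that will use the resource has opened the semaphore. The first opener also binds the semaphore to its partition's CPU; any later opener from a different partition gets `-EPERM`, keeping SRP strictly per-processor.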
```diff
+
+
+static void do_srp_down(struct srp_semaphore* sem)
+{
+        /* Update ceiling. */
+        srp_add_prio(&__get_cpu_var(srp), &sem->ceiling);
+        WARN_ON(sem->owner != NULL);
+        sem->owner = current;
+        TRACE_CUR("acquired srp 0x%p\n", sem);
+}
+
+static void do_srp_up(struct srp_semaphore* sem)
+{
+        /* Determine new system priority ceiling for this CPU. */
+        WARN_ON(!in_list(&sem->ceiling.list));
+        if (in_list(&sem->ceiling.list))
+                list_del(&sem->ceiling.list);
+
+        sem->owner = NULL;
+
+        /* Wake tasks on this CPU, if they exceed current ceiling. */
+        TRACE_CUR("released srp 0x%p\n", sem);
+        wake_up_all(&__get_cpu_var(srp).ceiling_blocked);
+}
+
+/* Adjust the system-wide priority ceiling if resource is claimed. */
+asmlinkage long sys_srp_down(int sem_od)
+{
+        int cpu;
+        int ret = -EINVAL;
+        struct srp_semaphore* sem;
+
+        /* disabling preemptions is sufficient protection since
+         * SRP is strictly per CPU and we don't interfere with any
+         * interrupt handlers
+         */
+        preempt_disable();
+        TS_SRP_DOWN_START;
+
+        cpu = smp_processor_id();
+        sem = lookup_srp_sem(sem_od);
+        if (sem && sem->cpu == cpu) {
+                do_srp_down(sem);
+                ret = 0;
+        }
+
+        TS_SRP_DOWN_END;
+        preempt_enable();
+        return ret;
+}
+
+/* Adjust the system-wide priority ceiling if resource is freed. */
+asmlinkage long sys_srp_up(int sem_od)
+{
+        int cpu;
+        int ret = -EINVAL;
+        struct srp_semaphore* sem;
+
+        preempt_disable();
+        TS_SRP_UP_START;
+
+        cpu = smp_processor_id();
+        sem = lookup_srp_sem(sem_od);
+
+        if (sem && sem->cpu == cpu) {
+                do_srp_up(sem);
+                ret = 0;
+        }
+
+        TS_SRP_UP_END;
+        preempt_enable();
+        return ret;
+}
```
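Two points worth noting. First, the locking strategy: all SRP state is per-CPU and never touched from interrupt context, so `preempt_disable()` is the only protection the system calls need; no spinlock is taken. Second, in keeping with SRP theory, `sys_srp_down()` never blocks; any ceiling blocking happens before a job starts running, in `srp_ceiling_block()` below. The `TS_SRP_*` macros are Feather-Trace timestamp points used to measure synchronization overheads.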
```diff
+
+static int srp_wake_up(wait_queue_t *wait, unsigned mode, int sync,
+                       void *key)
+{
+        int cpu = smp_processor_id();
+        struct task_struct *tsk = wait->private;
+        if (cpu != get_partition(tsk))
+                TRACE_TASK(tsk, "srp_wake_up on wrong cpu, partition is %d\n",
+                           get_partition(tsk));
+        else if (srp_exceeds_ceiling(tsk, &__get_cpu_var(srp)))
+                return default_wake_function(wait, mode, sync, key);
+        return 0;
+}
+
+
+
+static void do_ceiling_block(struct task_struct *tsk)
+{
+        wait_queue_t wait = {
+                .private   = tsk,
+                .func      = srp_wake_up,
+                .task_list = {NULL, NULL}
+        };
+
+        tsk->state = TASK_UNINTERRUPTIBLE;
+        add_wait_queue(&__get_cpu_var(srp).ceiling_blocked, &wait);
+        tsk->rt_param.srp_non_recurse = 1;
+        preempt_enable_no_resched();
+        schedule();
+        preempt_disable();
+        tsk->rt_param.srp_non_recurse = 0;
+        remove_wait_queue(&__get_cpu_var(srp).ceiling_blocked, &wait);
+}
```
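`do_ceiling_block()` uses the standard wait-queue machinery with a custom wake callback: `wake_up_all()` in `do_srp_up()` invokes `srp_wake_up()` for every waiter, but only a waiter whose priority now exceeds the lowered ceiling is passed on to `default_wake_function()`; all others return 0 and remain queued. The `preempt_enable_no_resched()`/`schedule()` pairing avoids a redundant preemption check immediately before the task suspends anyway.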
```diff
+
+/* Wait for the current task's priority to exceed the system-wide
+ * priority ceiling.
+ */
+void srp_ceiling_block(void)
+{
+        struct task_struct *tsk = current;
+
+        /* Only applies to real-time tasks; optimize for the RT case. */
+        if (unlikely(!is_realtime(tsk)))
+                return;
+
+        /* Avoid recursive ceiling blocking. */
+        if (unlikely(tsk->rt_param.srp_non_recurse))
+                return;
+
+        /* Bail out early if there aren't any SRP resources around. */
+        if (likely(!atomic_read(&srp_objects_in_use)))
+                return;
+
+        preempt_disable();
+        if (!srp_exceeds_ceiling(tsk, &__get_cpu_var(srp))) {
+                TRACE_CUR("is priority ceiling blocked.\n");
+                while (!srp_exceeds_ceiling(tsk, &__get_cpu_var(srp)))
+                        do_ceiling_block(tsk);
+                TRACE_CUR("finally exceeds system ceiling.\n");
+        } else
+                TRACE_CUR("is not priority ceiling blocked.\n");
+        preempt_enable();
+}
```
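The `srp_non_recurse` flag earns its keep here: `srp_ceiling_block()` is presumably invoked on the wake-up path of every real-time task, so the wake-up performed inside `do_ceiling_block()` must be prevented from re-entering the ceiling test recursively. The `while` loop re-checks the ceiling after each wake-up because another local task may have raised it again before this task got to run.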
```diff
+
+
+#else
+
+asmlinkage long sys_srp_down(int sem_od)
+{
+        return -ENOSYS;
+}
+
+asmlinkage long sys_srp_up(int sem_od)
+{
+        return -ENOSYS;
+}
+
+struct fdso_ops srp_sem_ops = {};
+
+#endif
```
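When `CONFIG_SRP` is off, the stubs above keep the syscall table and the `srp_sem_ops` reference in `litmus/fdso.c` linkable while returning `-ENOSYS` to callers. For context, here is a minimal sketch of how a partitioned real-time task might exercise the new object type from userspace; the wrapper and constant names (`od_open()`, `od_close()`, `srp_down()`, `srp_up()`, `SRP_SEM`) are assumptions modeled on LITMUS^RT userspace conventions, not something this patch provides:

```c
/* Hypothetical userspace sketch -- every identifier below except the
 * POSIX calls is an assumption, not provided by this patch. */
int fd = open("/some/shared/file", O_RDONLY | O_CREAT, 0600); /* naming handle   */
int od = od_open(fd, SRP_SEM, 0); /* attach/create the SRP semaphore; the kernel */
                                  /* raises its ceiling to this task's priority  */
                                  /* via open_srp_semaphore()                    */

srp_down(od);  /* sys_srp_down(): raise this CPU's system ceiling           */
/* ... access the CPU-local shared resource ... */
srp_up(od);    /* sys_srp_up(): restore the ceiling, wake eligible waiters  */

od_close(od);
close(fd);
```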
