diff options
Diffstat (limited to 'litmus/reservations/gedf_reservation.c')
-rw-r--r--  litmus/reservations/gedf_reservation.c | 69
1 file changed, 44 insertions(+), 25 deletions(-)
diff --git a/litmus/reservations/gedf_reservation.c b/litmus/reservations/gedf_reservation.c index 41a4feeb6680..1c604212acf2 100644 --- a/litmus/reservations/gedf_reservation.c +++ b/litmus/reservations/gedf_reservation.c | |||
@@ -201,7 +201,7 @@ static void check_for_preemptions(struct gedf_reservation_environment* gedf_env) | |||
201 | } | 201 | } |
202 | } | 202 | } |
203 | 203 | ||
204 | if (last->linked && last->linked->res.cur_budget) | 204 | if (last->linked) |
205 | requeue(gedf_env, last->linked); | 205 | requeue(gedf_env, last->linked); |
206 | 206 | ||
207 | link_task_to_cpu(gedf_env, gedf_res, last); | 207 | link_task_to_cpu(gedf_env, gedf_res, last); |
@@ -685,6 +685,8 @@ static void gedf_env_resume( | |||
685 | /* adds cpu back to scheduling consideration */ | 685 | /* adds cpu back to scheduling consideration */ |
686 | bheap_insert(cpu_lower_prio, &gedf_env->cpu_heap, entry->hn); | 686 | bheap_insert(cpu_lower_prio, &gedf_env->cpu_heap, entry->hn); |
687 | gedf_env->num_cpus++; | 687 | gedf_env->num_cpus++; |
688 | gedf_env->num_active_cpus = (gedf_env->num_cpus > gedf_env->num_active_cpus) ? | ||
689 | gedf_env->num_cpus : gedf_env->num_active_cpus; | ||
688 | 690 | ||
689 | raw_spin_unlock_irqrestore(&gedf_env->domain.ready_lock, flags); | 691 | raw_spin_unlock_irqrestore(&gedf_env->domain.ready_lock, flags); |
690 | 692 | ||
@@ -715,7 +717,15 @@ static struct task_struct* gedf_env_dispatch( | |||
715 | if (entry->scheduled) | 717 | if (entry->scheduled) |
716 | np = entry->scheduled->res.ops->is_np(&entry->scheduled->res, cpu); | 718 | np = entry->scheduled->res.ops->is_np(&entry->scheduled->res, cpu); |
717 | 719 | ||
720 | /* if flagged for removal from environment, invoke shutdown callback */ | ||
721 | if (entry->scheduled && entry->scheduled->will_remove) { | ||
722 | /* assumed to already been unlinked by whatever set will_remove */ | ||
723 | entry->scheduled->res.ops->shutdown(&entry->scheduled->res); | ||
724 | entry->scheduled = NULL; | ||
725 | } | ||
726 | |||
718 | raw_spin_lock_irqsave(&gedf_env->domain.ready_lock, flags); | 727 | raw_spin_lock_irqsave(&gedf_env->domain.ready_lock, flags); |
728 | |||
719 | /* Budget replenishment has to happen under a lock, otherwise | 729 | /* Budget replenishment has to happen under a lock, otherwise |
720 | * check_for_preemptions() may be called concurrently, and try to link and | 730 | * check_for_preemptions() may be called concurrently, and try to link and |
721 | * schedule our now-out-of-budget task on another CPU. This would result in | 731 | * schedule our now-out-of-budget task on another CPU. This would result in |
@@ -727,7 +737,8 @@ static struct task_struct* gedf_env_dispatch( | |||
727 | entry->scheduled->res.ops->replenish_budget(&entry->scheduled->res, cpu); | 737 | entry->scheduled->res.ops->replenish_budget(&entry->scheduled->res, cpu); |
728 | /* unlink and requeue if not blocked and not np*/ | 738 | /* unlink and requeue if not blocked and not np*/ |
729 | if (!entry->scheduled->blocked && | 739 | if (!entry->scheduled->blocked && |
730 | !entry->scheduled->res.ops->is_np(&entry->scheduled->res, cpu)) { | 740 | !entry->scheduled->res.ops->is_np(&entry->scheduled->res, cpu) && |
741 | !entry->scheduled->will_remove) { | ||
731 | unlink(gedf_env, entry->scheduled); | 742 | unlink(gedf_env, entry->scheduled); |
732 | requeue(gedf_env, entry->scheduled); | 743 | requeue(gedf_env, entry->scheduled); |
733 | check_for_preemptions(gedf_env); | 744 | check_for_preemptions(gedf_env); |
@@ -798,13 +809,6 @@ static void gedf_env_update_time( | |||
798 | * Therefore, no lock is needed for this operation | 809 | * Therefore, no lock is needed for this operation |
799 | */ | 810 | */ |
800 | entry->scheduled->res.ops->drain_budget(&entry->scheduled->res, how_much, cpu); | 811 | entry->scheduled->res.ops->drain_budget(&entry->scheduled->res, how_much, cpu); |
801 | |||
802 | /* if flagged for removal from environment, invoke shutdown callback */ | ||
803 | if (entry->scheduled->will_remove) { | ||
804 | /* assumed to already been unlinked by whatever set will_remove */ | ||
805 | entry->scheduled->res.ops->shutdown(&entry->scheduled->res); | ||
806 | entry->scheduled = NULL; | ||
807 | } | ||
808 | } | 812 | } |
809 | 813 | ||
810 | /* callback for how the domain will release jobs */ | 814 | /* callback for how the domain will release jobs */ |
@@ -990,8 +994,12 @@ static struct task_struct* omlp_dequeue(struct omlp_semaphore *sem) | |||
990 | { | 994 | { |
991 | struct task_struct* first = __waitqueue_remove_first(&sem->fifo_wait); | 995 | struct task_struct* first = __waitqueue_remove_first(&sem->fifo_wait); |
992 | 996 | ||
993 | if (first && !omlp_move(sem)) | 997 | /* don't replace tmp with omlp_move! shortcircuiting will break omlp for m=1 */ |
998 | int tmp = omlp_move(sem); | ||
999 | if (first && !tmp) | ||
994 | sem->num_free++; | 1000 | sem->num_free++; |
1001 | if (!first && tmp) | ||
1002 | first = __waitqueue_remove_first(&sem->fifo_wait); | ||
995 | 1003 | ||
996 | return first; | 1004 | return first; |
997 | } | 1005 | } |
@@ -1119,6 +1127,7 @@ static enum hrtimer_restart wake_fz_waiter(struct hrtimer* timer) | |||
1119 | { | 1127 | { |
1120 | struct omlp_semaphore* sem = container_of(timer, struct omlp_semaphore, wake_timer); | 1128 | struct omlp_semaphore* sem = container_of(timer, struct omlp_semaphore, wake_timer); |
1121 | sem->policed = 0; | 1129 | sem->policed = 0; |
1130 | BUG_ON(!sem->owner); | ||
1122 | wake_up_process(sem->owner); | 1131 | wake_up_process(sem->owner); |
1123 | 1132 | ||
1124 | return HRTIMER_NORESTART; | 1133 | return HRTIMER_NORESTART; |
@@ -1129,6 +1138,7 @@ static lt_t next_component_start(struct mtd_reservation* mtd_res, int cpu) | |||
1129 | int i_index; | 1138 | int i_index; |
1130 | lt_t resume_time; | 1139 | lt_t resume_time; |
1131 | 1140 | ||
1141 | BUG_ON(mtd_res->major_cycle_start[cpu] + mtd_res->intervals[cpu][mtd_res->interval_index[cpu]].start > litmus_clock()); | ||
1132 | i_index = (mtd_res->interval_index[cpu] + 1) % mtd_res->num_intervals[cpu]; | 1142 | i_index = (mtd_res->interval_index[cpu] + 1) % mtd_res->num_intervals[cpu]; |
1133 | resume_time = mtd_res->major_cycle_start[cpu]; | 1143 | resume_time = mtd_res->major_cycle_start[cpu]; |
1134 | if (!i_index) | 1144 | if (!i_index) |
@@ -1169,10 +1179,10 @@ int gedf_env_omlp_lock_cs(struct litmus_lock* l, lt_t cs_len) | |||
1169 | init_prio_waitqueue_entry(&wait, t, ULLONG_MAX - t_res->priority); | 1179 | init_prio_waitqueue_entry(&wait, t, ULLONG_MAX - t_res->priority); |
1170 | wait.wq.cs_len = cs_len; | 1180 | wait.wq.cs_len = cs_len; |
1171 | 1181 | ||
1172 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
1173 | |||
1174 | omlp_enqueue(sem, &wait); | 1182 | omlp_enqueue(sem, &wait); |
1175 | 1183 | ||
1184 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
1185 | |||
1176 | /* check if we need to activate priority inheritance */ | 1186 | /* check if we need to activate priority inheritance */ |
1177 | if (higher_res_prio(t_res, sem->hp_waiter_res)) { | 1187 | if (higher_res_prio(t_res, sem->hp_waiter_res)) { |
1178 | sem->hp_waiter = t; | 1188 | sem->hp_waiter = t; |
@@ -1251,6 +1261,7 @@ static int gedf_env_omlp_access_fz_check(struct litmus_lock* l, lt_t fz_len, lt_ | |||
1251 | schedule(); | 1261 | schedule(); |
1252 | TS_LOCK_RESUME; | 1262 | TS_LOCK_RESUME; |
1253 | } | 1263 | } |
1264 | |||
1254 | sem->policed = 0; | 1265 | sem->policed = 0; |
1255 | 1266 | ||
1256 | t_res = (struct ext_reservation *) tsk_rt(t)->plugin_state; | 1267 | t_res = (struct ext_reservation *) tsk_rt(t)->plugin_state; |
@@ -1275,22 +1286,23 @@ static int gedf_env_omlp_access_fz_check(struct litmus_lock* l, lt_t fz_len, lt_ | |||
1275 | cutahead_t = omlp_cutahead(sem, ULLONG_MAX); | 1286 | cutahead_t = omlp_cutahead(sem, ULLONG_MAX); |
1276 | if (cutahead_t) { | 1287 | if (cutahead_t) { |
1277 | wake_up_process(cutahead_t); | 1288 | wake_up_process(cutahead_t); |
1278 | local_irq_restore(flags); | ||
1279 | set_current_state(TASK_UNINTERRUPTIBLE); | 1289 | set_current_state(TASK_UNINTERRUPTIBLE); |
1280 | TS_LOCK_SUSPEND; | 1290 | TS_LOCK_SUSPEND; |
1291 | local_irq_restore(flags); | ||
1281 | schedule(); | 1292 | schedule(); |
1282 | TS_LOCK_RESUME; | 1293 | TS_LOCK_RESUME; |
1283 | } else { | 1294 | } else { |
1284 | /* go on a wait queue to be woken up when the parent reservation | 1295 | /* go on a wait queue to be woken up when the parent reservation |
1285 | * is next scheduled */ | 1296 | * is next scheduled */ |
1286 | BUG_ON(t != sem->owner); | 1297 | BUG_ON(t != sem->owner); |
1287 | resume_time = next_component_start(mtd_res, cpu); | 1298 | resume_time = max(next_component_start(mtd_res, cpu), litmus_clock()); |
1288 | hrtimer_start(&sem->wake_timer, | 1299 | hrtimer_start(&sem->wake_timer, |
1289 | ns_to_ktime(resume_time), | 1300 | ns_to_ktime(resume_time), |
1290 | HRTIMER_MODE_ABS_PINNED_HARD); | 1301 | HRTIMER_MODE_ABS_PINNED_HARD); |
1291 | local_irq_restore(flags); | 1302 | BUG_ON(!hrtimer_active(&sem->wake_timer)); |
1292 | set_current_state(TASK_UNINTERRUPTIBLE); | 1303 | set_current_state(TASK_INTERRUPTIBLE); |
1293 | TS_LOCK_SUSPEND; | 1304 | TS_LOCK_SUSPEND; |
1305 | local_irq_restore(flags); | ||
1294 | schedule(); | 1306 | schedule(); |
1295 | TS_LOCK_RESUME; | 1307 | TS_LOCK_RESUME; |
1296 | } | 1308 | } |
@@ -1330,6 +1342,7 @@ static int gedf_env_omlp_cancel_watchdog(struct litmus_lock* l) | |||
1330 | schedule(); | 1342 | schedule(); |
1331 | TS_LOCK_RESUME; | 1343 | TS_LOCK_RESUME; |
1332 | } | 1344 | } |
1345 | |||
1333 | sem->policed = 0; | 1346 | sem->policed = 0; |
1334 | 1347 | ||
1335 | BUG_ON(!tsk_rt(t)->ctrl_page); | 1348 | BUG_ON(!tsk_rt(t)->ctrl_page); |
@@ -1347,6 +1360,8 @@ static void __gedf_env_omlp_unlock(struct omlp_semaphore* sem, struct task_struc | |||
1347 | 1360 | ||
1348 | tsk_rt(t)->num_locks_held--; | 1361 | tsk_rt(t)->num_locks_held--; |
1349 | 1362 | ||
1363 | BUG_ON(sem->owner != t); | ||
1364 | |||
1350 | /* check if there are jobs waiting vor this resource */ | 1365 | /* check if there are jobs waiting vor this resource */ |
1351 | next = omlp_dequeue(sem); | 1366 | next = omlp_dequeue(sem); |
1352 | if (next) { | 1367 | if (next) { |
@@ -1381,9 +1396,10 @@ static void __gedf_env_omlp_unlock(struct omlp_semaphore* sem, struct task_struc | |||
1381 | 1396 | ||
1382 | /* wake up next */ | 1397 | /* wake up next */ |
1383 | wake_up_process(next); | 1398 | wake_up_process(next); |
1384 | } else | 1399 | } else { |
1385 | /* becomes available */ | 1400 | /* becomes available */ |
1386 | sem->owner = NULL; | 1401 | sem->owner = NULL; |
1402 | } | ||
1387 | 1403 | ||
1388 | /* we lose the benefit of priority inheritance (if any) */ | 1404 | /* we lose the benefit of priority inheritance (if any) */ |
1389 | if (tsk_rt(t)->plugin_state && ((struct ext_reservation *)tsk_rt(t)->plugin_state)->inh_res) | 1405 | if (tsk_rt(t)->plugin_state && ((struct ext_reservation *)tsk_rt(t)->plugin_state)->inh_res) |
@@ -1397,6 +1413,7 @@ static int gedf_env_omlp_unlock(struct litmus_lock* l) | |||
1397 | unsigned long flags; | 1413 | unsigned long flags; |
1398 | 1414 | ||
1399 | if (sem->owner != t) { | 1415 | if (sem->owner != t) { |
1416 | BUG_ON(true); | ||
1400 | return -EINVAL; | 1417 | return -EINVAL; |
1401 | } | 1418 | } |
1402 | 1419 | ||
@@ -1411,6 +1428,7 @@ static int gedf_env_omlp_unlock(struct litmus_lock* l) | |||
1411 | TS_LOCK_RESUME; | 1428 | TS_LOCK_RESUME; |
1412 | } | 1429 | } |
1413 | sem->policed = 0; | 1430 | sem->policed = 0; |
1431 | |||
1414 | spin_lock_irqsave(&sem->fifo_wait.lock, flags); | 1432 | spin_lock_irqsave(&sem->fifo_wait.lock, flags); |
1415 | 1433 | ||
1416 | __gedf_env_omlp_unlock(sem, t); | 1434 | __gedf_env_omlp_unlock(sem, t); |
@@ -1451,23 +1469,21 @@ static enum hrtimer_restart omlp_fz_police(struct hrtimer *timer) | |||
1451 | mtd_res = (struct mtd_reservation*)t_res->par_env->res; | 1469 | mtd_res = (struct mtd_reservation*)t_res->par_env->res; |
1452 | 1470 | ||
1453 | cpu = smp_processor_id(); | 1471 | cpu = smp_processor_id(); |
1454 | |||
1455 | sem->policed = 1; | ||
1456 | |||
1457 | BUG_ON(t != sem->owner); | ||
1458 | 1472 | ||
1459 | // set wake timer | 1473 | // set wake timer |
1460 | resume_time = next_component_start(mtd_res, cpu); | 1474 | resume_time = max(next_component_start(mtd_res, cpu), litmus_clock()); |
1461 | hrtimer_start(&sem->wake_timer, | 1475 | hrtimer_start(&sem->wake_timer, |
1462 | ns_to_ktime(resume_time), | 1476 | ns_to_ktime(resume_time), |
1463 | HRTIMER_MODE_ABS_PINNED_HARD); | 1477 | HRTIMER_MODE_ABS_PINNED_HARD); |
1478 | BUG_ON(!hrtimer_active(&sem->wake_timer)); | ||
1479 | set_current_state(TASK_INTERRUPTIBLE); | ||
1480 | sem->policed = 1; | ||
1464 | 1481 | ||
1465 | //memset(&info, 0, sizeof(struct kernel_siginfo)); | 1482 | //memset(&info, 0, sizeof(struct kernel_siginfo)); |
1466 | //info.si_signo = 31; | 1483 | //info.si_signo = 31; |
1467 | //info.si_code = SI_KERNEL; | 1484 | //info.si_code = SI_KERNEL; |
1468 | //send_sig_info(31, &info, t); | 1485 | //send_sig_info(31, &info, t); |
1469 | 1486 | ||
1470 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
1471 | litmus_reschedule_local(); | 1487 | litmus_reschedule_local(); |
1472 | 1488 | ||
1473 | return HRTIMER_NORESTART; | 1489 | return HRTIMER_NORESTART; |
@@ -1515,8 +1531,10 @@ static struct litmus_lock* gedf_env_new_omlp(void) | |||
1515 | struct gedf_reservation_environment *gedf_env; | 1531 | struct gedf_reservation_environment *gedf_env; |
1516 | 1532 | ||
1517 | sem = kmalloc(sizeof(*sem), GFP_ATOMIC); | 1533 | sem = kmalloc(sizeof(*sem), GFP_ATOMIC); |
1518 | if (!sem) | 1534 | if (!sem) { |
1535 | BUG_ON(true); | ||
1519 | return NULL; | 1536 | return NULL; |
1537 | } | ||
1520 | 1538 | ||
1521 | t_res = (struct ext_reservation *) tsk_rt(current)->plugin_state; | 1539 | t_res = (struct ext_reservation *) tsk_rt(current)->plugin_state; |
1522 | gedf_env = container_of(t_res->par_env, struct gedf_reservation_environment, env); | 1540 | gedf_env = container_of(t_res->par_env, struct gedf_reservation_environment, env); |
@@ -1528,7 +1546,7 @@ static struct litmus_lock* gedf_env_new_omlp(void) | |||
1528 | init_waitqueue_head(&sem->prio_wait); | 1546 | init_waitqueue_head(&sem->prio_wait); |
1529 | sem->litmus_lock.ops = &gedf_env_omlp_lock_ops; | 1547 | sem->litmus_lock.ops = &gedf_env_omlp_lock_ops; |
1530 | /* free = cpus-1 since ->owner is the head and also counted */ | 1548 | /* free = cpus-1 since ->owner is the head and also counted */ |
1531 | sem->num_free = gedf_env->num_cpus - 1; | 1549 | sem->num_free = 0; //gedf_env->num_active_cpus - 1; |
1532 | hrtimer_init(&sem->police_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD); | 1550 | hrtimer_init(&sem->police_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD); |
1533 | hrtimer_init(&sem->wake_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD); | 1551 | hrtimer_init(&sem->wake_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD); |
1534 | sem->police_timer.function = omlp_fz_police; | 1552 | sem->police_timer.function = omlp_fz_police; |
@@ -1614,6 +1632,7 @@ long alloc_gedf_reservation_environment( | |||
1614 | INIT_LIST_HEAD(&gedf_env->env.all_reservations); | 1632 | INIT_LIST_HEAD(&gedf_env->env.all_reservations); |
1615 | 1633 | ||
1616 | gedf_env->num_cpus = 0; | 1634 | gedf_env->num_cpus = 0; |
1635 | gedf_env->num_active_cpus = 0; | ||
1617 | bheap_init(&gedf_env->cpu_heap); | 1636 | bheap_init(&gedf_env->cpu_heap); |
1618 | for (i = 0; i < max_cpus; i++) { | 1637 | for (i = 0; i < max_cpus; i++) { |
1619 | gedf_env->cpu_entries[i].id = i; | 1638 | gedf_env->cpu_entries[i].id = i; |