author     Namhoon Kim <namhoonk@cs.unc.edu>  2017-04-25 07:07:31 -0400
committer  Namhoon Kim <namhoonk@cs.unc.edu>  2017-04-25 07:07:31 -0400
commit     fef9a0ca81a70b5b36962026a40b71e437dcc5be (patch)
tree       bc0da70e49d5144e5a727b820da4bdc4c77218a0 /litmus/sched_mc2.c
parent     087949222cb5cf39037a821e14e2f3841ad2a5c0 (diff)
Deadlock fix
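The fix applies one recurring pattern across sched_mc2.c: call sites that disabled interrupts and took the per-CPU state lock in two separate steps are collapsed into a single raw_spin_lock_irqsave() / raw_spin_unlock_irqrestore() pair, and paths that previously re-enabled interrupts while still holding state->lock (just before calling mc2_update_timer_and_unlock(), which expects to drop that lock itself) now release the lock and the IRQ state together, then re-take the bare lock for that one call. A minimal schematic of the before/after shape, not compilable on its own, since struct mc2_cpu_state, state->lock, and mc2_update_timer_and_unlock() are this scheduler's own symbols:

    /* Before: after local_irq_restore(), state->lock is still held with
     * interrupts enabled, so an interrupt handler on this CPU that also
     * takes state->lock can deadlock against this path. */
    local_irq_save(flags);
    raw_spin_lock(&state->lock);
    /* ... manipulate reservations ... */
    local_irq_restore(flags);
    mc2_update_timer_and_unlock(state); /* drops state->lock */

    /* After: lock and IRQ state are released atomically, and the lock is
     * then re-taken only for the call that will drop it. */
    raw_spin_lock_irqsave(&state->lock, flags);
    /* ... manipulate reservations ... */
    raw_spin_unlock_irqrestore(&state->lock, flags);

    raw_spin_lock(&state->lock);
    mc2_update_timer_and_unlock(state); /* drops state->lock */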
Diffstat (limited to 'litmus/sched_mc2.c')
 -rw-r--r--  litmus/sched_mc2.c | 88
 1 file changed, 41 insertions(+), 47 deletions(-)
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index 0c9bb1812367..f8ad5c615e5d 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -148,8 +148,7 @@ asmlinkage long sys_enact_mode(void)
     }
     mode_changed = false;
     if (pending){ //MCR has entered
-        local_irq_save(flags);
-        raw_spin_lock(&state->lock);
+        raw_spin_lock_irqsave(&state->lock,flags);
         //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
         raw_spin_lock(&global_lock);
         raw_spin_lock(&mode_lock);
@@ -256,7 +255,9 @@ asmlinkage long sys_enact_mode(void)
         //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
         raw_spin_unlock(&global_lock);
         //raw_spin_unlock(&state->lock);
-        local_irq_restore(flags);
+        raw_spin_unlock_irqrestore(&state->lock, flags);
+
+        raw_spin_lock(&state->lock);
         mc2_update_timer_and_unlock(state);
     }
     this_cpu_inc(mode_counter);
@@ -299,8 +300,7 @@ asmlinkage long sys_enact_mode(void)
         //TRACE("CPU%d mode changed\n",state->cpu);
         hrtimer_cancel(&state->timer); //stop listening to old mode timers
         TRACE("Timer is cancelled at %llu. mode-change\n", litmus_clock());
-        local_irq_save(flags);
-        raw_spin_lock(&state->lock);
+        raw_spin_lock_irqsave(&state->lock, flags);
         state->sup_env = &state->sup_env_modes[mode];
         list_for_each(pos, &state->sup_env->active_reservations){
             res = list_entry(pos, struct reservation, list);
@@ -315,8 +315,9 @@ asmlinkage long sys_enact_mode(void)
             release_at(res->tsk, new_mode_basetime);
         }
         sup_update_time(state->sup_env, litmus_clock());
-        //raw_spin_unlock(&state->lock);
-        local_irq_restore(flags);
+        raw_spin_unlock_irqrestore(&state->lock, flags);
+
+        raw_spin_lock(&state->lock);
         mc2_update_timer_and_unlock(state);
         //local_irq_restore(flags);
 
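Both legs of sys_enact_mode(), the global mode-change path and the per-CPU path above, get the same treatment: acquisition is fused into raw_spin_lock_irqsave(), and state->lock is fully released with IRQs restored before being re-taken for mc2_update_timer_and_unlock(). This does open a short window between the unlock and the relock in which another CPU can grab state->lock; the timer-reprogramming logic presumably has to tolerate that.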
@@ -1048,7 +1049,7 @@ struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state)
         }
     }
 
-    raw_spin_lock(&mode_lock);
+    //raw_spin_lock(&mode_lock);
     list_for_each_entry_safe(res, next, &_global_env->active_reservations, list) {
         BUG_ON(!res);
         if (res->state == RESERVATION_ACTIVE && res->scheduled_on == NO_CPU) {
@@ -1069,12 +1070,12 @@ struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state)
                 res->is_ghost = NO_CPU;
                 TRACE_TASK(res->tsk, "R%d global dispatched on %d\n", res->id, state->cpu);
                 res->scheduled_on = state->cpu;
-                raw_spin_unlock(&mode_lock);
+                //raw_spin_unlock(&mode_lock);
                 return tsk;
             }
         }
     }
-    raw_spin_unlock(&mode_lock);
+    //raw_spin_unlock(&mode_lock);
     return NULL;
 }
 
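In mc2_global_dispatch() the mode_lock acquisition and both of its release points are commented out rather than deleted. A plausible reading, though the commit message does not say so explicitly: the dispatch loop already runs with global_lock held, so global_lock alone serializes the active reservation lists, and holding a second nested lock across the loop only added another edge to the lock graph that this deadlock fix is trying to thin out.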
@@ -1121,19 +1122,12 @@ static inline void post_schedule(struct task_struct *next, int cpu)
  */
 static struct task_struct* mc2_schedule(struct task_struct * prev)
 {
-    int np, blocks, exists, preempt, to_schedule;
+    int np, blocks, exists, to_schedule;
     /* next == NULL means "schedule background work". */
     lt_t now = litmus_clock();
     struct mc2_cpu_state *state = local_cpu_state();
 
     raw_spin_lock(&state->lock);
-
-    //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
-    raw_spin_lock(&global_lock);
-    preempt = resched_cpu[state->cpu];
-    resched_cpu[state->cpu] = 0;
-    //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
-    raw_spin_unlock(&global_lock);
 
     pre_schedule(prev, state->cpu);
 
@@ -1273,13 +1267,7 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
     }
     */
     post_schedule(state->scheduled, state->cpu);
-    /*
-    raw_spin_lock(&global_lock);
-    _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
-    update_cpu_prio(state);
-    //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
-    raw_spin_unlock(&global_lock);
-    */
+
     raw_spin_unlock(&state->lock);
     if (state->scheduled) {
         TRACE_TASK(state->scheduled, "scheduled.\n");
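mc2_schedule() no longer touches global_lock at all. The consume-and-clear of resched_cpu[] that sat inside a global_lock critical section while state->lock was held is gone (the flag is now cleared in mc2_finish_switch() instead; see the last hunk below), the local `preempt`, now unused, goes with it, and a block of commented-out update_cpu_prio() code is deleted outright. The net effect is that the hot scheduling path holds exactly one spinlock.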
@@ -1331,8 +1319,6 @@ static void mc2_task_resume(struct task_struct *tsk)
     /* Requeue only if self-suspension was already processed. */
     if (tinfo->has_departed)
     {
-        raw_spin_lock(&state->lock);
-        local_irq_save(flags);
         /* We don't want to consider jobs before synchronous releases */
         if (tsk_rt(tsk)->job_params.job_no == 2) {
             /*
@@ -1353,7 +1339,7 @@ static void mc2_task_resume(struct task_struct *tsk)
             TRACE_TASK(tsk, "INIT_FINISHED is SET\n");
             tsk_mc2_data(tsk)->init_finished = 1;
             atomic_dec(&num_sync_released);
-            //raw_spin_unlock(&global_lock);
+
             if (atomic_read(&num_sync_released) == 0) {
                 lt_t start = tsk_rt(tsk)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(tsk)->job_params.job_no);
                 TRACE("INIT_PHASE FINISHED. CHANGE TO MODE 1\n");
@@ -1362,9 +1348,8 @@ static void mc2_task_resume(struct task_struct *tsk)
             }
             TRACE_TASK(tsk, "INIT_FINISHED is SET, num_sync_released decreased to %d\n", atomic_read(&num_sync_released));
         }
+        raw_spin_lock_irqsave(&state->lock, flags);
 
-        // raw_spin_lock(&state->lock);
-        // local_irq_save(flags);
         /* Assumption: litmus_clock() is synchronized across cores,
          * since we might not actually be executing on tinfo->cpu
          * at the moment. */
@@ -1386,7 +1371,9 @@ static void mc2_task_resume(struct task_struct *tsk)
         //task_arrives(state, tsk);
         /* NOTE: drops state->lock */
         TRACE_TASK(tsk, "mc2_resume()\n");
-        local_irq_restore(flags);
+        raw_spin_unlock_irqrestore(&state->lock, flags);
+
+        raw_spin_lock(&state->lock);
         mc2_update_timer_and_unlock(state);
     } else {
         TRACE_TASK(tsk, "resume event ignored, still scheduled\n");
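In mc2_task_resume() the locking moves below the synchronous-release bookkeeping: num_sync_released is an atomic_t, so the decrement and test need no spinlock, and the old placement took state->lock (plus a separate local_irq_save()) across the whole init-phase branch. The resume path then finishes with the same release-and-relock sequence around mc2_update_timer_and_unlock() as the enact-mode paths.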
@@ -1426,9 +1413,9 @@ static long mc2_admit_task(struct task_struct *tsk)
 
     if (lv < CRIT_LEVEL_C) {
         state = cpu_state_for(task_cpu(tsk));
-        raw_spin_lock(&state->lock);
-        local_irq_save(flags);
-        //raw_spin_lock_irqsave(&state->lock, flags);
+        //local_irq_save(flags);
+        //raw_spin_lock(&state->lock);
+        raw_spin_lock_irqsave(&state->lock, flags);
 
         tinfo->mc2_param.crit = mp->crit;
         tinfo->cpu = task_cpu(tsk);
@@ -1484,15 +1471,15 @@ static long mc2_admit_task(struct task_struct *tsk)
             cpu_0_task_exist = true;
         }
         atomic_inc(&num_sync_released);
-        local_irq_restore(flags);
-        raw_spin_unlock(&state->lock);
-        //raw_spin_unlock_irqrestore(&state->lock, flags);
+        //raw_spin_unlock(&state->lock);
+        //local_irq_restore(flags);
+        raw_spin_unlock_irqrestore(&state->lock, flags);
     } else if (lv == CRIT_LEVEL_C) {
         // TRACE_TASK(tsk, "Task being admitted is Level C\n");
         state = local_cpu_state();
-        //raw_spin_lock_irqsave(&state->lock, flags);
-        raw_spin_lock(&state->lock);
-        local_irq_save(flags);
+        raw_spin_lock_irqsave(&state->lock, flags);
+        //local_irq_save(flags);
+        //raw_spin_lock(&state->lock);
         //state = local_cpu_state();
 
         //raw_spin_lock(&state->lock);
@@ -1538,6 +1525,7 @@ static long mc2_admit_task(struct task_struct *tsk)
 
         }
         }
+        raw_spin_unlock(&global_lock);
 
         if (!err){
             /* disable LITMUS^RT's per-thread budget enforcement */
@@ -1553,11 +1541,11 @@ static long mc2_admit_task(struct task_struct *tsk)
             raw_spin_unlock(&mode_lock);
 
         }
+
         atomic_inc(&num_sync_released);
-        raw_spin_unlock(&global_lock);
-        //raw_spin_unlock_irqrestore(&state->lock, flags);
-        local_irq_restore(flags);
-        raw_spin_unlock(&state->lock);
+        raw_spin_unlock_irqrestore(&state->lock, flags);
+        //raw_spin_unlock(&state->lock);
+        //local_irq_restore(flags);
     }
 
 
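mc2_admit_task() had the two-step pattern in both directions, and its exit sequence also restored IRQs in the wrong order relative to the unlock. A schematic of just the exit, with the same caveat as above that this is the shape of the change rather than compilable code:

    /* Before: interrupts are re-enabled while state->lock is still held
     * for one more statement, the same window the rest of this commit
     * closes. */
    local_irq_restore(flags);
    raw_spin_unlock(&state->lock);

    /* After: one atomic release of both. */
    raw_spin_unlock_irqrestore(&state->lock, flags);

The Level-C branch additionally hoists raw_spin_unlock(&global_lock) up to the end of the per-mode registration loop, so that only state->lock is still held by the time the IRQ state is restored.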
@@ -1603,7 +1591,7 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 
     local_irq_save(flags);
     raw_spin_lock(&state->lock);
-    
+
     if (lv == CRIT_LEVEL_C) {
         //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
         raw_spin_lock(&global_lock);
@@ -1640,7 +1628,10 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
         }
         /* NOTE: drops state->lock */
         TRACE("mc2_new()\n");
+        raw_spin_unlock(&state->lock);
+        local_irq_restore(flags);
 
+        raw_spin_lock(&state->lock);
         mc2_update_timer_and_unlock(state);
     } else {
         if (lv == CRIT_LEVEL_C){
@@ -1648,10 +1639,11 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
             raw_spin_unlock(&global_lock);
         }
         raw_spin_unlock(&state->lock);
-        //raw_spin_unlock(&_global_env.lock);
+        local_irq_restore(flags);
     }
     release = res->next_replenishment;
-    local_irq_restore(flags);
+
+    //local_irq_restore(flags);
 
     if (!release) {
         /*TRACE_TASK(tsk, "mc2_task_new() next_release = NULL\n");
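mc2_task_new() follows suit: on the path that reprograms the timer, state->lock is dropped and IRQs are restored before the bare relock for mc2_update_timer_and_unlock(); on the other path the unlock/restore pair now lives inside the branch. One side effect worth noting is that release = res->next_replenishment is now read after every lock has been dropped, where previously it was read with interrupts still disabled.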
@@ -2331,6 +2323,7 @@ static long mc2_activate_plugin(void)
         ce->will_schedule = false;
 
         raw_spin_lock_init(&state->lock);
+        printk(KERN_ALERT "CPU%d state->lock %p\n", cpu, &state->lock);
         state->cpu = cpu;
         state->scheduled = NULL;
         //for (lv = 0; lv < NUM_CRIT_LEVELS; lv++) {
@@ -2376,6 +2369,7 @@ static void mc2_finish_switch(struct task_struct *prev)
     if (lv == CRIT_LEVEL_C) {
         for (cpus = 0; cpus<NR_CPUS; cpus++) {
             if (resched_cpu[cpus]) {
+                resched_cpu[cpus] = 0;
                 litmus_reschedule(cpus);
             }
         }
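The last two hunks round out the rework. mc2_activate_plugin() prints each per-CPU lock address at boot; a KERN_ALERT pointer dump reads like a leftover debugging aid from chasing this deadlock. mc2_finish_switch() now clears resched_cpu[cpus] itself before kicking the remote CPU, taking over the consume-and-clear that was removed from mc2_schedule(), so that a stale flag cannot trigger repeated reschedules.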