diff options
author | Joshua Bakita <jbakita@cs.unc.edu> | 2020-03-06 18:04:39 -0500 |
---|---|---|
committer | Joshua Bakita <jbakita@cs.unc.edu> | 2020-03-06 18:04:39 -0500 |
commit | a5849efb79030b7bdc11b3328f5a6b7367c28b7e (patch) | |
tree | 8373ecef95d32c19dfc61099e336d3ab2e949f79 | |
parent | 289013db51e6e7c802c092dba687963750407165 (diff) |
Fix edge cases in initialization and state tracking
Adds missing allocations in init and starts fixing some bad
tracking of container->scheduled in g_finish_switch(), plus
some fixes for migrating tasks and background scheduling.
-rw-r--r-- | litmus/sched_edfsc.c | 42 |
1 file changed, 29 insertions, 13 deletions
diff --git a/litmus/sched_edfsc.c b/litmus/sched_edfsc.c index 10ebf7c098f4..b7ae97d38267 100644 --- a/litmus/sched_edfsc.c +++ b/litmus/sched_edfsc.c | |||
@@ -73,6 +73,13 @@ u64 sys_util, future_sys_util; | |||
73 | #define from_fp(a) ((a) >> FP_SHIFT) | 73 | #define from_fp(a) ((a) >> FP_SHIFT) |
74 | #define fp_div(a, b) (to_fp((a)) / (b)) | 74 | #define fp_div(a, b) (to_fp((a)) / (b)) |
75 | 75 | ||
76 | // We need these from litmus.c for partially initializing our container tasks | ||
77 | struct release_heap* release_heap_alloc(int gfp_flags); | ||
78 | void release_heap_free(struct release_heap* rh); | ||
79 | struct bheap_node* bheap_node_alloc(int gfp_flags); | ||
80 | void bheap_node_free(struct bheap_node* hn); | ||
81 | |||
82 | |||
76 | /* Do a backwards comparison based on future_f_util so that heavier containers | 83 | /* Do a backwards comparison based on future_f_util so that heavier containers |
77 | * will come first | 84 | * will come first |
78 | */ | 85 | */ |
@@ -134,7 +141,7 @@ static void preempt(cpu_entry_t *entry) | |||
134 | */ | 141 | */ |
135 | 142 | ||
136 | static struct bheap_node* edfsc_cpu_heap_node; | 143 | static struct bheap_node* edfsc_cpu_heap_node; |
137 | static struct bheap edfsc_cpu_heap; | 144 | static struct bheap edfsc_cpu_heap; |
138 | 145 | ||
139 | static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b) | 146 | static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b) |
140 | { | 147 | { |
@@ -1204,6 +1211,7 @@ static void edfsc_setup_domain_proc(void) | |||
1204 | static long edfsc_activate_plugin(void) | 1211 | static long edfsc_activate_plugin(void) |
1205 | { | 1212 | { |
1206 | lt_t now; | 1213 | lt_t now; |
1214 | int i; | ||
1207 | now = litmus_clock(); | 1215 | now = litmus_clock(); |
1208 | /* TODO This will need to: | 1216 | /* TODO This will need to: |
1209 | * - Initialize the containers and container tasks | 1217 | * - Initialize the containers and container tasks |
@@ -1212,6 +1220,9 @@ static long edfsc_activate_plugin(void) | |||
1212 | * be reusable if we don't destroy them when the plugin is deactivated) | 1220 | * be reusable if we don't destroy them when the plugin is deactivated) |
1213 | * - ... | 1221 | * - ... |
1214 | */ | 1222 | */ |
1223 | for (i = 0; i < num_online_cpus(); i++) | ||
1224 | requeue(&container_tasks[i]); | ||
1225 | |||
1215 | // Start the container boundary timer | 1226 | // Start the container boundary timer |
1216 | hrtimer_start(&container_release_timer, | 1227 | hrtimer_start(&container_release_timer, |
1217 | ns_to_ktime(now + LITMUS_QUANTUM_LENGTH_NS), | 1228 | ns_to_ktime(now + LITMUS_QUANTUM_LENGTH_NS), |
@@ -1306,12 +1317,14 @@ static int __init init_edfsc(void) | |||
1306 | HRTIMER_MODE_ABS_PINNED); | 1317 | HRTIMER_MODE_ABS_PINNED); |
1307 | container_release_timer.function = container_boundary; | 1318 | container_release_timer.function = container_boundary; |
1308 | 1319 | ||
1309 | container_tasks = kmalloc(sizeof(struct task_struct) * num_online_cpus(), GFP_KERNEL); | 1320 | container_tasks = kmalloc(sizeof(struct task_struct) * num_online_cpus(), GFP_KERNEL); |
1310 | container_domains = kmalloc(sizeof(cont_domain_t) * num_online_cpus(), GFP_KERNEL); | 1321 | container_domains = kmalloc(sizeof(cont_domain_t) * num_online_cpus(), GFP_KERNEL); |
1311 | container_list = kmalloc(sizeof(cont_domain_t*) * num_online_cpus(), GFP_KERNEL); | 1322 | container_list = kmalloc(sizeof(cont_domain_t*) * num_online_cpus(), GFP_KERNEL); |
1312 | |||
1313 | edfsc_cpu_heap_node = kmalloc(sizeof(struct bheap_node) * num_online_cpus(), GFP_KERNEL); | 1323 | edfsc_cpu_heap_node = kmalloc(sizeof(struct bheap_node) * num_online_cpus(), GFP_KERNEL); |
1314 | 1324 | ||
1325 | memset(container_tasks, 0, sizeof(struct task_struct) * num_online_cpus()); | ||
1326 | memset(container_domains, 0, sizeof(cont_domain_t) * num_online_cpus()); | ||
1327 | |||
1315 | // Initialize container domains | 1328 | // Initialize container domains |
1316 | for (i = 0; i < num_online_cpus(); i++) { | 1329 | for (i = 0; i < num_online_cpus(); i++) { |
1317 | edf_domain_init(&container_domains[i].domain, c_check_resched, NULL); | 1330 | edf_domain_init(&container_domains[i].domain, c_check_resched, NULL); |
@@ -1320,7 +1333,7 @@ static int __init init_edfsc(void) | |||
1320 | hrtimer_init(&(container_domains[i].idle_enforcement_timer), CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | 1333 | hrtimer_init(&(container_domains[i].idle_enforcement_timer), CLOCK_MONOTONIC, HRTIMER_MODE_ABS); |
1321 | container_domains[i].idle_enforcement_timer.function = on_idle_enforcement_timeout; | 1334 | container_domains[i].idle_enforcement_timer.function = on_idle_enforcement_timeout; |
1322 | 1335 | ||
1323 | container_tasks[i].policy = SCHED_LITMUS; | 1336 | |
1324 | tsk_rt(&container_tasks[i])->scheduled_on = NO_CPU; | 1337 | tsk_rt(&container_tasks[i])->scheduled_on = NO_CPU; |
1325 | tsk_rt(&container_tasks[i])->task_params.exec_cost = LITMUS_QUANTUM_LENGTH_NS / 2; | 1338 | tsk_rt(&container_tasks[i])->task_params.exec_cost = LITMUS_QUANTUM_LENGTH_NS / 2; |
1326 | tsk_rt(&container_tasks[i])->task_params.period = | 1339 | tsk_rt(&container_tasks[i])->task_params.period = |
@@ -1332,15 +1345,19 @@ static int __init init_edfsc(void) | |||
1332 | tsk_rt(&container_tasks[i])->domain = &gsched_domain; | 1345 | tsk_rt(&container_tasks[i])->domain = &gsched_domain; |
1333 | tsk_rt(&container_tasks[i])->edfsc_params.domain = &container_domains[i]; | 1346 | tsk_rt(&container_tasks[i])->edfsc_params.domain = &container_domains[i]; |
1334 | tsk_rt(&container_tasks[i])->edfsc_params.can_release = 0; | 1347 | tsk_rt(&container_tasks[i])->edfsc_params.can_release = 0; |
1335 | |||
1336 | tsk_rt(&container_tasks[i])->sporadic_release = 0; | 1348 | tsk_rt(&container_tasks[i])->sporadic_release = 0; |
1337 | |||
1338 | tsk_rt(&container_tasks[i])->heap_node = kmalloc(sizeof(struct bheap_node), GFP_KERNEL); | ||
1339 | bheap_node_init(&tsk_rt(&container_tasks[i])->heap_node, &container_tasks[i]); | ||
1340 | requeue(&container_tasks[i]); | ||
1341 | |||
1342 | tsk_rt(&container_tasks[i])->edfsc_params.id = i; | 1349 | tsk_rt(&container_tasks[i])->edfsc_params.id = i; |
1343 | // TODO maybe more to do? | 1350 | tsk_rt(&container_tasks[i])->heap_node = bheap_node_alloc(GFP_ATOMIC); |
1351 | tsk_rt(&container_tasks[i])->rel_heap = release_heap_alloc(GFP_ATOMIC); | ||
1352 | |||
1353 | if (!tsk_rt(&container_tasks[i])->heap_node || !tsk_rt(&container_tasks[i])->rel_heap) { | ||
1354 | printk(KERN_WARNING "litmus: no more heap node memory!?\n"); | ||
1355 | return -ENOMEM; | ||
1356 | } else { | ||
1357 | bheap_node_init(&tsk_rt(&container_tasks[i])->heap_node, &container_tasks[i]); | ||
1358 | } | ||
1359 | |||
1360 | container_tasks[i].policy = SCHED_LITMUS; | ||
1344 | 1361 | ||
1345 | // Populate the container_list while we're at it. | 1362 | // Populate the container_list while we're at it. |
1346 | container_list[i] = &container_domains[i]; | 1363 | container_list[i] = &container_domains[i]; |
@@ -1355,7 +1372,6 @@ static int __init init_edfsc(void) | |||
1355 | entry->scheduled = NULL; | 1372 | entry->scheduled = NULL; |
1356 | } | 1373 | } |
1357 | 1374 | ||
1358 | |||
1359 | return register_sched_plugin(&edfsc_plugin); | 1375 | return register_sched_plugin(&edfsc_plugin); |
1360 | } | 1376 | } |
1361 | 1377 | ||