diff options
| author | Bjoern B. Brandenburg <bbb@cs.unc.edu> | 2007-10-27 15:47:22 -0400 |
|---|---|---|
| committer | Bjoern B. Brandenburg <bbb@cs.unc.edu> | 2007-10-27 15:47:22 -0400 |
| commit | a4d08351aacc56e79b03c85fc9bbcb03567309fa (patch) | |
| tree | 8f15cd5c23a86c72c7376c3da8248e4850629b7f /kernel/sched_edf_hsb.c | |
| parent | 2e4b97e2e722699fe2ce2699714887eac0ff84cc (diff) | |
cleanup: tidy up code before release2007.2
This gets rid of quite a few FIXMEs and TODOs, as well as some cruft.
Diffstat (limited to 'kernel/sched_edf_hsb.c')
| -rw-r--r-- | kernel/sched_edf_hsb.c | 81 |
1 file changed, 1 insertion, 80 deletions
diff --git a/kernel/sched_edf_hsb.c b/kernel/sched_edf_hsb.c index c2a4c6c679..a2f670d994 100644 --- a/kernel/sched_edf_hsb.c +++ b/kernel/sched_edf_hsb.c | |||
| @@ -1077,52 +1077,6 @@ static struct task_struct* null_heuristic(struct task_struct *prev, | |||
| 1077 | return NULL; | 1077 | return NULL; |
| 1078 | } | 1078 | } |
| 1079 | 1079 | ||
| 1080 | /*static struct task_struct* history_heuristic(struct task_struct *prev, rt_domain_t* edf) | ||
| 1081 | { | ||
| 1082 | struct list_head *pos; | ||
| 1083 | struct task_struct* tsk = NULL; | ||
| 1084 | struct task_struct* cur; | ||
| 1085 | |||
| 1086 | if (is_realtime(prev) && is_running(prev) && | ||
| 1087 | get_rt_flags(prev) != RT_F_SLEEP) | ||
| 1088 | tsk = prev; | ||
| 1089 | list_for_each(pos, &edf->ready_queue) { | ||
| 1090 | cur = list_entry(pos, struct task_struct, rt_list); | ||
| 1091 | if (!tsk || | ||
| 1092 | tsk->rt_param.stats.nontardy_jobs_ctr > | ||
| 1093 | cur->rt_param.stats.nontardy_jobs_ctr) | ||
| 1094 | tsk = cur; | ||
| 1095 | } | ||
| 1096 | if (tsk && tsk->rt_param.stats.nontardy_jobs_ctr < 5) | ||
| 1097 | return tsk; | ||
| 1098 | else | ||
| 1099 | return NULL; | ||
| 1100 | } | ||
| 1101 | */ | ||
| 1102 | /* TODO: write slack heuristic.*/ | ||
| 1103 | /*static struct task_struct* slack_heuristic(struct task_struct *prev, rt_domain_t* edf) | ||
| 1104 | { | ||
| 1105 | struct list_head *pos; | ||
| 1106 | struct task_struct* tsk = NULL; | ||
| 1107 | struct task_struct* cur; | ||
| 1108 | |||
| 1109 | if (is_realtime(prev) && is_running(prev) && | ||
| 1110 | get_rt_flags(prev) != RT_F_SLEEP) | ||
| 1111 | tsk = prev; | ||
| 1112 | list_for_each(pos, &edf->ready_queue) { | ||
| 1113 | cur = list_entry(pos, struct task_struct, rt_list); | ||
| 1114 | if (!tsk || | ||
| 1115 | tsk->rt_param.stats.nontardy_job_ctr > | ||
| 1116 | cur->rt_param.stats.nontardy_job_ctr) | ||
| 1117 | tsk = cur; | ||
| 1118 | } | ||
| 1119 | if (tsk && tsk->rt_param.stats.nontardy_job_ctr < 5) | ||
| 1120 | return tsk; | ||
| 1121 | else | ||
| 1122 | return NULL; | ||
| 1123 | }*/ | ||
| 1124 | |||
| 1125 | |||
| 1126 | /* caller holds all locks | 1080 | /* caller holds all locks |
| 1127 | */ | 1081 | */ |
| 1128 | 1082 | ||
| @@ -1229,7 +1183,7 @@ static int schedule_srt_be_cap(struct task_struct *prev, | |||
| 1229 | int deactivate = 1; | 1183 | int deactivate = 1; |
| 1230 | be_server_t* bes; | 1184 | be_server_t* bes; |
| 1231 | cpu_state_t* state; | 1185 | cpu_state_t* state; |
| 1232 | int type; /* FIXME: Initialize? */ | 1186 | int type = BG; |
| 1233 | 1187 | ||
| 1234 | reschedule: | 1188 | reschedule: |
| 1235 | write_lock_irqsave(&srt.ready_lock, flags); | 1189 | write_lock_irqsave(&srt.ready_lock, flags); |
| @@ -1531,9 +1485,6 @@ static void hsb_wake_up_task(struct task_struct *task) | |||
| 1531 | } | 1485 | } |
| 1532 | else if (task->time_slice) { | 1486 | else if (task->time_slice) { |
| 1533 | /* came back in time before deadline | 1487 | /* came back in time before deadline |
| 1534 | * TODO: clip budget to fit into period, otherwise it could | ||
| 1535 | * cause a deadline overrun in the next period, i.e. | ||
| 1536 | * over allocation in the next period. | ||
| 1537 | */ | 1488 | */ |
| 1538 | set_rt_flags(task, RT_F_RUNNING); | 1489 | set_rt_flags(task, RT_F_RUNNING); |
| 1539 | hsb_add_ready(task); | 1490 | hsb_add_ready(task); |
| @@ -1546,43 +1497,16 @@ static void hsb_wake_up_task(struct task_struct *task) | |||
| 1546 | 1497 | ||
| 1547 | static void hsb_task_blocks(struct task_struct *t) | 1498 | static void hsb_task_blocks(struct task_struct *t) |
| 1548 | { | 1499 | { |
| 1549 | /* CLEANUP: The BUG_ON actually triggered in a really weird case if a | ||
| 1550 | * BEST_EFFORT gets caught in a migration right after execv | ||
| 1551 | * The next version of Litmus should deal with this more gracefully. | ||
| 1552 | */ | ||
| 1553 | |||
| 1554 | /*BUG_ON(!is_realtime(t));*/ | ||
| 1555 | /* not really anything to do since it can only block if | 1500 | /* not really anything to do since it can only block if |
| 1556 | * it is running, and when it is not running it is not in any | 1501 | * it is running, and when it is not running it is not in any |
| 1557 | * queue anyway. | 1502 | * queue anyway. |
| 1558 | * | ||
| 1559 | * TODO: Check whether the assumption is correct for SIGKILL and | ||
| 1560 | * SIGSTOP. | ||
| 1561 | */ | 1503 | */ |
| 1562 | TRACE("task %d blocks with budget=%d\n", t->pid, t->time_slice); | 1504 | TRACE("task %d blocks with budget=%d\n", t->pid, t->time_slice); |
| 1563 | /*BUG_ON(t->rt_list.next != LIST_POISON1);*/ | ||
| 1564 | /*BUG_ON(t->rt_list.prev != LIST_POISON2);*/ | ||
| 1565 | |||
| 1566 | if (is_be(t)) | 1505 | if (is_be(t)) |
| 1567 | sched_trace_job_completion(t); | 1506 | sched_trace_job_completion(t); |
| 1568 | } | 1507 | } |
| 1569 | 1508 | ||
| 1570 | 1509 | ||
| 1571 | /* When _tear_down is called, the task should not be in any queue any more | ||
| 1572 | * as it must have blocked first. We don't have any internal state for the task, | ||
| 1573 | * it is all in the task_struct. | ||
| 1574 | */ | ||
| 1575 | static long hsb_tear_down(struct task_struct * t) | ||
| 1576 | { | ||
| 1577 | /* CLEANUP: see hsb_task_blocks */ | ||
| 1578 | /*BUG_ON(!is_realtime(t)); | ||
| 1579 | TRACE("edf-hsb: tear down called for %d \n", t->pid); | ||
| 1580 | BUG_ON(t->array); | ||
| 1581 | BUG_ON(t->rt_list.next != LIST_POISON1); | ||
| 1582 | BUG_ON(t->rt_list.prev != LIST_POISON2);*/ | ||
| 1583 | return 0; | ||
| 1584 | } | ||
| 1585 | |||
| 1586 | static int hsb_mode_change(int new_mode) | 1510 | static int hsb_mode_change(int new_mode) |
| 1587 | { | 1511 | { |
| 1588 | int cpu; | 1512 | int cpu; |
| @@ -1769,8 +1693,6 @@ static sched_plugin_t s_plugin __cacheline_aligned_in_smp = { | |||
| 1769 | .scheduler_tick = hsb_scheduler_tick,\ | 1693 | .scheduler_tick = hsb_scheduler_tick,\ |
| 1770 | .prepare_task = hsb_prepare_task,\ | 1694 | .prepare_task = hsb_prepare_task,\ |
| 1771 | .sleep_next_period = edf_sleep_next_period,\ | 1695 | .sleep_next_period = edf_sleep_next_period,\ |
| 1772 | .tear_down = hsb_tear_down,\ | ||
| 1773 | .shutdown_hook = 0,\ | ||
| 1774 | .schedule = hsb_schedule,\ | 1696 | .schedule = hsb_schedule,\ |
| 1775 | .finish_switch = hsb_finish_switch,\ | 1697 | .finish_switch = hsb_finish_switch,\ |
| 1776 | .mode_change = hsb_mode_change,\ | 1698 | .mode_change = hsb_mode_change,\ |
| @@ -1789,7 +1711,6 @@ sched_plugin_t *__init init_edf_hsb_plugin(void) | |||
| 1789 | capacity_queue_init(&cap_queue); | 1711 | capacity_queue_init(&cap_queue); |
| 1790 | edf_domain_init(&srt, srt_check_resched); | 1712 | edf_domain_init(&srt, srt_check_resched); |
| 1791 | edf_domain_init(&be, be_check_resched); | 1713 | edf_domain_init(&be, be_check_resched); |
| 1792 | /* TODO: Re-implement FIFO time slicing, was 50ms. */ | ||
| 1793 | fifo_domain_init(&hsb_fifo, NULL); | 1714 | fifo_domain_init(&hsb_fifo, NULL); |
| 1794 | for (i = 0; i < NR_CPUS; i++) | 1715 | for (i = 0; i < NR_CPUS; i++) |
| 1795 | { | 1716 | { |
