 include/linux/litmus.h       | 23
 include/linux/pfair_math.h   |  3
 include/linux/sched_plugin.h |  8
 kernel/litmus.c              | 15
 kernel/sched.c               | 11
 kernel/sched_adaptive.c      |  4
 kernel/sched_edf_hsb.c       | 81
 kernel/sched_global_edf.c    | 12
 kernel/sched_part_edf.c      | 11
 kernel/sched_pfair.c         |  2
 kernel/sched_plugin.c        | 53
 kernel/sched_psn_edf.c       | 29
 12 files changed, 22 insertions(+), 230 deletions(-)
diff --git a/include/linux/litmus.h b/include/linux/litmus.h
index dc39ecbf7e..259594e922 100644
--- a/include/linux/litmus.h
+++ b/include/linux/litmus.h
@@ -35,7 +35,6 @@ typedef enum {
 typedef enum {
 	LITMUS_RESERVED_RANGE = 1024,
 
-	ENABLE_WEIGHT_CHANGE_SIGNAL
 } sched_setup_cmd_t;
 
 /* Runtime modes */
@@ -49,30 +48,14 @@ enum rt_mode_t {
 #define PLUGIN_PFAIR		"pfair"
 #define PLUGIN_PART_EDF		"part_edf"
 #define PLUGIN_GLOBAL_EDF	"global_edf"
-#define PLUGIN_PFAIR_STAGGER	"stagger"
-#define PLUGIN_PFAIR_DESYNC	"desync"
 #define PLUGIN_GLOBAL_EDF_NP	"global_edf_np"
 #define PLUGIN_EDF_HSB		"edf_hsb"
 #define PLUGIN_GSN_EDF		"gsn_edf"
 #define PLUGIN_PSN_EDF		"psn_edf"
 #define PLUGIN_ADAPTIVE		"adaptive"
 
-/* Additional clone flags
-   Indicates that the thread is to be used in
-   realtime mode, therefore it should not be
-   woken up in a linux manner,
-   we just set its state to TASK_STOPPED
-   It must be prepared and added to the ready queue explicitly
- */
-
-/* Type definition for our quantums */
-typedef unsigned long long quantum_t;
-
 extern spolicy sched_policy;
 
-extern unsigned long slot_size;
-extern unsigned long stagger_offset;
-
 /* RT mode start time */
 extern volatile unsigned long rt_start_time;
 
@@ -82,10 +65,6 @@ extern atomic_t rt_mode;
 #define get_rt_mode() (atomic_read(&rt_mode))
 #define set_rt_mode(a) atomic_set(&rt_mode,(a))
 
-/* CLEANUP: Should be queue_lock, does it really belong here? */
-extern spinlock_t litmus_task_set_lock;
-
-
 #define TRACE(fmt, args...) \
 	sched_trace_log_message("%d: " fmt, raw_smp_processor_id(), ## args)
 
@@ -99,7 +78,7 @@ extern spinlock_t litmus_task_set_lock;
 	do { if (cond) TRACE("BUG_ON(%s) at %s:%d "		\
 		"called from %p current=%s/%d state=%d "	\
 		"flags=%x mode=%d partition=%d cpu=%d rtflags=%d"\
-		" job=%u knp=%d timeslice=%u\n",	\
+		" job=%u knp=%d timeslice=%u\n", \
 	#cond, __FILE__, __LINE__, __builtin_return_address(0), current->comm, \
 	current->pid, current->state, current->flags, get_rt_mode(), \
 	get_partition(current), smp_processor_id(), get_rt_flags(current), \
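The TRACE macro retained above is the printf-style logging entry point used by the plugin hunks further down (see hsb_task_blocks and psnedf_wake_up_task). A hypothetical call site, for illustration only — the task variable and message are made up, not part of this commit:

	/* Illustrative sketch: TRACE prepends the current CPU id and logs
	 * through sched_trace_log_message(), as defined above. */
	TRACE("task %s/%d became ready at %lu\n", t->comm, t->pid, jiffies);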
diff --git a/include/linux/pfair_math.h b/include/linux/pfair_math.h
index dab1778f0b..b2a14e4c54 100644
--- a/include/linux/pfair_math.h
+++ b/include/linux/pfair_math.h
@@ -7,6 +7,9 @@
 #include <linux/litmus.h>
 #include <linux/sched.h>
 
+/* Type definition for our quantums */
+typedef unsigned long long quantum_t;
+
 /*
  * This file defines mathematical functions "ceiling", "floor",
  * and PFAIR specific functions for computing the release and
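Moving quantum_t into pfair_math.h keeps the type next to its only consumers: the "ceiling" and "floor" helpers the header comment advertises. A minimal sketch of what such helpers look like — the names quantum_floor/quantum_ceil are assumptions for illustration, not the actual LITMUS definitions:

	/* Sketch: integer floor and ceiling of a/b on the 64-bit quantum
	 * type; assumes b > 0. */
	static inline quantum_t quantum_floor(quantum_t a, unsigned long b)
	{
		return a / b;
	}

	static inline quantum_t quantum_ceil(quantum_t a, unsigned long b)
	{
		return (a + b - 1) / b;
	}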
diff --git a/include/linux/sched_plugin.h b/include/linux/sched_plugin.h
index fa0716f0eb..1ea8178b25 100644
--- a/include/linux/sched_plugin.h
+++ b/include/linux/sched_plugin.h
@@ -33,13 +33,6 @@ struct pi_semaphore {
  */
 typedef void runqueue_t;
 
-/********************* real-time callbacks ********************/
-
-/* Special plugin shutdown hook that clear plugin data structures
-   Currently is not supported
-*/
-typedef void (*plugin_shutdown_hook_t) (void);
-
 
 /********************* scheduler invocation ******************/
 
@@ -105,7 +98,6 @@ struct sched_plugin {
 	int ready_to_use;
 
 	/* management interface */
-	plugin_shutdown_hook_t shutdown_hook; /*currently unsupported */
 	mode_change_t mode_change;
 
 	/* scheduler invocation */
diff --git a/kernel/litmus.c b/kernel/litmus.c
index ee1e09a627..8f238ba979 100644
--- a/kernel/litmus.c
+++ b/kernel/litmus.c
@@ -19,25 +19,22 @@
 spolicy sched_policy = SCHED_DEFAULT;
 int sched_options = 0;
 
-/* avoid races with multiple task wake-ups */
-DEFINE_SPINLOCK(litmus_task_set_lock);
 
 /* This is a flag for switching the system into RT mode when it is booted up
- * In RT-mode non-realtime tasks are shut down and scheduled as spare
- * time available
+ * In RT-mode non-realtime tasks are scheduled as background tasks.
  */
 
 /* The system is booting in non-realtime mode */
 atomic_t rt_mode = ATOMIC_INIT(MODE_NON_RT);
 /* Here we specify a mode change to be made */
 atomic_t new_mode = ATOMIC_INIT(MODE_NON_RT);
 /* Number of RT tasks that exist in the system */
 atomic_t n_rt_tasks = ATOMIC_INIT(0);
 
-/* Only one process can perform mode change */
+/* Only one CPU may perform a mode change. */
 static queuelock_t mode_change_lock;
 
-/* A time instant when we switched to RT mode */
+/* The time instant when we switched to RT mode */
 volatile jiffie_t rt_start_time = 0;
 
 /* To send signals from the scheduler
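The reworded comment makes the serialization unit explicit: one CPU at a time, guarded by mode_change_lock. A hedged sketch of the guard pattern — the wrapper function and the queue_lock()/queue_unlock() primitive names are assumptions about the queuelock_t API, and the real mode-change path does much more:

	/* Sketch: serialize RT-mode transitions so that only one CPU
	 * manipulates rt_mode/new_mode at a time. */
	static void request_mode_change(int mode)
	{
		queue_lock(&mode_change_lock);
		atomic_set(&new_mode, mode);
		/* ... invoke the active plugin's mode_change() ... */
		queue_unlock(&mode_change_lock);
	}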
diff --git a/kernel/sched.c b/kernel/sched.c
index d7fd42a431..5ad4276e4b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -61,9 +61,11 @@
 #include <linux/sched_plugin.h>
 #include <linux/sched_trace.h>
 #include <linux/rt_param.h>
-
 #include <linux/trace.h>
 
+/* LITMUS: avoid races with multiple task wake-ups */
+DEFINE_SPINLOCK(litmus_task_set_lock);
+
 /*
  * Convert user-nice values [ -20 ... 0 ... 19 ]
  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
@@ -1693,7 +1695,6 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 
 	if (clone_flags & CLONE_REALTIME) {
 		/* just mark the task as stopped */
-		/* CLEANUP: Do we have to remove the task from the rq? */
 		p->state = TASK_STOPPED;
 		return;
 	}
@@ -5541,7 +5542,7 @@ static struct notifier_block __cpuinitdata migration_notifier = {
 	.priority = 10
 };
 
-int __init linux_migration_init(void)
+int __init migration_init(void)
 {
 	void *cpu = (void *)(long)smp_processor_id();
 	int err;
@@ -7003,7 +7004,7 @@ static int update_sched_domains(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-void __init linux_sched_init_smp(void)
+void __init sched_init_smp(void)
 {
 	cpumask_t non_isolated_cpus;
 
@@ -7036,7 +7037,7 @@ int in_sched_functions(unsigned long addr)
 		&& addr < (unsigned long)__sched_text_end);
 }
 
-void __init linux_sched_init(void)
+void __init sched_init(void)
 {
 	int i, j, k;
 
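Relocating litmus_task_set_lock into kernel/sched.c places the definition beside the wake-up paths it protects. A hedged sketch of the usage pattern its comment describes — the surrounding wrapper is illustrative, not code from this commit:

	/* Sketch: hold the task-set lock across a wake-up so that two
	 * concurrent wake-ups cannot both enqueue the same task. */
	unsigned long flags;

	spin_lock_irqsave(&litmus_task_set_lock, flags);
	if (p->state != TASK_RUNNING)
		curr_sched_plugin->wake_up_task(p);
	spin_unlock_irqrestore(&litmus_task_set_lock, flags);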
diff --git a/kernel/sched_adaptive.c b/kernel/sched_adaptive.c
index 97d06b1ad4..319ebbc11c 100644
--- a/kernel/sched_adaptive.c
+++ b/kernel/sched_adaptive.c
@@ -838,8 +838,6 @@ static noinline void preempt(cpu_entry_t *entry)
 	} else
 		/* in case that it is a remote CPU we have to defer the
 		 * the decision to the remote CPU
-		 * FIXME: We could save a few IPI's here if we leave the flag
-		 * set when we are waiting for a np_exit().
 		 */
 		if (!test_will_schedule(entry->cpu))
 			smp_send_reschedule(entry->cpu);
@@ -1432,7 +1430,7 @@ sched_plugin_t *__init init_adaptive_plugin(void)
 	fc_b = _frac( 303, 1000);
 
 	optimizer_period = 1000;
-	optimizer_min_invocation_sep = 50;
+	optimizer_min_invocation_sep = 200;
 	task_error_threshold = _frac(1, 2);
 
 	if (!s_plugin.ready_to_use)
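The deleted FIXME in preempt() named a real trade-off: redundant reschedule IPIs waste cycles but never produce an incorrect schedule. The surviving code already implements the cheap mitigation; condensed here for illustration (the local-CPU branch is assumed from context, not shown in the hunk above):

	/* Sketch: preempt locally via the need_resched flag; poke a remote
	 * CPU with an IPI only if it has not already decided to reschedule. */
	if (entry->cpu == smp_processor_id())
		set_tsk_need_resched(current);
	else if (!test_will_schedule(entry->cpu))
		smp_send_reschedule(entry->cpu);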
diff --git a/kernel/sched_edf_hsb.c b/kernel/sched_edf_hsb.c
index c2a4c6c679..a2f670d994 100644
--- a/kernel/sched_edf_hsb.c
+++ b/kernel/sched_edf_hsb.c
@@ -1077,52 +1077,6 @@ static struct task_struct* null_heuristic(struct task_struct *prev,
 	return NULL;
 }
 
-/*static struct task_struct* history_heuristic(struct task_struct *prev, rt_domain_t* edf)
-{
-	struct list_head	*pos;
-	struct task_struct*	tsk = NULL;
-	struct task_struct*	cur;
-
-	if (is_realtime(prev) && is_running(prev) &&
-	    get_rt_flags(prev) != RT_F_SLEEP)
-		tsk = prev;
-	list_for_each(pos, &edf->ready_queue) {
-		cur = list_entry(pos, struct task_struct, rt_list);
-		if (!tsk ||
-		    tsk->rt_param.stats.nontardy_jobs_ctr >
-		    cur->rt_param.stats.nontardy_jobs_ctr)
-			tsk = cur;
-	}
-	if (tsk && tsk->rt_param.stats.nontardy_jobs_ctr < 5)
-		return tsk;
-	else
-		return NULL;
-}
-*/
-/* TODO: write slack heuristic.*/
-/*static struct task_struct* slack_heuristic(struct task_struct *prev, rt_domain_t* edf)
-{
-	struct list_head	*pos;
-	struct task_struct*	tsk = NULL;
-	struct task_struct*	cur;
-
-	if (is_realtime(prev) && is_running(prev) &&
-	    get_rt_flags(prev) != RT_F_SLEEP)
-		tsk = prev;
-	list_for_each(pos, &edf->ready_queue) {
-		cur = list_entry(pos, struct task_struct, rt_list);
-		if (!tsk ||
-		    tsk->rt_param.stats.nontardy_job_ctr >
-		    cur->rt_param.stats.nontardy_job_ctr)
-			tsk = cur;
-	}
-	if (tsk && tsk->rt_param.stats.nontardy_job_ctr < 5)
-		return tsk;
-	else
-		return NULL;
-}*/
-
-
 /* caller holds all locks
  */
 
@@ -1229,7 +1183,7 @@ static int schedule_srt_be_cap(struct task_struct *prev,
 	int		deactivate = 1;
 	be_server_t*	bes;
 	cpu_state_t*	state;
-	int		type; /* FIXME: Initialize? */
+	int		type = BG;
 
 reschedule:
 	write_lock_irqsave(&srt.ready_lock, flags);
@@ -1531,9 +1485,6 @@ static void hsb_wake_up_task(struct task_struct *task)
 	}
 	else if (task->time_slice) {
 		/* came back in time before deadline
-		 * TODO: clip budget to fit into period, otherwise it could
-		 * cause a deadline overrun in the next period, i.e.
-		 * over allocation in the next period.
 		 */
 		set_rt_flags(task, RT_F_RUNNING);
 		hsb_add_ready(task);
@@ -1546,43 +1497,16 @@ static void hsb_wake_up_task(struct task_struct *task)
 
 static void hsb_task_blocks(struct task_struct *t)
 {
-	/* CLEANUP: The BUG_ON actually triggerd in a really weierd case if a
-	 * BEST_EFFORT gets caught in a migration right after execv
-	 * The next version of Litmus should deal with this more gracefully.
-	 */
-
-	/*BUG_ON(!is_realtime(t));*/
 	/* not really anything to do since it can only block if
 	 * it is running, and when it is not running it is not in any
 	 * queue anyway.
-	 *
-	 * TODO: Check whether the assumption is correct for SIGKILL and
-	 * SIGSTOP.
 	 */
 	TRACE("task %d blocks with budget=%d\n", t->pid, t->time_slice);
-	/*BUG_ON(t->rt_list.next != LIST_POISON1);*/
-	/*BUG_ON(t->rt_list.prev != LIST_POISON2);*/
-
 	if (is_be(t))
 		sched_trace_job_completion(t);
 }
 
 
-/* When _tear_down is called, the task should not be in any queue any more
- * as it must have blocked first. We don't have any internal state for the task,
- * it is all in the task_struct.
- */
-static long hsb_tear_down(struct task_struct * t)
-{
-	/* CLEANUP: see hsb_task_blocks */
-	/*BUG_ON(!is_realtime(t));
-	TRACE("edf-hsb: tear down called for %d \n", t->pid);
-	BUG_ON(t->array);
-	BUG_ON(t->rt_list.next != LIST_POISON1);
-	BUG_ON(t->rt_list.prev != LIST_POISON2);*/
-	return 0;
-}
-
 static int hsb_mode_change(int new_mode)
 {
 	int cpu;
@@ -1769,8 +1693,6 @@ static sched_plugin_t s_plugin __cacheline_aligned_in_smp = {
 	.scheduler_tick		= hsb_scheduler_tick,\
 	.prepare_task		= hsb_prepare_task,\
 	.sleep_next_period	= edf_sleep_next_period,\
-	.tear_down		= hsb_tear_down,\
-	.shutdown_hook		= 0,\
 	.schedule		= hsb_schedule,\
 	.finish_switch		= hsb_finish_switch,\
 	.mode_change		= hsb_mode_change,\
@@ -1789,7 +1711,6 @@ sched_plugin_t *__init init_edf_hsb_plugin(void)
 	capacity_queue_init(&cap_queue);
 	edf_domain_init(&srt, srt_check_resched);
 	edf_domain_init(&be, be_check_resched);
-	/* TODO: Re-implement FIFO time slicing, was 50ms. */
 	fifo_domain_init(&hsb_fifo, NULL);
 	for (i = 0; i < NR_CPUS; i++)
 	{
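The one behavioral fix in this file replaces the old "FIXME: Initialize?" with a defined default, type = BG. Why the default matters, in a condensed hypothetical — the SRT/BE constant names and the branch conditions are assumed from the srt/be domains used in this file, not taken from the commit:

	/* Sketch: without "= BG", any path that assigns no server type would
	 * read an uninitialized stack value below. */
	int type = BG;	/* safe default: background work */

	if (next_srt)
		type = SRT;
	else if (next_be)
		type = BE;
	/* 'type' is now well-defined on every path */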
diff --git a/kernel/sched_global_edf.c b/kernel/sched_global_edf.c
index 6e3928b7df..4b36bc5a48 100644
--- a/kernel/sched_global_edf.c
+++ b/kernel/sched_global_edf.c
@@ -64,7 +64,6 @@ static void adjust_cpu_queue(int exec_rt, jiffie_t deadline)
 	entry->executes_realtime = exec_rt;
 	entry->cur_deadline      = deadline;
 
-	/* TODO: move instead of del+reinsert */
 	list_del(&entry->list);
 	/* if we do not execute real-time jobs we just move
 	 * to the end of the queue
@@ -94,10 +93,6 @@ static void adjust_cpu_queue(int exec_rt, jiffie_t deadline)
  * kick the next if necessary, and so on. The caller is responsible for making
  * sure that it is not the last entry or that a reschedule is not necessary.
  *
- * TODO: This function is probably way too trigger happy. It should only send
- *       IPIs if the other CPU is not going to reschedule anyway. But that is
- *       hard to detect reliably. Too many schedules will hurt performance
- *       but do not cause incorrect schedules.
  */
 static int gedf_check_resched(rt_domain_t *edf)
 {
@@ -336,9 +331,6 @@ static void gedf_wake_up_task(struct task_struct *task)
 	}
 	else if (task->time_slice) {
 		/* came back in time before deadline
-		 * TODO: clip budget to fit into period, otherwise it could
-		 * cause a deadline overrun in the next period, i.e.
-		 * over allocation in the next period.
 		 */
 		set_rt_flags(task, RT_F_RUNNING);
 		add_ready(&gedf, task);
@@ -356,8 +348,6 @@ static void gedf_task_blocks(struct task_struct *t)
 	 * it is running, and when it is not running it is not in any
 	 * queue anyway.
 	 *
-	 * TODO: Check whether the assumption is correct for SIGKILL and
-	 * SIGSTOP.
 	 */
 	TRACE("task %d blocks with budget=%d\n", t->pid, t->time_slice);
 	BUG_ON(t->rt_list.next != LIST_POISON1);
@@ -433,7 +423,6 @@ static sched_plugin_t s_plugin __cacheline_aligned_in_smp = {
 	.prepare_task		= gedf_prepare_task,\
 	.sleep_next_period	= edf_sleep_next_period,\
 	.tear_down		= gedf_tear_down,\
-	.shutdown_hook		= 0,\
 	.schedule		= gedf_schedule,\
 	.finish_switch		= gedf_finish_switch,\
 	.mode_change		= gedf_mode_change,\
@@ -538,7 +527,6 @@ static int gedf_np_check_resched(rt_domain_t *edf)
 	.prepare_task		= gedf_prepare_task,\
 	.sleep_next_period	= edf_sleep_next_period,\
 	.tear_down		= gedf_tear_down,\
-	.shutdown_hook		= 0,\
 	.schedule		= gedf_schedule,\
 	.finish_switch		= gedf_finish_switch,\
 	.mode_change		= gedf_mode_change,\
diff --git a/kernel/sched_part_edf.c b/kernel/sched_part_edf.c
index 8166c8f4c6..a792ac5d03 100644
--- a/kernel/sched_part_edf.c
+++ b/kernel/sched_part_edf.c
@@ -241,10 +241,10 @@ static void part_edf_wake_up_task(struct task_struct *task)
 		add_ready(edf, task);
 
 	} else if (task->time_slice) {
-		/* came back in time before deadline
-		 * TODO: clip budget to fit into period, otherwise it could
-		 * cause a deadline overrun in the next period, i.e.
-		 * over allocation in the next period.
+		/* Came back in time before deadline. This may cause
+		 * deadline overruns, but since we don't handle suspensions
+		 * in the analytical model, we don't care since we can't
+		 * guarantee anything at all if tasks block.
 		 */
 		set_rt_flags(task, RT_F_RUNNING);
 		add_ready(edf, task);
@@ -262,8 +262,6 @@ static void part_edf_task_blocks(struct task_struct *t)
 	 * it is running, and when it is not running it is not in any
 	 * queue anyway.
 	 *
-	 * TODO: Check whether the assumption is correct for SIGKILL and
-	 * SIGSTOP.
 	 */
 	TRACE("task %d blocks with budget=%d\n", t->pid, t->time_slice);
 	BUG_ON(in_list(&t->rt_list));
@@ -313,7 +311,6 @@ static sched_plugin_t s_plugin __cacheline_aligned_in_smp = {
 	.prepare_task		= part_edf_prepare_task,\
 	.sleep_next_period	= edf_sleep_next_period,\
 	.tear_down		= part_edf_tear_down,\
-	.shutdown_hook		= NULL,\
 	.schedule		= part_edf_schedule,\
 	.finish_switch		= part_edf_finish_switch,\
 	.mode_change		= part_edf_mode_change,\
diff --git a/kernel/sched_pfair.c b/kernel/sched_pfair.c
index 20a7526a0a..1a6a790fb7 100644
--- a/kernel/sched_pfair.c
+++ b/kernel/sched_pfair.c
@@ -480,9 +480,7 @@ static sched_plugin_t s_plugin __cacheline_aligned_in_smp = {
 	.ready_to_use		= 1,\
 	.scheduler_tick		= pfair_scheduler_tick,\
 	.prepare_task		= pfair_prepare_task,\
-	.sleep_next_period	= 0,\
 	.tear_down		= pfair_tear_down,\
-	.shutdown_hook		= 0,\
 	.schedule		= pfair_schedule,\
 	.finish_switch		= pfair_finish_task_switch,\
 	.mode_change		= pfair_mode_change,\
diff --git a/kernel/sched_plugin.c b/kernel/sched_plugin.c
index 3c7d6c0842..1f759b7c9f 100644
--- a/kernel/sched_plugin.c
+++ b/kernel/sched_plugin.c
@@ -9,14 +9,6 @@
 #include <linux/sched_plugin.h>
 
 
-/* These are the original Linux initialization functions.
- * We replace them here with our initialization code and call them
- * after setting up LITMUS.
- */
-void linux_sched_init(void);
-void linux_sched_init_smp(void);
-int linux_migration_init(void);
-
 /*************************************************************
  *                  Dummy plugin functions                   *
  *************************************************************/
@@ -99,7 +91,6 @@ sched_plugin_t linux_sched_plugin = {
 	.wake_up_task		= litmus_dummy_wake_up_task,
 	.task_blocks		= litmus_dummy_task_blocks,
 	.sleep_next_period	= litmus_dummy_sleep_next_period,
-	.shutdown_hook		= 0,
 	.schedule		= litmus_dummy_schedule,
 	.finish_switch		= litmus_dummy_finish_switch,
 	.scheduler_setup	= litmus_dummy_scheduler_setup,
@@ -115,47 +106,3 @@ sched_plugin_t linux_sched_plugin = {
  */
 sched_plugin_t *curr_sched_plugin = &linux_sched_plugin;
 
-
-/* At sched-init */
-void __init sched_init(void)
-{
-	printk("Entering custom sched init, plugin %s\n",
-	       curr_sched_plugin->plugin_name);
-	/* Init tracing facility before plugin functions are called */
-
-	/* CLEANUP: reenable this if needed
-	   pstats = INIT_PSTATS;
-
-	*/
-
-	/* Call linux sched init tasks */
-	linux_sched_init();
-	printk("Sched init complete\n");
-}
-
-void __init sched_init_smp(void)
-{
-	printk("Entering custom SMP init, plugin %s\n",
-	       curr_sched_plugin->plugin_name);
-	/* Call linux smp initializer */
-	linux_sched_init_smp();
-	/* Enable tracing facilities here */
-	/*
-	CLEANUP: Reenable if needed.
-	if (smp_processor_id() == 0) {
-		if (init_trace()) {
-			printk("Tracing disabled\n");
-		} else {
-			printk("Default tracing enabled\n");
-		}
-	} */
-	printk("Sched init SMP complete\n");
-}
-
-int __init migration_init(void)
-{
-	printk("Entering migration init\n");
-
-	/* Call linux migration init as it was before */
-	return linux_migration_init();
-}
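With shutdown_hook gone and the init wrappers deleted (kernel/sched.c now owns sched_init(), sched_init_smp(), and migration_init() directly), the plugin interface reduces to a table of function pointers whose default entries are the litmus_dummy_* stubs above. A hedged sketch of the resulting dispatch idiom — the call site is illustrative, and the mode_change signature is assumed from hsb_mode_change(int new_mode):

	/* Sketch: call through whatever plugin is installed; because
	 * linux_sched_plugin fills every slot with a dummy implementation,
	 * callers need no NULL checks. */
	curr_sched_plugin->mode_change(atomic_read(&new_mode));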
diff --git a/kernel/sched_psn_edf.c b/kernel/sched_psn_edf.c
index 32f9b23829..9e4f4abd52 100644
--- a/kernel/sched_psn_edf.c
+++ b/kernel/sched_psn_edf.c
@@ -301,39 +301,11 @@ static void psnedf_wake_up_task(struct task_struct *task)
 	TRACE("psnedf: %d unsuspends with budget=%d\n",
 	      task->pid, task->time_slice);
 
-
 	/* After fixing the litmus_controlled bug,
 	 * this should hold again.
 	 */
 	BUG_ON(in_list(&task->rt_list));
 
-	/* FIXME:
-	 * There exists a race between this function, suspensions due to IO,
-	 * and switching in and out of real-time mode. For some reason, the
-	 * BUG_ON triggered after a task system warm-up phase.
-	 *
-	 * BUG_ON(in_list(&task->rt_list));
-	 *
-	 * Replaced by an if to gather more information.
-	 */
-	/*
-	if (unlikely(in_list(&task->rt_list))) {
-		TRACE(KERN_CRIT "wake_up_task: Why is %s/%d in rt list? "
-		      "state=%ld next=%p prev=%p flags=0x%8lx mode=%d "
-		      "partition=%d cpu=%d deadline=%ld now=%ld release=%ld"
-		      "rtflags=%d timeslice=%d job=%u knp=%d",
-		      task->comm, task->pid,
-		      task->state, task->rt_list.next, task->rt_list.prev,
-		      task->flags, get_rt_mode(),
-		      get_partition(task), smp_processor_id(),
-		      get_deadline(task), jiffies, get_release(task),
-		      get_rt_flags(task), task->time_slice,
-		      task->rt_param.times.job_no, task->rt_param.kernel_np);
-		task->state = TASK_RUNNING;
-		return;
-	}
-	*/
-
 	task->state = TASK_RUNNING;
 
 	/* We need to take suspensions because of semaphores into
@@ -520,7 +492,6 @@ static sched_plugin_t s_plugin __cacheline_aligned_in_smp = {
 	.prepare_task		= psnedf_prepare_task,\
 	.sleep_next_period	= edf_sleep_next_period,\
 	.tear_down		= psnedf_tear_down,\
-	.shutdown_hook		= NULL,\
 	.schedule		= psnedf_schedule,\
 	.mode_change		= psnedf_mode_change,\
 	.wake_up_task		= psnedf_wake_up_task,\
