/*
* litmus/sched_edf_hsb.c
*
* Implementation of the EDF-HSB scheduling algorithm.
*
* The following 6 events are fired by timers and not handled by
* the plugin infrastructure itself:
*
* release_[hrt|srt|be]_jobs
* [hrt|be]_server_released
* server_completed (for HRT, SRT, and BE)
*
* The following 4 events are caused by a write to the proc entry
* and should never be run when the plugin is already running:
* stop_[hrt|be]_servers
* admit_[hrt|be]_server
*
* TODO fix BE server task preemption
* TODO BE server heap needed?
* TODO move slack completion into release
* TODO fix concurrent arms
* TODO slack and BE servers, include slack higher prio
* TODO start servers should no longer be necessary
* TODO harmonize order of method arguments
* TODO test crazy task_new hack
*/
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/hrtimer.h>
#include <litmus/litmus.h>
#include <litmus/bheap.h>
#include <litmus/jobs.h>
#include <litmus/litmus_proc.h>
#include <litmus/sched_plugin.h>
#include <litmus/edf_common.h>
#include <litmus/sched_trace.h>
#include <litmus/servers.h>
#define DEBUG_EDF_HSB
/* DOES NOT WORK */
//#define SLACK_ON_MASTER
#define BE_PROC_NAME "be_servers"
#define HRT_PROC_NAME "hrt_servers"
#define BE_SERVER_BASE 100
#define SLACK_MIN NSEC_PER_MSEC
atomic_t servers_running = ATOMIC_INIT(0); /* TODO should be unnecessary */
#define TIME(x) \
(x)
/* ({lt_t y = x; \ */
/* do_div(y, NSEC_PER_MSEC); \ */
/* y;}) */
#define TRACE_TIMER(fmt, args...) \
sched_trace_log_message("%d P%d*[%s@%s:%d]: " fmt " at %d\n", \
TRACE_ARGS, ## args, TIME(litmus_clock()))
#define TRACE_TASK_TIMER(t, fmt, args...) \
TRACE_TIMER("(%s/%d:%d) " fmt, (t)->comm, (t)->pid, \
(t)->rt_param.job_params.job_no, ## args)
/*
* Useful debugging macros. Remove for actual use as they cause
* a lot of lock contention.
*/
#ifdef DEBUG_EDF_HSB
#define TRACE_SUB(fmt, args...) \
sched_trace_log_message("%d P%d [%s@%s:%d]: " fmt "\n", \
TRACE_ARGS, ## args)
#define TRACE_TASK_SUB(t, fmt, args...) \
TRACE_SUB(TASK_FMT " " fmt, TASK_ARGS(t), ##args)
#define TRACE_SERVER_SUB(s, fmt, args...) \
TRACE_SUB(SERVER_FMT " " fmt, SERVER_ARGS(s), ##args)
#define TRACE_TASK_SERVER_SUB(t, s, fmt, args...) \
TRACE_TASK_SUB(t, SERVER_FMT " " fmt, SERVER_ARGS(s), ##args)
#else
#define TRACE_SUB(fmt, args...)
#define TRACE_TASK_SUB(t, fmt, args...)
#define TRACE_SERVER_SUB(s, fmt, args...)
#define TRACE_TASK_SERVER_SUB(t, s, fmt, args...)
#endif
/*
* Different types of servers
*/
typedef enum {
S_HRT,
S_SRT,
S_BE,
S_SLACK
} server_type_t;
/*
* A server running HRT tasks
*/
typedef struct {
server_t server;
rt_domain_t hrt_domain; /* EDF for HRT tasks assigned here */
int ready; /* False if waiting for next release */
int no_slack;
struct hrtimer slack_timer; /* Server has no slack when:
* (deadline - budget) <= current_time.
*/
struct hrtimer_start_on_info slack_timer_info;
} hrt_server_t;
/*
* State of a single CPU
*/
typedef struct {
int cpu;
struct task_struct* scheduled; /* Task that is actually running */
struct task_struct* linked; /* Task that should be running */
server_t *linked_server; /* The server running on this cpu.
* Note that what it is 'running' is
* linked, not scheduled.
*/
hrt_server_t hrt_server; /* One HRT server per CPU */
struct bheap_node* hn; /* For the cpu_heap */
} cpu_entry_t;
/*
* Data assigned to each task
*/
typedef struct task_data {
server_t *srt_server; /* If the task is SRT, its server */
struct list_head candidate_list; /* List of slack candidates */
struct task_struct *owner;
} task_data_t;
/* CPU state */
DEFINE_PER_CPU_SHARED_ALIGNED(cpu_entry_t, cpu_entries);
static struct bheap cpu_heap;
static struct bheap_node cpu_heap_node[NR_CPUS];
/* Task domains */
static rt_domain_t srt_domain;
static rt_domain_t be_domain;
/* Useful tools for server scheduling */
static server_domain_t server_domain;
/* BE server support */
static struct list_head be_servers;
static struct bheap be_ready_servers;
/* Slack support */
static struct list_head slack_queue;
static struct list_head slack_candidates;
/* CPU which will release tasks and global servers */
static int edf_hsb_release_master;
/* Cache to store task_data structs */
static struct kmem_cache *task_data_cache;
static struct proc_dir_entry *edf_hsb_proc_dir = NULL;
static struct sched_plugin edf_hsb_plugin __cacheline_aligned_in_smp;
#define task_sched_entry(task) (&per_cpu(cpu_entries, task_cpu(task)))
#define task_linked_entry(task) (&per_cpu(cpu_entries, task->rt_param.linked_on))
#define task_job_no(task) (tsk_rt(task)->job_params.job_no)
#define task_data(task) ((task_data_t*)tsk_rt(task)->plugin_data)
#define task_srt_server(task) ((server_t*)task_data(task)->srt_server)
#define server_slack(s) ((server_t*)(s)->data)
#define server_has_slack(s) (((server_t*)(s))->deadline != 0)
#define local_cpu_entry (&__get_cpu_var(cpu_entries))
#define global_lock (&srt_domain.ready_lock)
#define is_active_plugin (litmus == &edf_hsb_plugin)
/*
* This only works if items are deleted with list_del_init.
*/
static inline int head_in_list(struct list_head *head)
{
BUG_ON(!head);
return !(head->next == head->prev && head->prev == head);
}
/*
* Returns slack server running the task or NULL if N/A.
*/
static inline server_t* task_slack_server(struct task_struct *task)
{
server_t *slack_server = NULL;
if (task->rt_param.linked_on != NO_CPU) {
slack_server = task_linked_entry(task)->linked_server;
if (slack_server->type != S_SLACK)
slack_server = NULL;
}
return slack_server;
}
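/*
 * Slab allocation helpers for the per-task plugin data.
 */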
static task_data_t* task_data_alloc(int gfp_flags)
{
return kmem_cache_alloc(task_data_cache, gfp_flags);
}
static void task_data_free(task_data_t* data)
{
kmem_cache_free(task_data_cache, data);
}
/*
* Donating servers pre-allocate a server for slack to avoid runtime
* calls to kmalloc.
*/
static void server_slack_create(server_t *donator)
{
server_t *slack = server_alloc(GFP_ATOMIC);
server_init(slack, &server_domain, -donator->id, 0, 0, 1);
slack->type = S_SLACK;
slack->data = donator;
donator->data = slack;
}
static void server_slack_destroy(server_t *donator)
{
server_t *slack = (server_t*)donator->data;
donator->data = NULL;
server_destroy(slack);
server_free(slack);
}
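/*
 * Removes a slack server from the slack queue and zeroes its budget,
 * deadline, and wcet so it cannot be selected until slack is donated again.
 */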
static void remove_slack(server_t *slack)
{
TRACE_SERVER_SUB(slack, "slack removed");
//sched_trace_action(NULL, 7);
if (head_in_list(&slack->list))
list_del_init(&slack->list);
slack->deadline = 0;
slack->budget = 0;
slack->wcet = 0;
}
/*
* Slack queue is EDF.
*/
static void add_slack(server_t *slack)
{
struct list_head *pos;
server_t *queued;
TRACE_SERVER_SUB(slack, "slack added");
BUG_ON(head_in_list(&slack->list));
list_for_each_prev(pos, &slack_queue) {
queued = list_entry(pos, server_t, list);
if (lt_before_eq(queued->deadline, slack->deadline)) {
__list_add(&slack->list, pos, pos->next);
return;
}
}
list_add(&slack->list, &slack_queue);
}
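/*
 * Returns the slack-stealing candidate task at list position @pos,
 * or NULL if there is none.
 */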
static inline struct task_struct* get_candidate(struct list_head *pos)
{
struct task_struct *task = NULL;
task_data_t *data;
if (!list_empty(pos)) {
data = list_entry(pos, task_data_t, candidate_list);
task = data->owner;
}
return task;
}
/*
* Candidate queue is EDF.
*/
static void add_slack_candidate(struct task_struct *task)
{
struct list_head *pos;
struct task_struct *queued;
TRACE_TASK_SUB(task, "candidate added");
list_for_each_prev(pos, &slack_candidates) {
queued = get_candidate(pos);
if (lt_before_eq(get_deadline(queued), get_deadline(task))) {
__list_add(&task_data(task)->candidate_list,
pos, pos->next);
return;
}
}
list_add(&task_data(task)->candidate_list, &slack_candidates);
}
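/*
 * Transfers the donating server's remaining budget and deadline to its
 * pre-allocated slack server and adds that slack to the slack queue.
 */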
static void donate_slack(server_t *donator)
{
server_t *slack = (server_t*)donator->data;
hrt_server_t *hrt_server;
TRACE_SERVER_SUB(donator, "%llu slack donated", TIME(donator->budget));
if (donator->type == S_HRT) {
hrt_server = container_of(donator, hrt_server_t, server);
BUG_ON(!hrt_server->ready);
}
BUG_ON(head_in_list(&slack->list));
slack->wcet = donator->budget;
slack->budget = donator->budget;
slack->deadline = donator->deadline;
add_slack(slack);
}
/*
* Donate any available slack from a server.
*/
static void check_donate_slack(server_t *donator, struct task_struct *was_scheduled)
{
hrt_server_t *hrt_server;
int donate = 0;
TRACE_SERVER_SUB(donator, "checking donation");
/* Donating small amounts of slack will result in excess migrations */
if (donator->budget < SLACK_MIN)
return;
if (donator->type == S_HRT)
hrt_server = container_of(donator, hrt_server_t, server);
/* Donate if the server is waiting for a task release */
if ((donator->type == S_SRT &&
donator->job_no <= task_job_no(was_scheduled)) ||
(donator->type == S_HRT && hrt_server->no_slack &&
!__jobs_pending(&hrt_server->hrt_domain)) ||
(donator->type == S_BE &&
!__jobs_pending(&be_domain))) {
donate = 1;
}
if (!donate)
return;
sched_trace_action(was_scheduled, 9);
donate_slack(donator);
}
/*
* Adds the task to the candidate queue if it is eligible for slack stealing.
*/
static void check_slack_candidate(struct task_struct *task)
{
TRACE_TASK_SUB(task, "checking for candidate");
if (is_srt(task) &&
/* The SRT task is not ahead of its server */
task_srt_server(task)->job_no >= task_job_no(task) &&
/* The task has yet to be released */
lt_after(get_release(task), litmus_clock()) &&
/* The task didn't just complete */
get_rt_flags(task) != RT_F_SLEEP &&
/* The task hasn't already been added to the list */
!head_in_list(&task_data(task)->candidate_list)) {
add_slack_candidate(task);
sched_trace_action(task, 8);
}
}
/*
* Order BE tasks FIFO.
*/
static inline int be_higher_prio(struct task_struct *first, struct task_struct *second)
{
return lt_before(get_release(first), get_release(second)) ||
/* Break by PID */
(get_release(first) == get_release(second) &&
(first->pid < second->pid));
}
static int be_ready_order(struct bheap_node *a, struct bheap_node *b)
{
struct task_struct *first, *second;
first = bheap2task(a);
second = bheap2task(b);
if (!first || !second)
return first && !second;
return be_higher_prio(first, second);
}
/*
* Order servers by EDF.
*/
static inline int server_higher_prio(server_t *first, server_t *second)
{
return lt_before(first->deadline, second->deadline) ||
/* Break by id */
(first->deadline == second->deadline &&
first->id < second->id);
}
static int server_order(struct bheap_node *a, struct bheap_node *b)
{
server_t *first, *second;
first = a->value;
second = b->value;
return server_higher_prio(first, second);
}
/*
* Order CPU's by deadlines of their servers.
*/
static int cpu_lower_prio(struct bheap_node *a, struct bheap_node *b)
{
cpu_entry_t *first, *second;
first = a->value;
second = b->value;
if (first->linked && second->linked) {
return !server_higher_prio(first->linked_server,
second->linked_server);
}
return second->linked && !first->linked;
}
/*
* Move the CPU entry to the correct position in the queue.
*/
static inline void update_cpu_position(cpu_entry_t *entry)
{
if (likely(bheap_node_in_heap(entry->hn)))
bheap_delete(cpu_lower_prio, &cpu_heap, entry->hn);
/* Don't leave HRT CPUs in the heap as the heap's order only
 * matters for global preemptions.
*/
if (!entry->linked || !is_hrt(entry->linked))
bheap_insert(cpu_lower_prio, &cpu_heap, entry->hn);
}
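/*
 * Returns the lowest-priority CPU entry in the heap, or NULL if the
 * heap is empty.
 */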
static inline cpu_entry_t* lowest_prio_cpu(void)
{
struct bheap_node *hn = bheap_peek(cpu_lower_prio, &cpu_heap);
return (hn) ? hn->value : NULL;
}
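/*
 * An HRT server counts as initialized once it has been given a nonzero
 * wcet and period via the proc interface.
 */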
static inline int check_hrt_server_initialized(hrt_server_t *hrt_server)
{
return hrt_server->server.wcet && hrt_server->server.period;
}
/*
* Arms the slack timer for the server, if necessary.
*/
static void slack_timer_arm(hrt_server_t *hrt_server)
{
int cpu;
cpu_entry_t *entry;
struct hrtimer *timer;
lt_t when_to_fire;
if (!check_hrt_server_initialized(hrt_server)) {
TRACE_SERVER_SUB(&hrt_server->server, "not initialized");
return;
}
timer = &hrt_server->slack_timer;
entry = container_of(hrt_server, cpu_entry_t, hrt_server);
#ifdef SLACK_ON_MASTER
if (edf_hsb_release_master != NO_CPU)
cpu = edf_hsb_release_master;
else
#endif
cpu = entry->cpu;
when_to_fire = hrt_server->server.deadline - hrt_server->server.budget;
/* Ensure the timer is needed */
if (hrtimer_active(timer) || hrt_server->server.deadline == 0 ||
hrt_server->no_slack || hrt_server->server.budget == 0 ||
!hrt_server->ready) {
TRACE_SERVER_SUB(&hrt_server->server,
"not arming slack timer on P%d", entry->cpu);
return;
}
if (when_to_fire >= hrt_server->server.deadline) {
TRACE_SUB("wtf: %llu, dead: %llu, bud: %llu",
when_to_fire, hrt_server->server.deadline,
hrt_server->server.budget);
BUG_ON(1);
}
/* Arm timer */
if (lt_after_eq(litmus_clock(), when_to_fire)) {
/* 'Fire' immediately */
TRACE("immediate: %llu", when_to_fire);
hrt_server->no_slack = 1;
} else if (cpu != smp_processor_id()) {
atomic_set(&hrt_server->slack_timer_info.state,
HRTIMER_START_ON_INACTIVE);
hrtimer_start_on(cpu,
&hrt_server->slack_timer_info,
&hrt_server->slack_timer,
ns_to_ktime(when_to_fire),
HRTIMER_MODE_ABS_PINNED);
} else {
__hrtimer_start_range_ns(timer, ns_to_ktime(when_to_fire),
0, HRTIMER_MODE_ABS_PINNED, 0);
}
TRACE_SUB("slack timer armed to fire at %llu on P%d",
TIME(when_to_fire), entry->cpu);
}
/*
* Does nothing if the slack timer is not armed.
*/
static inline void slack_timer_cancel(hrt_server_t *hrt_server)
{
int ret;
if (hrtimer_active(&hrt_server->slack_timer)) {
ret = hrtimer_try_to_cancel(&hrt_server->slack_timer);
if (ret == -1) {
TRACE_SERVER_SUB(&hrt_server->server,
"slack timer was running concurrently");
} else {
TRACE_SERVER_SUB(&hrt_server->server,
"slack timer cancelled");
}
} else {
TRACE_SERVER_SUB(&hrt_server->server, "slack not active");
}
}
/*
* Handles subtraction of lt_t without underflows.
*/
static inline lt_t lt_subtract(lt_t a, lt_t b)
{
long long sub = (long long)a - (long long)b;
if (sub >= 0)
return sub;
else
return 0;
}
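/*
 * Returns an HRT or BE server to the release queue if its next release
 * lies in the future; otherwise marks it released (HRT servers become
 * ready, BE servers go back on the ready heap).
 */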
static void requeue_server(server_t *server)
{
int added = 0;
hrt_server_t *hrt_server;
lt_t now = litmus_clock();
BUG_ON(server->type == S_SRT ||
server->type == S_SLACK);
if (lt_before(now, server->release)) {
added = add_server_release(server, &server_domain);
}
if (!added) {
/* Mark servers as released */
if (server->type == S_HRT) {
hrt_server = container_of(server, hrt_server_t, server);
TRACE_SERVER_SUB(server, "P%d now ready",
container_of(hrt_server, cpu_entry_t, hrt_server)->cpu);
hrt_server->ready = 1;
} else if (server->type == S_BE) {
TRACE_SERVER_SUB(server, "BE added to ready");
bheap_insert(server_order, &be_ready_servers, server->hn);
}
} else {
BUG_ON(bheap_node_in_heap(server->hn));
}
}
/*
* Absorbs a task's execution time into its donator.
*/
static void reclaim_slack(server_t *slack)
{
lt_t exec;
hrt_server_t *hrt_server;
server_t *donator = server_slack(slack);
/* SRT servers do not ever reclaim slack */
if (donator->type == S_SRT)
return;
else if (donator->type == S_HRT)
sched_trace_action(NULL, 5);
exec = slack->wcet - slack->budget;
TRACE_SERVER_SUB(donator, "reclaiming %llu slack", TIME(exec));
BUG_ON(is_server_linked(donator));
BUG_ON(!slack->wcet);
BUG_ON(!donator->budget);
donator->budget -= exec;
slack->wcet = slack->budget;
/* If budget exhausted, server needs to wait for next release */
if (!donator->budget) {
TRACE_SERVER_SUB(donator, "exhausted by slack");
if (donator->type == S_HRT) {
hrt_server = container_of(donator,
hrt_server_t,
server);
BUG_ON(!hrt_server->ready);
TRACE_SERVER_SUB(donator, "no longer ready");
hrt_server->ready = 0;
slack_timer_cancel(hrt_server);
} else if (donator->type == S_BE) {
TRACE_SERVER_SUB(donator, "BE removed from ready");
bheap_delete(server_order, &be_ready_servers,
donator->hn);
}
/* Prepare servers for their next period. SRT servers are
* handled with their SRT tasks and don't need assistance.
*/
if (donator->type != S_SRT) {
server_release(donator);
requeue_server(donator);
}
}
}
/*
* Begins server execution and arms any timers necessary.
*/
static noinline void link_server(cpu_entry_t *entry,
server_t *next_server)
{
if (entry->linked) {
/* Massive state check */
if (next_server->type == S_SRT) {
/* SRT task cannot get ahead of its server */
BUG_ON(next_server->job_no < task_job_no(entry->linked));
BUG_ON(lt_after(get_deadline(entry->linked),
next_server->deadline));
} else if (next_server->type == S_HRT) {
/* HRT servers should never, ever migrate */
BUG_ON(entry->cpu != task_cpu(entry->linked));
BUG_ON(!entry->hrt_server.ready);
} else if (next_server->type == S_SLACK) {
/* Should have already been removed from slack list */
BUG_ON(head_in_list(&task_data(entry->linked)->candidate_list));
} else { /* BE */
/* Should have already been removed from ready heap */
BUG_ON(bheap_node_in_heap(next_server->hn));
sched_trace_action(entry->linked, next_server->id);
}
if (next_server->type != S_SLACK) {
BUG_ON(head_in_list(&server_slack(next_server)->list));
}
entry->linked_server = next_server;
server_run(entry->linked_server, entry->linked);
}
/* Timer necessary whenever an HRT is not running */
if (!entry->linked || !is_hrt(entry->linked))
slack_timer_arm(&entry->hrt_server);
else
slack_timer_cancel(&entry->hrt_server);
}
/*
* Stops server execution and timers. This will also re-add servers
* to any collections they should be members of.
*/
static noinline void unlink_server(cpu_entry_t *entry, int requeue)
{
server_t *server = entry->linked_server;
hrt_server_t *hrt_server = &entry->hrt_server;
BUG_ON(!entry->linked_server);
server_stop(server);
entry->linked_server = NULL;
if (!requeue)
return;
if (server->type == S_SLACK && server->deadline) {
add_slack(server);
//sched_trace_action(entry->linked, 6);
/* Donator needs to absorb slack execution time */
reclaim_slack(server);
} else if (server->type != S_SRT &&
(server->type == S_BE || !hrt_server->ready)) {
requeue_server(server);
}
if (server->type == S_HRT && hrt_server->ready)
BUG_ON(head_in_list(&server_slack(server)->list));
}
/* Update the link of a CPU.
* Handles the case where the to-be-linked task is already
 * scheduled on a different CPU. The next_server argument must be
 * passed in because, e.g., a BE task's server cannot be determined here.
*/
static noinline void link_task_to_cpu(cpu_entry_t *entry,
struct task_struct* linked,
server_t* next_server,
int swap_on_sched)
{
cpu_entry_t *sched;
server_t *tmp_server;
struct task_struct *tmp_task;
int on_cpu;
BUG_ON(linked && !is_realtime(linked));
BUG_ON(linked && is_hrt(linked) && entry->cpu != task_cpu(linked));
BUG_ON(entry->cpu == edf_hsb_release_master);
if (linked)
TRACE_TASK_SERVER_SUB(linked, next_server, "linking to P%d",
entry->cpu);
/* Currently linked task is set to be unlinked. */
if (entry->linked) {
unlink_server(entry, 1);
entry->linked->rt_param.linked_on = NO_CPU;
entry->linked = NULL;
}
/* Link new task to CPU. */
if (linked) {
set_rt_flags(linked, RT_F_RUNNING);
/* Handle task is already scheduled somewhere! */
on_cpu = linked->rt_param.scheduled_on;
if (on_cpu != NO_CPU) {
sched = &per_cpu(cpu_entries, on_cpu);
/* This should only happen if not linked already */
BUG_ON(sched->linked == linked);
/* Swap link with entry on which linked is scheduled */
if (entry != sched && !is_hrt(linked) &&
swap_on_sched &&
(!sched->linked || !is_hrt(sched->linked))) {
TRACE_TASK_SUB(linked, "already scheduled on P%d",
sched->cpu);
tmp_task = sched->linked;
tmp_server = sched->linked_server;
if (tmp_task)
unlink_server(sched, 0);
linked->rt_param.linked_on = sched->cpu;
sched->linked = linked;
link_server(sched, next_server);
update_cpu_position(sched);
linked = tmp_task;
next_server = tmp_server;
}
}
if (linked) /* Might be NULL due to swap */
linked->rt_param.linked_on = entry->cpu;
}
entry->linked = linked;
link_server(entry, next_server);
update_cpu_position(entry);
BUG_ON(!entry->linked && entry->linked_server);
if (linked)
TRACE_TASK_SERVER_SUB(linked, next_server,
"linked to %d", entry->cpu);
else
TRACE_SUB("NULL linked to %d", entry->cpu);
}
/*
* Grab the local HRT or global SRT or BE domain for the task.
*/
static inline rt_domain_t* get_rt_domain(cpu_entry_t *entry,
struct task_struct *task)
{
if (is_hrt(task))
return &entry->hrt_server.hrt_domain;
else if (is_srt(task))
return &srt_domain;
else /* BE */
return &be_domain;
}
/*
* Ensures the task is not linked anywhere nor present in any ready queues.
*/
static noinline void unlink(struct task_struct* t)
{
cpu_entry_t *entry;
BUG_ON(!t);
if (t->rt_param.linked_on != NO_CPU) {
/* Unlink */
entry = task_linked_entry(t);
link_task_to_cpu(entry, NULL, NULL, 0);
} else if (is_queued(t)) {
if (is_released(t, litmus_clock())) {
/* This is an interesting situation: t is scheduled,
* but has already been unlinked. It was re-added to
* a ready queue of some sort but now needs to
* be removed. This usually happens when a job has
* been preempted but completes before it is
* descheduled.
*/
TRACE_TASK_SUB(t, "removing from domain");
entry = task_sched_entry(t);
remove(get_rt_domain(entry, t), t);
BUG_ON(is_queued(t));
}
}
}
/*
 * A job generated by an HRT task is eligible if its server is ready and
 * either the job's deadline is no later than the server's current deadline
 * or the server has no remaining slack in its current period.
*/
static inline int is_eligible(struct task_struct *task,
hrt_server_t *hrt_server)
{
TRACE_TASK_SUB(task, "%d %d %llu %llu",
hrt_server->ready, hrt_server->no_slack,
hrt_server->server.deadline,
get_deadline(task));
return hrt_server->ready &&
(hrt_server->no_slack ||
lt_after_eq(hrt_server->server.deadline, get_deadline(task)));
}
/*
 * Catch the server up so that it releases at the latest point, a whole
 * number of periods past its old deadline, that does not lie after @time.
*/
static inline void catchup_server(server_t *server, lt_t time)
{
lt_t diff = time - server->deadline;
lt_t sub = diff % server->period;
server_release_at(server, time - sub);
TRACE_SERVER_SUB(server, "catching up to %llu", time);
}
/*
* Returns the next eligible slack server. This will remove any expired
* slack servers still present in the list.
*/
static server_t* next_eligible_slack_server(void)
{
server_t *next_slack = NULL;
while (!list_empty(&slack_queue)) {
next_slack = list_entry(slack_queue.next, server_t, list);
BUG_ON(!next_slack);
if (lt_after(next_slack->deadline, litmus_clock()) &&
lt_after(next_slack->budget, SLACK_MIN)) {
break;
} else {
/* Slack has expired or has too little time */
remove_slack(next_slack);
next_slack = NULL;
}
}
return next_slack;
}
/*
* Returns the next SRT task that is tardy or will be tardy. If none
* are available, will return a tardy BE task if present.
*/
static struct task_struct* next_eligible_slack(void)
{
lt_t now = litmus_clock();
struct task_struct *next = get_candidate(slack_candidates.next);
while (next && lt_before_eq(get_release(next), now)) {
/* The task's server has re-released. It's the server's
* responsibility now.
*/
list_del_init(&task_data(next)->candidate_list);
next = get_candidate(slack_candidates.next);
}
if (!next) {
/* We couldn't find an SRT to schedule. Find a BE which is
* either tardy or cannot run due to a lack of servers.
*/
next = __peek_ready(&be_domain);
if (next && !is_tardy(next, now))
next = NULL;
}
return next;
}
/*
* If the server is eligible, return the next eligible job. If the server is
* ineligible or there are no eligible jobs, returns NULL. This will re-release
* any servers that are behind.
*/
static struct task_struct* next_eligible_hrt(hrt_server_t *hrt_server)
{
lt_t now = litmus_clock();
struct task_struct *task = __peek_ready(&hrt_server->hrt_domain);
/* Catch up server if it is initialized, not running, and late */
if (task &&
check_hrt_server_initialized(hrt_server) &&
!is_server_linked(&hrt_server->server) &&
lt_before_eq(hrt_server->server.deadline, now)) {
catchup_server(&hrt_server->server, now);
slack_timer_arm(hrt_server);
TRACE_SERVER_SUB(&hrt_server->server, "now ready");
hrt_server->ready = 1;
hrt_server->no_slack = 0;
sched_trace_action(NULL, 2); /* Release */
} else {
TRACE_SERVER_SUB(&hrt_server->server, "%llu %d %llu",
hrt_server->server.deadline,
is_server_linked(&hrt_server->server),
now);
}
if (!hrt_server->server.budget ||
(task && !is_eligible(task, hrt_server))) {
if (task)
TRACE_TASK_SUB(task, "not eligible");
task = NULL;
}
return task;
}
/*
* This will catch up the SRT's server if it is behind.
*/
static struct task_struct* next_eligible_srt(void)
{
struct task_struct *next_srt = __peek_ready(&srt_domain);
server_t *srt_server;
/* A blocking task might pollute the SRT domain if the
* task blocked while it was being run by a slack server.
* Remove and ignore this task.
*/
while (next_srt && !is_running(next_srt)) {
remove(&srt_domain, next_srt);
next_srt = __peek_ready(&srt_domain);
}
/* Catch up srt server. This happens when the job is tardy due
* to overutilization of the system.
*/
if (next_srt) {
srt_server = task_srt_server(next_srt);
if (lt_before(srt_server->deadline, get_deadline(next_srt))) {
server_release_at(srt_server, get_release(next_srt));
srt_server->job_no = task_job_no(next_srt);
}
}
return next_srt;
}
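/*
 * Peek at the highest-priority (earliest-deadline) BE server on the
 * ready heap, or NULL if none are ready.
 */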
static inline server_t* next_be_server(void)
{
struct bheap_node *hn = bheap_peek(server_order, &be_ready_servers);
return (hn) ? hn->value : NULL;
}
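/*
 * Returns the earliest-deadline BE server that is eligible to run now,
 * catching up any servers whose deadlines have already passed.
 */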
static server_t* next_eligible_be_server(void)
{
server_t *be_server = next_be_server();
lt_t now = litmus_clock();
/* Catch up any late BE servers. This happens when the servers could
* not find tasks to schedule or if the system is overutilized.
*/
while (be_server && lt_before_eq(be_server->deadline, now)) {
bheap_delete(server_order, &be_ready_servers,
be_server->hn);
catchup_server(be_server, now);
bheap_insert(server_order, &be_ready_servers,
be_server->hn);
be_server = next_be_server();
sched_trace_action(NULL, 2); /* Release */
}
if (be_server && lt_before(now, be_server->release)) {
TRACE_SERVER_SUB(be_server, "SHOULD BUG");
be_server = NULL;
}
return be_server;
}
/*
* Adds a task to the appropriate queue (ready / release) in a domain.
*/
static noinline void requeue(struct task_struct *task, rt_domain_t *domain)
{
int was_added;
BUG_ON(!is_realtime(task));
BUG_ON(head_in_list(&task_data(task)->candidate_list));
check_slack_candidate(task);
if (is_queued(task)) {
TRACE_TASK_SUB(task, "not requeueing, already queued");
} else if (is_released(task, litmus_clock())) {
TRACE_TASK_SUB(task, "requeuing on ready");
__add_ready(domain, task);
} else {
/* Task needs to wait until it is released */
TRACE_TASK_SUB(task, "requeuing on release");
was_added = add_release(domain, task);
/* The release time happened before we added ourselves
* to the heap. We can now add to ready.
*/
if (!was_added) {
TRACE_TASK_SUB(task, "missed release, going to ready");
__add_ready(domain, task);
}
}
}
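/*
 * Of the two (server, task) pairs, stores the one with the earlier
 * server deadline in *server and *task. A missing first pair loses.
 */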
static inline void earlier_server_task(server_t *first,
struct task_struct *first_task,
server_t *second,
struct task_struct *second_task,
server_t **server,
struct task_struct **task)
{
if (!first ||
(second && lt_before_eq(second->deadline, first->deadline))) {
*server = second;
*task = second_task;
} else {
*server = first;
*task = first_task;
}
}
/*
 * Sets *next_server and *next_task to the highest-priority global
 * (SRT, BE, or slack) server and task. If entry is not NULL, entry's
 * currently linked task is also considered as a candidate.
*/
static void next_global_task(cpu_entry_t *entry,
server_t **next_server,
struct task_struct **next_task)
{
struct task_struct *next_srt, *next_be, *next_slack;
server_t *be_server, *slack_server, *srt_server;
*next_server = NULL;
*next_task = NULL;
next_srt = next_eligible_srt();
srt_server = (next_srt) ? task_srt_server(next_srt) : NULL;
next_be = __peek_ready(&be_domain);
be_server = next_eligible_be_server();
next_slack = next_eligible_slack();
slack_server = next_eligible_slack_server();
TRACE_SUB("be_server: %d, next_be: %d, next_srt: %d, slack_server: %d "
"next_slack: %d", (be_server) ? be_server->id : -1,
(next_be) ? next_be->pid : -1,
(next_srt) ? next_srt->pid : -1,
(slack_server) ? slack_server->id : -1,
(next_slack) ? next_slack->pid : -1);
/* Check if the servers can schedule the task linked to entry */
if (entry && entry->linked) {
if (entry->linked_server->type == S_BE &&
(!next_be ||
lt_before(get_release(entry->linked),
get_release(next_be)))) {
next_be = entry->linked;
} else if (entry->linked_server->type == S_SLACK &&
(!next_slack ||
lt_before(get_deadline(entry->linked),
get_deadline(next_slack)))) {
next_slack = entry->linked;
}
}
/* Remove tasks without servers and vice versa from contention */
if (!next_be || !be_server) {
next_be = NULL;
be_server = NULL;
}
if (!next_slack || !slack_server) {
next_slack = NULL;
slack_server = NULL;
}
/* Favor BE servers. If we don't, then a BE server might lose
* out to its own slack.
*/
if (slack_server && be_server &&
be_server->deadline == slack_server->deadline) {
next_slack = NULL;
slack_server = NULL;
}
/* There is probably a better way to do this */
earlier_server_task(srt_server, next_srt,
be_server, next_be,
next_server, next_task);
earlier_server_task(*next_server, *next_task,
slack_server, next_slack,
next_server, next_task);
//BUG_ON(*next_server && lt_before(litmus_clock(), *next_server->release));
}
/*
* Remove the task and server from any ready queues.
*/
static void remove_from_ready(server_t *server, struct task_struct *task,
cpu_entry_t *entry)
{
server_t *slack;
if (server->type == S_SLACK) {
TRACE_SERVER_SUB(server, "removed from slack list");
list_del_init(&server->list);
/* Remove from consideration of BE servers */
if (is_be(task) && is_queued(task)) {
TRACE_TASK_SUB(task, "BE removed from ready");
remove(&be_domain, task);
}
} else {
slack = server_slack(server);
if (head_in_list(&slack->list)) {
remove_slack(slack);
}
if (server->type == S_BE) {
TRACE_SERVER_SUB(server, "server removed from ready");
bheap_delete(server_order, &be_ready_servers,
server->hn);
}
if (is_queued(task)) {
TRACE_TASK_SUB(task, "removed from ready");
remove(get_rt_domain(entry, task), task);
}
}
/* Remove from consideration of slack servers */
if (head_in_list(&task_data(task)->candidate_list)) {
TRACE_TASK_SUB(task, "deleting candidate");
list_del_init(&task_data(task)->candidate_list);
}
}
static void check_for_slack_preempt(struct task_struct*, server_t*, cpu_entry_t*, int);
/*
* Finds and links the next server and task to an entry with no linked task.
*/
static void edf_hsb_pick_next(cpu_entry_t *entry)
{
struct task_struct *next_task;
server_t *next_server;
BUG_ON(entry->linked);
next_task = next_eligible_hrt(&entry->hrt_server);
if (next_task)
next_server = &entry->hrt_server.server;
else
next_global_task(NULL, &next_server, &next_task);
if (next_task) {
remove_from_ready(next_server, next_task, entry);
check_for_slack_preempt(next_task, next_server, entry, 1);
TRACE_TASK_SERVER_SUB(next_task, next_server,
"removing and picked");
/* Force the link to go to the CPU we ask for since we are
* trying to reschedule for this CPU only.
*/
link_task_to_cpu(entry, next_task, next_server,
0 /* Always link to what we want */);
}
}
/*
* Preempt the currently running server and task with new ones.
* It is possible that either only the server or the task is different here.
*/
static void preempt(cpu_entry_t *entry, struct task_struct *next,
server_t *next_server, int slack_resched)
{
struct task_struct *linked = entry->linked;
rt_domain_t *domain;
TRACE_TASK_SERVER_SUB(next, next_server,
"preempting on P%d", entry->cpu);
remove_from_ready(next_server, next, entry);
check_for_slack_preempt(next, next_server, entry, slack_resched);
link_task_to_cpu(entry, next, next_server, 1);
/* No need for this if only the server was preempted */
if (!linked || linked != entry->linked) {
if (linked) {
domain = get_rt_domain(entry, linked);
requeue(linked, domain);
}
preempt_if_preemptable(entry->scheduled, entry->cpu);
}
}
/*
* Causes a preemption if:
* 1. task is being run by a slack server on a different CPU
* 2. slack donated by server is running a task on a different CPU
*/
static void check_for_slack_preempt(struct task_struct *task,
server_t *server,
cpu_entry_t *next_entry,
int resched)
{
cpu_entry_t *entry = NULL;
server_t *slack = server_slack(server);
struct task_struct *slack_task;
/* The task is currently being run by another slack server */
if (tsk_rt(task)->linked_on != NO_CPU) {
entry = task_linked_entry(task);
if (entry != next_entry) {
TRACE_TASK_SUB(task, "was on P%d", entry->cpu);
BUG_ON(entry->linked_server->type != S_SLACK);
unlink(task);
edf_hsb_pick_next(entry);
preempt_if_preemptable(entry->scheduled, entry->cpu);
}
}
/* The server's slack is currently being run */
if (is_server_linked(slack)) {
entry = &per_cpu(cpu_entries, slack->cpu);
slack_task = server_task(slack);
unlink(slack_task);
remove_slack(slack);
if (entry != next_entry && resched) {
TRACE_SERVER_SUB(slack, "was on P%d", entry->cpu);
/* Force a reschedule */
requeue(slack_task, get_rt_domain(entry, slack_task));
edf_hsb_pick_next(entry);
preempt_if_preemptable(entry->scheduled, entry->cpu);
} else {
/* This can only happen on a preemption. If a preemption
* happens, the task will be requeued elsewhere.
* Obviously the next task has already been chosen.
*/
TRACE_SERVER_SUB(slack, "was on local P%d", entry->cpu);
}
}
}
/*
* Check for any necessary non-hrt preemptions.
*/
static void check_for_global_preempt(void)
{
cpu_entry_t *entry, *pref;
server_t *next_server;
struct task_struct *next_task = (struct task_struct*)1; /* Not NULL */
for (entry = lowest_prio_cpu(); entry; entry = lowest_prio_cpu()) {
/* HRT cpus should not be in this heap */
BUG_ON(entry->linked && is_hrt(entry->linked));
next_global_task(entry, &next_server, &next_task);
if (!next_server)
break;
/* Prevent migrations when possible */
if (!entry->linked) {
pref = &per_cpu(cpu_entries, task_cpu(next_task));
#ifdef CONFIG_RELEASE_MASTER
if (!pref->linked && pref->cpu!=edf_hsb_release_master)
#else
if (!pref->linked)
#endif
entry = pref;
}
/* Preempt only if we have an earlier deadline */
if (entry->linked &&
!lt_before(next_server->deadline,
entry->linked_server->deadline)) {
break;
}
/* We do not reschedule if this causes a slack preemption
* because we will detect if we should reschedule on the
* next iteration of the loop.
*/
preempt(entry, next_task, next_server,
0 /* Don't reschedule on a slack preemption */);
}
}
/*
* Correct local link after a change to the local HRT domain.
*/
static void check_for_hrt_preempt(cpu_entry_t *entry)
{
hrt_server_t *hrt_server = &entry->hrt_server;
struct task_struct *next_hrt = next_eligible_hrt(hrt_server);
struct task_struct *curr = entry->linked;
if (next_hrt &&
(!entry->linked || !is_hrt(entry->linked) ||
!is_eligible(entry->linked, hrt_server) ||
edf_preemption_needed(&hrt_server->hrt_domain, entry->linked))) {
preempt(entry, next_hrt, &hrt_server->server, 1);
/* We might have just kicked off something. Check to see if it
* preempts anything.
*/
if (curr && !is_hrt(curr))
check_for_global_preempt();
} else {
TRACE_SERVER_SUB(&hrt_server->server, "not HRT preempting");
}
}
/*
* Assumes called with local irqs disabled.
*/
static void job_arrival(struct task_struct *task, cpu_entry_t *entry)
{
int was_empty;
BUG_ON(task_cpu(task) == NO_CPU);
TRACE_TASK_SUB(task, "arriving on P%d", entry->cpu);
if (is_hrt(task)) {
requeue(task, &entry->hrt_server.hrt_domain);
check_for_hrt_preempt(entry);
} else if (is_srt(task)) {
requeue(task, &srt_domain);
check_for_global_preempt();
} else /* BE */ {
was_empty = !__jobs_pending(&be_domain);
requeue(task, &be_domain);
/* This can only cause a preemption if an eligible
 * BE server previously had no task to run.
*/
if (was_empty && __jobs_pending(&be_domain))
check_for_global_preempt();
}
}
/******************************************************************************
* Timer methods
******************************************************************************/
/*
* Merges a group of released HRT tasks into a ready queue and checks
 * for preemptions.
*/
static void release_hrt_jobs(rt_domain_t *domain, struct bheap *tasks)
{
unsigned long flags;
struct task_struct *first;
cpu_entry_t *entry;
raw_spin_lock_irqsave(global_lock, flags);
first = (struct task_struct*)bheap_peek(edf_ready_order, tasks)->value;
BUG_ON(!first || !is_hrt(first));
entry = task_sched_entry(first);
TRACE_TASK(first, "HRT tasks released at %llu on P%d\n",
TIME(litmus_clock()), task_cpu(first));
__merge_ready(domain, tasks);
check_for_hrt_preempt(entry);
raw_spin_unlock_irqrestore(global_lock, flags);
}
/*
 * Merges a group of released SRT tasks into the ready queue and checks
 * whether a preemption is needed.
*/
static void release_srt_jobs(rt_domain_t *domain, struct bheap *tasks)
{
unsigned long flags;
struct task_struct *first = (bheap_peek(edf_ready_order, tasks)->value);
raw_spin_lock_irqsave(global_lock, flags);
TRACE_TASK(first, "SRT tasks released at %llu\n", TIME(litmus_clock()));
__merge_ready(domain, tasks);
check_for_global_preempt();
raw_spin_unlock_irqrestore(global_lock, flags);
}
/*
 * Merges a group of released BE tasks into the ready queue and checks
 * whether a preemption is needed.
*/
static void release_be_jobs(rt_domain_t *domain, struct bheap *tasks)
{
unsigned long flags;
int was_empty;
struct task_struct *first = (bheap_peek(edf_ready_order, tasks)->value);
TRACE_TASK(first, "BE tasks released at %llu\n", TIME(litmus_clock()));
raw_spin_lock_irqsave(global_lock, flags);
was_empty = !__jobs_pending(domain);
__merge_ready(domain, tasks);
if (was_empty) {
/* This can only cause a preemption if a BE server
 * previously could not find a task to run.
*/
check_for_global_preempt();
}
raw_spin_unlock_irqrestore(global_lock, flags);
}
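/*
 * Fires when an HRT server runs out of slack in its current period:
 * marks the server as having no slack, checks for an HRT preemption, and
 * donates the server's remaining budget if no HRT job ends up linked.
 */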
static enum hrtimer_restart slack_timer_fire(struct hrtimer *timer)
{
unsigned long flags;
hrt_server_t *server = container_of(timer, hrt_server_t, slack_timer);
cpu_entry_t *entry = container_of(server, cpu_entry_t, hrt_server);
raw_spin_lock_irqsave(global_lock, flags);
TRACE_TIMER("slack timer fired for P%d", entry->cpu);
BUG_ON(!server->ready);
sched_trace_action(entry->linked, 3);
/* Set new state of entry */
server->no_slack = 1;
check_for_hrt_preempt(entry);
/* Donate slack if there is nothing to do */
if (!entry->linked || !is_hrt(entry->linked)) {
donate_slack(&server->server);
check_for_global_preempt();
}
raw_spin_unlock_irqrestore(global_lock, flags);
return HRTIMER_NORESTART;
}
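/*
 * Handles a job completion: marks the job finished, possibly donates
 * server slack, advances the job (and SRT server) parameters, and
 * requeues the task unless it has blocked.
 */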
static void job_completion(cpu_entry_t *entry, struct task_struct* task)
{
server_t *server = entry->linked_server;
set_rt_flags(task, RT_F_SLEEP);
TRACE_TASK_SUB(task, "completed");
unlink(task);
check_donate_slack(server, task);
/* If a slack server completed an SRT, we don't want to do anything */
if (server->type == S_SLACK && is_srt(task)) {
tsk_rt(task)->job_params.job_no++;
sched_trace_task_release(task);
TRACE_TASK_SERVER_SUB(task, server, "catching up SRT, "
"rel: %llu, dead: %llu",
TIME(get_release(task)),
TIME(get_deadline(task)));
check_slack_candidate(task);
sched_trace_task_completion(task, 1);
return;
}
BUG_ON(is_queued(task));
if (server->type == S_SRT) {
/* If the task is behind the server it must release immediately,
* leaving its release time and deadline unchanged.
*/
if (server->job_no > tsk_rt(task)->job_params.job_no) {
TRACE_TASK_SUB(task, "catching up");
tsk_rt(task)->job_params.job_no++;
} else {
/* Otherwise release them both */
prepare_for_next_period(task);
TRACE_TASK_SUB(task, "next release: %llu, dead: %llu",
TIME(get_release(task)),
TIME(get_deadline(task)));
server_release(server);
}
} else {
prepare_for_next_period(task);
TRACE_TASK_SUB(task, "next release: %llu, dead: %llu",
TIME(get_release(task)),
TIME(get_deadline(task)));
}
if (is_released(task, litmus_clock()))
sched_trace_task_release(task);
/* Don't requeue a blocking task */
if (is_running(task))
job_arrival(task, entry);
sched_trace_task_completion(task, 1);
}
/*
* Assumes called with local irqs disabled.
*/
static void server_completed(server_t *server, struct task_struct *task)
{
lt_t now = litmus_clock();
hrt_server_t *hrt_server;
cpu_entry_t *entry = task_linked_entry(task);
BUG_ON(entry->linked != task);
if (server->type == S_SRT) {
TRACE_TASK_SUB(task, "must wait on server");
/* The job must now take the priority and release time
 * of the next server. We do this so that rt_domain and other
 * handy methods still work with SRT jobs. Because this can
 * ONLY happen if the
* task's job number gets behind the server's, we can
* easily detect the job catching up later.
*/
tsk_rt(task)->job_params.release = server->deadline;
tsk_rt(task)->job_params.deadline = server->deadline +
get_rt_period(task);
TRACE_TASK_SUB(task, "waiting, new dead: %llu, new rel: %llu",
TIME(get_deadline(task)),
TIME(get_release(task)));
}
if (server->type != S_SLACK) {
server_release(server);
if (lt_before_eq(server->release, litmus_clock())) {
sched_trace_action(task, 2);
} else {
sched_trace_action(task, 1);
}
}
if (is_hrt(task) && lt_after(server->release, now)) {
TRACE_SERVER_SUB(server, "P%d no longer ready", entry->cpu);
hrt_server = container_of(server, hrt_server_t, server);
hrt_server->ready = 0;
BUG_ON(hrtimer_active(&hrt_server->slack_timer));
}
unlink(task);
requeue(task, get_rt_domain(entry, task));
/* Need to pick the next task to run */
edf_hsb_pick_next(entry);
if (!entry->linked || entry->linked != task)
preempt_if_preemptable(entry->scheduled, entry->cpu);
}
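/*
 * An HRT server has been released by the server domain: mark it ready,
 * check for a local HRT preemption, and arm the slack timer unless an
 * HRT job is now linked.
 */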
static void hrt_server_released(server_t *server)
{
hrt_server_t *hrt_server = container_of(server, hrt_server_t, server);
cpu_entry_t *entry = container_of(hrt_server, cpu_entry_t, hrt_server);
BUG_ON(hrtimer_active(&hrt_server->slack_timer));
sched_trace_action(entry->scheduled, 2);
TRACE_SERVER_SUB(server, "HRT server released on P%d", entry->cpu);
hrt_server->no_slack = 0;
hrt_server->ready = 1;
check_for_hrt_preempt(entry);
/* Ensure slack timer is only running if the current
* job is not HRT.
*/
if (entry->linked && is_hrt(entry->linked))
slack_timer_cancel(hrt_server);
else
slack_timer_arm(hrt_server);
}
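/*
 * A group of servers (HRT and/or BE) was released by the server domain.
 * BE servers return to the ready heap; HRT servers are handled by
 * hrt_server_released(). A global preemption check runs if any BE
 * server was released.
 */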
static void servers_released(struct list_head *servers)
{
int was_be = 0;
unsigned long flags;
struct list_head *pos;
server_t *server;
raw_spin_lock_irqsave(global_lock, flags);
sched_trace_action(NULL, 2);
TRACE_TIMER("Servers released");
list_for_each(pos, servers) {
server = list_entry(pos, server_t, release_list);
if (server->type == S_BE) {
was_be = 1;
BUG_ON(bheap_node_in_heap(server->hn));
TRACE_SERVER_SUB(server, "inserting BE server");
bheap_insert(server_order, &be_ready_servers,
server->hn);
} else { /* HRT server */
hrt_server_released(server);
}
}
if (was_be)
check_for_global_preempt();
raw_spin_unlock_irqrestore(global_lock, flags);
}
/******************************************************************************
* Server management methods
******************************************************************************/
static int curr_be = 0;
/*
* A BE server has been added in a proc entry.
*/
static int admit_be_server(unsigned long long wcet,
unsigned long long period,
int cpu)
{
int rv = 0;
server_t *be_server;
if (cpu != NO_CPU) {
rv = -EINVAL;
goto out;
}
be_server = server_alloc(GFP_ATOMIC);
server_init(be_server, &server_domain,
BE_SERVER_BASE + ++curr_be,
wcet, period, 1);
be_server->type = S_BE;
server_slack_create(be_server);
list_add(&be_server->list, &be_servers);
out:
return rv;
}
/*
* Output all BE servers to a proc entry.
*/
static void list_be_servers(server_proc_t *proc)
{
struct list_head *pos;
server_t *be_server;
list_for_each(pos, &be_servers) {
be_server = list_entry(pos, server_t, list);
list_server(be_server, NO_CPU, proc);
}
}
/*
* Halts and destroys all BE servers.
*/
static void stop_be_servers(void)
{
server_t *be_server;
struct list_head *pos, *safe;
//atomic_set(&servers_running, 0);
list_for_each_safe(pos, safe, &be_servers) {
be_server = list_entry(pos, server_t, list);
list_del_init(pos);
if (bheap_node_in_heap(be_server->hn))
bheap_delete(server_order, &be_ready_servers,
be_server->hn);
server_slack_destroy(be_server);
server_destroy(be_server);
server_free(be_server);
}
}
/*
* An HRT server has been added in a proc entry.
*/
static int admit_hrt_server(unsigned long long wcet,
unsigned long long period,
int cpu)
{
cpu_entry_t *entry = &per_cpu(cpu_entries, cpu);
hrt_server_t *hrt_server = &entry->hrt_server;
struct hrtimer *slack_timer = &hrt_server->slack_timer;
server_init(&hrt_server->server, &server_domain,
cpu, wcet, period, 1);
server_slack_create(&hrt_server->server);
hrt_server->no_slack = 0;
hrt_server->ready = 1;
hrt_server->server.type = S_HRT;
edf_domain_init(&hrt_server->hrt_domain, NULL,
release_hrt_jobs);
hrtimer_init(slack_timer,
CLOCK_MONOTONIC,
HRTIMER_MODE_ABS);
slack_timer->function = slack_timer_fire;
return 0;
}
/*
* Print all HRT servers to a proc entry.
*/
static void list_hrt_servers(server_proc_t *proc)
{
cpu_entry_t *entry;
hrt_server_t *hrt_server;
int cpu;
for_each_online_cpu(cpu) {
entry = &per_cpu(cpu_entries, cpu);
hrt_server = &entry->hrt_server;
list_server(&hrt_server->server, cpu, proc);
}
}
/*
* Stops all hrt server timers and resets all fields to 0.
*/
static void stop_hrt_servers(void)
{
int cpu;
cpu_entry_t *entry;
hrt_server_t *hrt_server;
//atomic_set(&servers_running, 0);
for_each_online_cpu(cpu) {
entry = &per_cpu(cpu_entries, cpu);
hrt_server = &entry->hrt_server;
if (hrt_server->server.data)
server_slack_destroy(&hrt_server->server);
slack_timer_cancel(hrt_server);
hrt_server->no_slack = 0;
hrt_server->ready = 0;
hrt_server->server.period = 0;
hrt_server->server.wcet = 0;
}
}
/*
* Starts timers used to manage servers.
*/
static void start_servers(lt_t time)
{
int cpu;
cpu_entry_t *entry;
server_t *server;
server_t *be_server;
struct list_head *pos;
if (atomic_read(&servers_running))
return;
atomic_set(&servers_running, 1);
TRACE_SUB("starting servers at %llu", time);
/* Start HRT servers */
for_each_online_cpu(cpu) {
entry = &per_cpu(cpu_entries, cpu);
server = &entry->hrt_server.server;
if (!check_hrt_server_initialized(&entry->hrt_server))
goto loop_end;
/* Cause a catchup later */
server_release_at(server, time - server->period);
entry->hrt_server.ready = 1;
TRACE("Setting up cpu %d to have timer deadline %llu\n",
cpu, TIME(server->deadline));
loop_end:
cpu = cpu;
}
/* Start BE servers */
list_for_each(pos, &be_servers) {
be_server = list_entry(pos, server_t, list);
BUG_ON(bheap_node_in_heap(be_server->hn));
bheap_insert(server_order, &be_ready_servers, be_server->hn);
/* Cause a catchup later */
server_release_at(be_server, time - be_server->period);
TRACE("Releasing BE server %d\n", be_server->id);
TRACE_SERVER_SUB(be_server, "inserting be server");
}
}
/******************************************************************************
* Plugin methods
******************************************************************************/
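/*
 * Called when the plugin becomes active: record the release-master CPU
 * and re-add every online CPU (other than the release master) to the
 * CPU heap.
 */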
static long edf_hsb_activate_plugin(void)
{
int cpu;
cpu_entry_t *entry;
#ifdef CONFIG_RELEASE_MASTER
edf_hsb_release_master = atomic_read(&release_master_cpu);
#else
edf_hsb_release_master = NO_CPU;
#endif
server_domain.release_master = edf_hsb_release_master;
for_each_online_cpu(cpu) {
entry = &per_cpu(cpu_entries, cpu);
#ifdef CONFIG_RELEASE_MASTER
if (cpu != edf_hsb_release_master)
#endif
update_cpu_position(entry);
}
TRACE("activating EDF-HSB plugin.\n");
return 0;
}
/*
 * HRT tasks are admitted only if their CPU's server is initialized and
 * they are running on their assigned processor.
*/
static long edf_hsb_admit_task(struct task_struct *task)
{
cpu_entry_t *entry = task_sched_entry(task);
if (is_hrt(task)) {
return check_hrt_server_initialized(&entry->hrt_server) &&
((task_cpu(task) == task->rt_param.task_params.cpu) &&
(task_cpu(task) == entry->cpu)) ? 0 : -EINVAL;
} else {
/* If the task is not HRT, we don't want to force the user
* to specify a CPU.
*/
return 0;
}
}
/*
 * Cancels the per-CPU slack timers; the servers themselves are left intact.
*/
static long edf_hsb_deactivate_plugin(void)
{
cpu_entry_t *cpu_entry;
hrt_server_t *hrt_server;
unsigned long flags;
int cpu;
local_irq_save(flags);
/* stop_be_servers(); */
/* stop_hrt_servers(); */
for_each_online_cpu(cpu) {
cpu_entry = &per_cpu(cpu_entries, cpu);
hrt_server = &cpu_entry->hrt_server;
slack_timer_cancel(hrt_server);
}
local_irq_restore(flags);
return 0;
}
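/*
 * A task blocks: simply unlink it; it is requeued on wake-up.
 */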
static void edf_hsb_task_block(struct task_struct *task)
{
unsigned long flags;
TRACE_TASK(task, "block at %llu\n", litmus_clock());
raw_spin_lock_irqsave(global_lock, flags);
unlink(task);
raw_spin_unlock_irqrestore(global_lock, flags);
}
/*
* A task leaves the system.
*/
static void edf_hsb_task_exit(struct task_struct *task)
{
unsigned long flags;
cpu_entry_t *entry = task_sched_entry(task);
BUG_ON(!is_realtime(task));
TRACE_TASK(task, "RIP at %llu on P%d\n",
TIME(litmus_clock()), tsk_rt(task)->scheduled_on);
raw_spin_lock_irqsave(global_lock, flags);
unlink(task);
if (tsk_rt(task)->scheduled_on != NO_CPU) {
entry->scheduled = NULL;
tsk_rt(task)->scheduled_on = NO_CPU;
}
if (is_srt(task)) {
server_slack_destroy(task_srt_server(task));
server_destroy(task_srt_server(task));
server_free(task_srt_server(task));
task_data_free(tsk_rt(task)->plugin_data);
}
raw_spin_unlock_irqrestore(global_lock, flags);
}
/*
* Attempts to determine the current scheduler state, then selects the
* next task and updates the scheduler state.
*/
static struct task_struct* edf_hsb_schedule(struct task_struct *prev)
{
unsigned long flags;
int blocks, out_of_time, preempted, sleep, was_slack;
struct task_struct *curr;
cpu_entry_t *entry = local_cpu_entry;
#ifdef CONFIG_RELEASE_MASTER
/* Bail out early if we are the release master.
* The release master never schedules any real-time tasks.
*/
if (edf_hsb_release_master == entry->cpu)
return NULL;
#endif
raw_spin_lock_irqsave(global_lock, flags);
curr = entry->scheduled;
if (curr != prev && prev->pid == 0) {
TRACE_SUB("should bug");
} else {
BUG_ON(curr && !is_realtime(prev));
}
TRACE("server_budget: %llu, server_deadline: %llu, "
"curr_time: %llu, no_slack: %d, ready: %d\n",
TIME(entry->hrt_server.server.budget),
TIME(entry->hrt_server.server.deadline),
TIME(litmus_clock()), entry->hrt_server.no_slack,
entry->hrt_server.ready);
/* Determine state */
blocks = curr && !is_running(curr);
preempted = entry->scheduled != entry->linked;
sleep = curr && get_rt_flags(curr) == RT_F_SLEEP;
out_of_time = curr && budget_enforced(curr) &&
budget_exhausted(curr);
was_slack = !list_empty(&slack_queue);
TRACE("blocks: %d, preempted: %d, sleep: %d, oot: %d\n",
blocks, preempted, sleep, out_of_time);
if (blocks)
unlink(entry->scheduled);
/* If the task has gone to sleep or exhausted its budget, it
* must complete its current job.
*/
if ((out_of_time || sleep) && !blocks && !preempted)
job_completion(entry, entry->scheduled);
/* Pick the next task if there isn't one currently */
if (!entry->linked)
edf_hsb_pick_next(entry);
/* Set task states */
if (entry->linked != entry->scheduled) {
if (entry->linked)
entry->linked->rt_param.scheduled_on = entry->cpu;
if (entry->scheduled)
entry->scheduled->rt_param.scheduled_on = NO_CPU;
}
entry->scheduled = entry->linked;
sched_state_task_picked();
if (entry->scheduled)
TRACE_TASK(entry->scheduled, "scheduled at %llu\n",
TIME(litmus_clock()));
else
TRACE("NULL scheduled at %llu\n", TIME(litmus_clock()));
/* We generated slack. Check to see if anyone wants to use it */
if (entry->linked && entry->linked_server->type != S_SLACK &&
head_in_list(&server_slack(entry->linked_server)->list))
check_for_global_preempt();
raw_spin_unlock_irqrestore(global_lock, flags);
return entry->scheduled;
}
/*
* Prepare a task for running in RT mode
*/
static void edf_hsb_task_new(struct task_struct *task, int on_rq, int running)
{
unsigned long flags;
task_data_t *data;
server_t *srt_server = NULL;
cpu_entry_t *entry = task_sched_entry(task);
TRACE_TASK(task, "edf_hsb: task new at %llu\n", TIME(litmus_clock()));
raw_spin_lock_irqsave(global_lock, flags);
/* Setup job parameters */
release_at(task, litmus_clock());
if (is_srt(task)) {
/* Create SRT server */
srt_server = server_alloc(GFP_ATOMIC);
server_init(srt_server, &server_domain,
task->pid, get_exec_cost(task),
get_rt_period(task), 0);
srt_server->type = S_SRT;
server_slack_create(srt_server);
}
data = task_data_alloc(GFP_ATOMIC);
data->owner = task;
data->srt_server = srt_server;
INIT_LIST_HEAD(&data->candidate_list);
tsk_rt(task)->plugin_data = data;
/* Already running, update the cpu entry.
* This tends to happen when the first tasks enter the system.
*/
if (running) {
BUG_ON(entry->scheduled);
#ifdef CONFIG_RELEASE_MASTER
if (entry->cpu != edf_hsb_release_master) {
#endif
entry->scheduled = task;
tsk_rt(task)->scheduled_on = task_cpu(task);
#ifdef CONFIG_RELEASE_MASTER
} else {
/* do not schedule on release master */
/* Cannot preempt! Causing a preemption with a BE task
* somehow leads to that task never blocking during
* a synchronous release. WHY DOES THIS WORK
*/
if (!is_be(task))
preempt_if_preemptable(entry->scheduled, entry->cpu);
tsk_rt(task)->scheduled_on = NO_CPU;
}
#endif
} else {
task->rt_param.scheduled_on = NO_CPU;
}
task->rt_param.linked_on = NO_CPU;
job_arrival(task, entry);
raw_spin_unlock_irqrestore(global_lock, flags);
}
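/*
 * A task resumes. If it is still queued, it blocked while a slack server
 * was running it and only needs a candidate check; otherwise it re-arrives.
 * The first wake-up after a synchronous release also starts the servers.
 */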
static void edf_hsb_task_wake_up(struct task_struct *task)
{
unsigned long flags;
cpu_entry_t *entry = task_sched_entry(task);
TRACE_TASK(task, "wake_up at %llu on %d, %d\n", TIME(litmus_clock()),
task_cpu(task), task->rt_param.task_params.cpu);
set_rt_flags(task, RT_F_RUNNING);
/* The job blocked while it was being run by a slack server */
if (is_queued(task)) {
check_slack_candidate(task);
return;
}
//BUG_ON(entry->scheduled == task);
raw_spin_lock_irqsave(global_lock, flags);
/* A task set was released. Start servers. */
if (unlikely(!atomic_read(&servers_running)))
start_servers(get_deadline(task) - get_rt_period(task) -
get_rt_phase(task));
/* Note that since we are not re-releasing a task here, we have
* switched from the sporadic to the periodic task model. See
* sched_gsn_edf.c for the sporadic version.
*/
job_arrival(task, entry);
raw_spin_unlock_irqrestore(global_lock, flags);
}
/*
* Unused.
*/
static void edf_hsb_tick(struct task_struct *t)
{
}
/******************************************************************************
* Plugin
******************************************************************************/
static struct sched_plugin edf_hsb_plugin __cacheline_aligned_in_smp = {
.plugin_name = "EDF-HSB",
.activate_plugin = edf_hsb_activate_plugin,
.deactivate_plugin = edf_hsb_deactivate_plugin,
.schedule = edf_hsb_schedule,
.admit_task = edf_hsb_admit_task,
.task_block = edf_hsb_task_block,
.task_exit = edf_hsb_task_exit,
.task_new = edf_hsb_task_new,
.task_wake_up = edf_hsb_task_wake_up,
.tick = edf_hsb_tick,
/* From jobs.h */
.complete_job = complete_job,
.release_at = release_at,
};
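/*
 * Module initialization: register the plugin, create the proc entries,
 * and initialize all domains, heaps, lists, and per-CPU state.
 */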
static int __init init_edf_hsb(void)
{
cpu_entry_t *entry;
hrt_server_t *hrt_server;
int rv, cpu;
rv = register_sched_plugin(&edf_hsb_plugin);
if (rv) {
printk(KERN_ERR "Could not register plugin %s.\n",
edf_hsb_plugin.plugin_name);
goto out;
}
rv = make_plugin_proc_dir(&edf_hsb_plugin, &edf_hsb_proc_dir);
if (rv) {
printk(KERN_ERR "Could not create %s procfs dir.\n",
edf_hsb_plugin.plugin_name);
goto out;
}
task_data_cache = KMEM_CACHE(task_data, SLAB_PANIC);
/* Global domains */
edf_domain_init(&srt_domain, NULL, release_srt_jobs);
rt_domain_init(&be_domain, be_ready_order,
NULL, release_be_jobs);
server_domain_init(&server_domain, servers_released,
server_completed, NO_CPU, global_lock);
/* Server proc interfaces */
server_proc_init(&server_domain,
edf_hsb_proc_dir, BE_PROC_NAME,
admit_be_server, list_be_servers,
stop_be_servers);
server_proc_init(&server_domain,
edf_hsb_proc_dir, HRT_PROC_NAME,
admit_hrt_server, list_hrt_servers,
stop_hrt_servers);
/* Global collections */
bheap_init(&cpu_heap);
bheap_init(&be_ready_servers);
INIT_LIST_HEAD(&be_servers);
INIT_LIST_HEAD(&slack_queue);
INIT_LIST_HEAD(&slack_candidates);
for_each_online_cpu(cpu) {
entry = &per_cpu(cpu_entries, cpu);
hrt_server = &entry->hrt_server;
entry->cpu = cpu;
entry->linked = NULL;
entry->scheduled = NULL;
entry->linked_server = NULL;
/* HRT server */
hrt_server->server.id = cpu;
hrt_server->server.deadline = 0;
hrt_server->server.period = 0;
hrt_server->server.wcet = 0;
hrt_server->ready = 0;
hrtimer_start_on_info_init(&hrt_server->slack_timer_info);
/* CPU entry bheap nodes */
entry->hn = &cpu_heap_node[cpu];
bheap_node_init(&entry->hn, entry);
}
out:
return rv;
}
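/*
 * Module cleanup: stop all servers, tear down the server domain and
 * per-CPU HRT servers, and remove the plugin proc directory.
 */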
static void exit_edf_hsb(void)
{
int cpu;
cpu_entry_t *entry;
stop_be_servers();
stop_hrt_servers();
server_domain_destroy(&server_domain);
for_each_online_cpu(cpu) {
entry = &per_cpu(cpu_entries, cpu);
server_slack_destroy(&entry->hrt_server.server);
server_destroy(&entry->hrt_server.server);
}
if (edf_hsb_proc_dir) {
remove_plugin_proc_dir(&edf_hsb_plugin);
/* TODO: is this wrong? */
edf_hsb_proc_dir = NULL;
}
}
module_init(init_edf_hsb);
module_exit(exit_edf_hsb);