/* NOTE: the header targets below are the standard set for a LITMUS^RT
 * reservation-based plugin; the gedf reservation header name is assumed and
 * should match whatever header declares struct gedf_reservation_environment
 * in this tree.
 */
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/module.h>

#include <litmus/litmus.h>
#include <litmus/sched_plugin.h>
#include <litmus/preempt.h>
#include <litmus/debug_trace.h>
#include <litmus/jobs.h>
#include <litmus/budget.h>
#include <litmus/litmus_proc.h>

#include <litmus/reservations/reservation.h>
#include <litmus/reservations/gedf_reservation.h>

struct gedf_reservation_environment* gedf_env;

struct cpu_time {
	struct hrtimer timer;
	lt_t last_update_time;
};

static DEFINE_PER_CPU(struct cpu_time, cpu_time);

static enum hrtimer_restart on_budget_timeout(struct hrtimer *timer)
{
	litmus_reschedule_local();
	return HRTIMER_NORESTART;
}

/*
static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
{
	unsigned long flags;
	enum hrtimer_restart restart = HRTIMER_NORESTART;
	struct pres_cpu_state *state;
	lt_t update, now;

	state = container_of(timer, struct pres_cpu_state, timer);

	// The scheduling timer should only fire on the local CPU, because
	// otherwise deadlocks via timer_cancel() are possible.
	// Note: this does not interfere with dedicated interrupt handling, as
	// even under dedicated interrupt handling scheduling timers for
	// budget enforcement must occur locally on each CPU.
	//
	BUG_ON(state->cpu != raw_smp_processor_id());

	raw_spin_lock_irqsave(&state->lock, flags);
	sup_update_time(&state->sup_env, litmus_clock());

	update = state->sup_env.next_scheduler_update;
	now = state->sup_env.env.current_time;

	TRACE_CUR("on_scheduling_timer at %llu, upd:%llu (for cpu=%d)\n",
		  now, update, state->cpu);

	if (update <= now) {
		litmus_reschedule_local();
	} else if (update != SUP_NO_SCHEDULER_UPDATE) {
		hrtimer_set_expires(timer, ns_to_ktime(update));
		restart = HRTIMER_RESTART;
	}

	raw_spin_unlock_irqrestore(&state->lock, flags);

	return restart;
}
*/

static struct task_struct* ext_res_schedule(struct task_struct * prev)
{
	int cpu = smp_processor_id();
	lt_t delta, time_slice;
	struct cpu_time* entry;
	struct task_struct* next;

	entry = this_cpu_ptr(&cpu_time);
	delta = litmus_clock() - entry->last_update_time;

	//TODO: implement per cpu lt_t to track time
	gedf_env->env.ops->update_time(&gedf_env->env, delta, cpu);
	next = gedf_env->env.ops->dispatch(&gedf_env->env, &time_slice, cpu);

	entry->last_update_time = litmus_clock();

	if (time_slice != ULLONG_MAX) {
		hrtimer_start(&entry->timer,
			      ns_to_ktime(entry->last_update_time + time_slice),
			      HRTIMER_MODE_ABS_PINNED);
	} else
		hrtimer_try_to_cancel(&entry->timer);

	sched_state_task_picked();

	return next;
}

/* Called when a task should be removed from the ready queue. */
static void ext_res_task_block(struct task_struct *tsk)
{
	struct reservation* res;

	TRACE_TASK(tsk, "thread suspends at %llu\n", litmus_clock());

	res = (struct reservation*) tsk_rt(tsk)->plugin_state;
	res->par_env->ops->remove_res(res->par_env, res, 0, 0);
}
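/* The calls above go through the reservation-environment ops table, which is
 * defined outside this file (in the gedf reservation code). The block below
 * is a non-compiled sketch of the callback signatures as inferred from the
 * call sites in this plugin; the struct tag, parameter names, and the meaning
 * of the integer flags are assumptions, not the authoritative declarations.
 */
#if 0
struct reservation_environment_ops {
	/* Charge 'delta' ns of elapsed time against the environment, as
	 * observed from 'cpu' (see ext_res_schedule). */
	void (*update_time)(struct reservation_environment *env,
			    lt_t delta, int cpu);

	/* Pick the next task for 'cpu'. '*time_slice' appears to receive the
	 * budget (in ns) until the next scheduling event, or ULLONG_MAX when
	 * no budget timer needs to be armed. */
	struct task_struct* (*dispatch)(struct reservation_environment *env,
					lt_t *time_slice, int cpu);

	/* Enqueue / dequeue a client reservation. The extra integer argument
	 * to remove_res is passed as 0 on a normal block and 1 on task exit,
	 * presumably a "task is leaving for good" flag. */
	void (*add_res)(struct reservation_environment *env,
			struct reservation *res, int flags);
	void (*remove_res)(struct reservation_environment *env,
			   struct reservation *res, int exiting, int flags);

	/* Bring a CPU into service / tear the environment down. */
	void (*resume)(struct reservation_environment *env, int cpu);
	void (*shutdown)(struct reservation_environment *env);
};
#endif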
/* Called when the state of tsk changes back to TASK_RUNNING.
 * We need to requeue the task.
 */
static void ext_res_task_resume(struct task_struct *tsk)
{
	struct reservation* res;

	TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock());

	res = (struct reservation*) tsk_rt(tsk)->plugin_state;
	res->par_env->ops->add_res(res->par_env, res, 0);
}

static long ext_res_admit_task(struct task_struct *tsk)
{
	long err = 0;
	struct gedf_task_reservation* gedf_task_res;

	err = alloc_gedf_task_reservation(&gedf_task_res, tsk);
	if (err)
		return err;

	tsk_rt(tsk)->plugin_state = gedf_task_res;
	gedf_task_res->gedf_res.res.par_env = &gedf_env->env;

	//TODO: for checkpoint 2, need to find component and insert into it
	return 0;
}

static void ext_res_task_new(struct task_struct *tsk, int on_runqueue,
			     int is_running)
{
	struct reservation* res;
	lt_t now = litmus_clock();

	TRACE_TASK(tsk, "new RT task %llu (on_rq:%d, running:%d)\n",
		   now, on_runqueue, is_running);

	res = (struct reservation*)tsk_rt(tsk)->plugin_state;

	release_at(tsk, now);
	res->replenishment_time = now;

	res->par_env->ops->add_res(res->par_env, res, 0);

	if (is_running)
		litmus_reschedule_local();
}

/*
static bool pres_fork_task(struct task_struct *tsk)
{
	TRACE_CUR("is forking\n");
	TRACE_TASK(tsk, "forked child rt:%d cpu:%d task_cpu:%d "
			"wcet:%llu per:%llu\n",
		is_realtime(tsk),
		tsk_rt(tsk)->task_params.cpu,
		task_cpu(tsk),
		tsk_rt(tsk)->task_params.exec_cost,
		tsk_rt(tsk)->task_params.period);

	// We always allow forking.
	// The newly forked task will be in the same reservation.
	return true;
}
*/

static void ext_res_task_exit(struct task_struct *tsk)
{
	struct reservation* res;
	struct reservation_environment* par_env;

	res = (struct reservation*)tsk_rt(tsk)->plugin_state;
	par_env = res->par_env;

	par_env->ops->remove_res(par_env, res, 1, 0);

	TRACE_TASK(tsk, "task exits at %llu\n", litmus_clock());
}

/* Used by task budget tracking in budget.c. Since we have tasks in containers
 * that track budget, we don't need this. Furthermore, this scheme doesn't
 * work efficiently with multicore reservations.
 */
/*
static void pres_current_budget(lt_t *used_so_far, lt_t *remaining)
{
	struct pres_task_state *tstate = get_pres_state(current);
	struct pres_cpu_state *state;

	// FIXME: protect against concurrent task_exit()

	local_irq_disable();

	state = cpu_state_for(tstate->cpu);

	raw_spin_lock(&state->lock);

	sup_update_time(&state->sup_env, litmus_clock());
	if (remaining)
		*remaining = tstate->client->reservation->cur_budget;
	if (used_so_far)
		*used_so_far = tstate->client->reservation->budget_consumed;
	pres_update_timer_and_unlock(state);

	local_irq_enable();
}
*/
/*
static long do_pres_reservation_create(
	int res_type,
	struct reservation_config *config)
{
	struct pres_cpu_state *state;
	struct reservation* res;
	struct reservation* new_res = NULL;
	unsigned long flags;
	long err;

	// Allocate before we grab a spin lock.
	switch (res_type) {
	case PERIODIC_POLLING:
	case SPORADIC_POLLING:
		err = alloc_polling_reservation(res_type, config, &new_res);
		break;
	case TABLE_DRIVEN:
		err = alloc_table_driven_reservation(config, &new_res);
		break;
	default:
		err = -EINVAL;
		break;
	}

	if (err)
		return err;

	state = cpu_state_for(config->cpu);
	raw_spin_lock_irqsave(&state->lock, flags);

	res = sup_find_by_id(&state->sup_env, config->id);
	if (!res) {
		sup_add_new_reservation(&state->sup_env, new_res);
		err = config->id;
	} else {
		err = -EEXIST;
	}

	raw_spin_unlock_irqrestore(&state->lock, flags);

	if (err < 0)
		kfree(new_res);

	return err;
}
*/

/*
static long pres_reservation_create(int res_type, void* __user _config)
{
	struct reservation_config config;

	TRACE("Attempt to create reservation (%d)\n", res_type);

	if (copy_from_user(&config, _config, sizeof(config)))
		return -EFAULT;

	if (config.cpu < 0 || !cpu_online(config.cpu)) {
		printk(KERN_ERR "invalid polling reservation (%u): "
		       "CPU %d offline\n", config.id, config.cpu);
		return -EINVAL;
	}

	return do_pres_reservation_create(res_type, &config);
}
*/

static struct domain_proc_info ext_res_domain_proc_info;

static long ext_res_get_domain_proc_info(struct domain_proc_info **ret)
{
	*ret = &ext_res_domain_proc_info;
	return 0;
}

static void ext_res_setup_domain_proc(void)
{
	int i, cpu;
	int num_rt_cpus = num_online_cpus();
	struct cd_mapping *cpu_map, *domain_map;

	memset(&ext_res_domain_proc_info, 0, sizeof(ext_res_domain_proc_info));
	init_domain_proc_info(&ext_res_domain_proc_info,
			      num_rt_cpus, num_rt_cpus);
	ext_res_domain_proc_info.num_cpus = num_rt_cpus;
	ext_res_domain_proc_info.num_domains = num_rt_cpus;

	i = 0;
	for_each_online_cpu(cpu) {
		cpu_map = &ext_res_domain_proc_info.cpu_to_domains[i];
		domain_map = &ext_res_domain_proc_info.domain_to_cpus[i];

		cpu_map->id = cpu;
		domain_map->id = i;
		cpumask_set_cpu(i, cpu_map->mask);
		cpumask_set_cpu(cpu, domain_map->mask);
		++i;
	}
}

static long ext_res_activate_plugin(void)
{
	int cpu;
	int num_cpus = num_online_cpus();
	struct cpu_time* entry;
	lt_t now = litmus_clock();

	alloc_gedf_reservation_environment(&gedf_env, num_cpus);

	for_each_online_cpu(cpu) {
		TRACE("Initializing CPU%d...\n", cpu);

		entry = per_cpu_ptr(&cpu_time, cpu);
		hrtimer_init(&entry->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
		entry->timer.function = on_budget_timeout;
		entry->last_update_time = now;

		gedf_env->cpu_entries[cpu].id = cpu;
		gedf_env->env.ops->resume(&gedf_env->env, cpu);
	}

	gedf_env->num_cpus = num_cpus;

	ext_res_setup_domain_proc();

	return 0;
}

static long ext_res_deactivate_plugin(void)
{
	int cpu;
	struct cpu_time* entry;

	gedf_env->env.ops->shutdown(&gedf_env->env);

	for_each_online_cpu(cpu) {
		entry = per_cpu_ptr(&cpu_time, cpu);
		hrtimer_cancel(&entry->timer);
	}

	destroy_domain_proc_info(&ext_res_domain_proc_info);
	return 0;
}

static struct sched_plugin ext_res_plugin = {
	.plugin_name		= "EXT-RES",
	.schedule		= ext_res_schedule,
	.task_block		= ext_res_task_block,
	.task_wake_up		= ext_res_task_resume,
	.admit_task		= ext_res_admit_task,
	.task_new		= ext_res_task_new,
	//.fork_task		= pres_fork_task,
	.task_exit		= ext_res_task_exit,
	.complete_job		= complete_job,
	.get_domain_proc_info	= ext_res_get_domain_proc_info,
	.activate_plugin	= ext_res_activate_plugin,
	.deactivate_plugin	= ext_res_deactivate_plugin,
	//.reservation_create	= pres_reservation_create,
	//.current_budget	= pres_current_budget,
};

static int __init init_ext_res(void)
{
	return register_sched_plugin(&ext_res_plugin);
}

module_init(init_ext_res);
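/* Userspace usage sketch (not part of this file): once this plugin is the
 * active LITMUS^RT plugin (e.g. via the setsched helper, "setsched EXT-RES"),
 * a task enters real-time mode through the usual liblitmus path, which lands
 * in ext_res_admit_task()/ext_res_task_new() above. The liblitmus calls and
 * rt_task field names below reflect the standard API and may differ slightly
 * between liblitmus versions.
 *
 *	#include <litmus.h>
 *
 *	int main(void)
 *	{
 *		struct rt_task param;
 *
 *		init_litmus();
 *
 *		init_rt_task_param(&param);
 *		param.exec_cost = ms2ns(10);	// worst-case execution time
 *		param.period    = ms2ns(100);	// period (implicit deadline)
 *
 *		set_rt_task_param(gettid(), &param);
 *		task_mode(LITMUS_RT_TASK);	// admit_task + task_new
 *
 *		// ... job body ...
 *		sleep_next_period();		// complete_job path
 *
 *		task_mode(BACKGROUND_TASK);	// leaves RT mode; task_exit
 *		return 0;
 *	}
 */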