#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

static void mtd_shutdown(
	struct reservation* res)
{
	struct mtd_reservation* mtd_res =
		container_of(res, struct mtd_reservation, res[0]);
	int cpu;

	mtd_res->res[0].env->ops->shutdown(res->env);
	for_each_online_cpu(cpu) {
		clean_up_ext_reservation(&mtd_res->res[cpu]);
		if (mtd_res->num_intervals[cpu])
			kfree(mtd_res->intervals[cpu]);
	}
	kfree(res);
}

static int mtd_is_np(
	struct reservation* res,
	int cpu)
{
	BUG_ON(res->env->ops->is_np(res->env, cpu));
	return 0;
}

static void mtd_on_preempt(
	struct reservation* res,
	int cpu)
{
	res->env->ops->suspend(res->env, cpu);
}

static void mtd_on_schedule(
	struct reservation* res,
	int cpu)
{
	res->env->ops->resume(res->env, cpu);
}

static struct task_struct* mtd_dispatch_client(
	struct reservation* res,
	lt_t* time_slice,
	int cpu)
{
	return res->env->ops->dispatch(res->env, time_slice, cpu);
}

static void mtd_replenish_budget(
	struct reservation* res,
	int cpu)
{
	struct mtd_reservation* mtd_res =
		container_of(res, struct mtd_reservation, res[cpu]);

	BUG_ON(res != &mtd_res->res[cpu]);
	BUG_ON(!mtd_res->num_intervals[cpu]);

	/* calculate next interval index */
	mtd_res->interval_index[cpu] =
		(mtd_res->interval_index[cpu] + 1) % mtd_res->num_intervals[cpu];
	/* if wrap around, then increment major cycle */
	if (!mtd_res->interval_index[cpu])
		mtd_res->major_cycle_start[cpu] += mtd_res->major_cycle;

	mtd_res->cur_interval[cpu].start =
		mtd_res->intervals[cpu][mtd_res->interval_index[cpu]].start;
	mtd_res->cur_interval[cpu].end =
		mtd_res->intervals[cpu][mtd_res->interval_index[cpu]].end;

	res->cur_budget =
		mtd_res->cur_interval[cpu].end - mtd_res->cur_interval[cpu].start;
	res->replenishment_time = mtd_res->major_cycle_start[cpu];
	res->replenishment_time += mtd_res->cur_interval[cpu].start;
}

static void mtd_drain_budget(
	struct reservation* res,
	lt_t how_much,
	int cpu)
{
	struct mtd_reservation* mtd_res;
	lt_t now, end;

	mtd_res = container_of(res, struct mtd_reservation, res[cpu]);

	BUG_ON(res != &mtd_res->res[cpu]);
	BUG_ON(!mtd_res->num_intervals[cpu]);

	now = litmus_clock();
	end = mtd_res->major_cycle_start[cpu] + mtd_res->cur_interval[cpu].end;
	if (now >= end)
		res->cur_budget = 0;
	else
		res->cur_budget = end - now;

	res->env->ops->update_time(res->env, how_much, cpu);
}

static struct reservation_ops mtd_ops = {
	.drain_budget = mtd_drain_budget,
	.replenish_budget = mtd_replenish_budget,
	.dispatch_client = mtd_dispatch_client,
	.on_schedule = mtd_on_schedule,
	.on_preempt = mtd_on_preempt,
	.is_np = mtd_is_np,
	.shutdown = mtd_shutdown
};
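/*
 * Worked example (hypothetical numbers, for illustration only): suppose a CPU's
 * table has a 10 ms major cycle and the intervals {[1 ms, 3 ms], [6 ms, 7 ms]},
 * with the first major cycle starting at time 0. mtd_replenish_budget() then
 * cycles interval_index 0 -> 1 -> 0 -> ..., adds the major cycle length to
 * major_cycle_start on every wrap-around, and so produces replenishment times
 * 1 ms, 6 ms, 11 ms, 16 ms, ... with budgets 2 ms, 1 ms, 2 ms, 1 ms, ...
 */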
/* Cannot handle installing the table for the same core from different threads
 * simultaneously. The intervals array is passed from the config, which lives
 * in userspace. */
long mtd_res_install_table(
	struct mtd_reservation* mtd_res,
	struct lt_interval* intervals,
	lt_t major_cycle,
	unsigned int num_intervals,
	int cpu)
{
	long err;

	if (mtd_res->major_cycle && major_cycle != mtd_res->major_cycle)
		return -EINVAL;

	/* deallocate memory for the previous table, if any */
	if (mtd_res->num_intervals[cpu])
		kfree(mtd_res->intervals[cpu]);
	mtd_res->num_intervals[cpu] = 0;

	/* allocate kernel memory for the new table */
	mtd_res->intervals[cpu] =
		kzalloc(sizeof(struct lt_interval) * num_intervals, GFP_KERNEL);
	if (!mtd_res->intervals[cpu])
		return -ENOMEM;

	/* copy the table from user space; copy_from_user() returns the number
	 * of bytes that could not be copied, not an errno */
	err = copy_from_user(mtd_res->intervals[cpu], intervals,
		sizeof(struct lt_interval) * num_intervals);
	if (err) {
		kfree(mtd_res->intervals[cpu]);
		mtd_res->intervals[cpu] = NULL;
		return -EFAULT;
	}

	mtd_res->major_cycle = major_cycle;
	mtd_res->num_intervals[cpu] = num_intervals;

	/* reservations always begin executing at major cycle boundaries */
	mtd_res->interval_index[cpu] = 0;
	mtd_res->cur_interval[cpu].start = mtd_res->intervals[cpu][0].start;
	mtd_res->cur_interval[cpu].end = mtd_res->intervals[cpu][0].end;

	return 0;
}

long alloc_mtd_reservation(
	struct mtd_reservation** _res,
	unsigned int id,
	lt_t major_cycle)
{
	struct mtd_reservation* mtd_res;
	int i;

	mtd_res = kzalloc(sizeof(struct mtd_reservation), GFP_KERNEL);
	if (!mtd_res)
		return -ENOMEM;

	for_each_online_cpu(i) {
		init_ext_reservation(&mtd_res->res[i], id, &mtd_ops);
	}

	mtd_res->major_cycle = major_cycle;

	*_res = mtd_res;
	return 0;
}

/* ***************************************************************** */

static int mtd_ready_order(struct bheap_node* a, struct bheap_node* b)
{
	return higher_res_prio(bheap2res(a), bheap2res(b));
}

static void requeue(
	struct mtd_cpu_entry* entry,
	struct reservation* res)
{
	BUG_ON(!res);
	BUG_ON(is_queued_res(res));

	if (lt_before_eq(res->replenishment_time, litmus_clock()))
		__add_ready_res(&entry->domain, res);
	else
		__add_release_res(&entry->domain, res);
}

/* ***************************************************************** */

static void mtd_env_shutdown(
	struct reservation_environment* env)
{
	struct mtd_reservation_environment* mtd_env;
	struct reservation* res;
	unsigned long flags;
	int cpu;

	mtd_env = container_of(env, struct mtd_reservation_environment, env);

	for_each_online_cpu(cpu) {
		domain_suspend_releases(&mtd_env->cpu_entries[cpu].domain);
	}

	raw_spin_lock_irqsave(&mtd_env->insert_lock, flags);
	/* call shutdown on all scheduled reservations */
	while (!list_empty(&env->all_reservations)) {
		res = list_first_entry(&env->all_reservations,
			struct reservation, all_list);
		list_del_init(&res->all_list);
		res->ops->shutdown(res);
	}
	raw_spin_unlock_irqrestore(&mtd_env->insert_lock, flags);

	/* free memory */
	kfree(env);
}

static int mtd_env_is_np(
	struct reservation_environment* env,
	int cpu)
{
	return 1;
}

static struct reservation* mtd_find_res_by_id(
	struct reservation_environment* env,
	int id)
{
	struct reservation* res;
	unsigned long flags;
	struct mtd_reservation_environment* mtd_env =
		container_of(env, struct mtd_reservation_environment, env);

	raw_spin_lock_irqsave(&mtd_env->insert_lock, flags);
	list_for_each_entry(res, &env->all_reservations, all_list) {
		if (res->id == id) {
			raw_spin_unlock_irqrestore(&mtd_env->insert_lock, flags);
			return res;
		}
	}
	raw_spin_unlock_irqrestore(&mtd_env->insert_lock, flags);
	return NULL;
}

/* not supported */
static void mtd_env_remove_res(
	struct reservation_environment* env,
	struct reservation* res,
	int complete,
	int cpu)
{
	return;
}
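/*
 * Illustrative sketch (not part of this file): how a caller might create an
 * MTD reservation and install its per-CPU slot tables one core at a time, all
 * sharing a single major cycle. The function name and parameters below are
 * hypothetical; only alloc_mtd_reservation() and mtd_res_install_table() above
 * are real, and the table pointers are expected to reference userspace memory.
 */
#if 0
static long example_setup_mtd_res(unsigned int id, lt_t major_cycle,
	struct lt_interval* user_tables[], unsigned int num_intervals[],
	int num_cpus)
{
	struct mtd_reservation* mtd_res;
	long err;
	int cpu;

	err = alloc_mtd_reservation(&mtd_res, id, major_cycle);
	if (err)
		return err;

	/* each core's table is installed separately under one major cycle */
	for (cpu = 0; cpu < num_cpus; cpu++) {
		err = mtd_res_install_table(mtd_res, user_tables[cpu],
			major_cycle, num_intervals[cpu], cpu);
		if (err)
			return err;
	}
	return 0;
}
#endif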
/* the reservation is added one core at a time due to how the table is specified */
static void mtd_env_add_res(
	struct reservation_environment* env,
	struct reservation* res,
	int cpu)
{
	struct mtd_reservation_environment* mtd_env;
	struct mtd_reservation* mtd_res;
	lt_t tmp;
	unsigned long flags;

	mtd_env = container_of(env, struct mtd_reservation_environment, env);
	mtd_res = container_of(res, struct mtd_reservation, res[cpu]);

	BUG_ON(&mtd_res->res[cpu] != res);

	/* only add to the environment's list if this is the first core of the
	 * reservation to be added */
	raw_spin_lock_irqsave(&mtd_env->insert_lock, flags);
	if (!mtd_res->res[0].par_env) {
		mtd_res->res[0].par_env = env;
		list_add_tail(&mtd_res->res[0].all_list, &env->all_reservations);
	}
	raw_spin_unlock_irqrestore(&mtd_env->insert_lock, flags);

	tmp = div64_u64(litmus_clock(), mtd_res->major_cycle);
	mtd_res->major_cycle_start[cpu] = tmp * mtd_res->major_cycle;

	res->par_env = env;

	if (mtd_res->num_intervals[cpu]) {
		tmp = mtd_res->major_cycle_start[cpu];
		res->replenishment_time = tmp + mtd_res->cur_interval[cpu].start;
		res->cur_budget =
			mtd_res->cur_interval[cpu].end - mtd_res->cur_interval[cpu].start;

		raw_spin_lock_irqsave(&mtd_env->cpu_entries[cpu].domain.ready_lock, flags);
		requeue(&mtd_env->cpu_entries[cpu], &mtd_res->res[cpu]);
		if (mtd_res->res[cpu].replenishment_time <= litmus_clock())
			litmus_reschedule_local();
		raw_spin_unlock_irqrestore(&mtd_env->cpu_entries[cpu].domain.ready_lock, flags);
	}
}

/* not supported */
static void mtd_env_suspend(
	struct reservation_environment* env,
	int cpu)
{
	return;
}

/* not supported */
static void mtd_env_resume(
	struct reservation_environment* env,
	int cpu)
{
	return;
}

/* If two reservations have overlapping intervals on the same core,
 * then which one is scheduled is undefined */
static struct task_struct* mtd_env_dispatch(
	struct reservation_environment* env,
	lt_t* time_slice,
	int cpu)
{
	struct mtd_reservation_environment* mtd_env;
	struct mtd_cpu_entry* entry;
	struct task_struct* next = NULL;
	unsigned long flags;

	mtd_env = container_of(env, struct mtd_reservation_environment, env);
	entry = &mtd_env->cpu_entries[cpu];

	raw_spin_lock_irqsave(&entry->domain.ready_lock, flags);
	/* if linked and scheduled differ, preempt and schedule accordingly */
	if (!entry->linked)
		entry->linked = __take_ready_res(&entry->domain);
	if (entry->scheduled != entry->linked) {
		if (entry->scheduled && entry->scheduled->ops->on_preempt) {
			entry->scheduled->ops->on_preempt(entry->scheduled, cpu);
		}
		if (entry->linked && entry->linked->ops->on_schedule) {
			entry->linked->ops->on_schedule(entry->linked, cpu);
		}
		entry->scheduled = entry->linked;
	}
	raw_spin_unlock_irqrestore(&entry->domain.ready_lock, flags);

	if (entry->scheduled) {
		/* let the scheduled reservation decide what runs next */
		next = entry->scheduled->ops->dispatch_client(entry->scheduled,
			time_slice, cpu);
		*time_slice = (*time_slice > entry->scheduled->cur_budget) ?
			entry->scheduled->cur_budget : *time_slice;
	} else {
		*time_slice = ULLONG_MAX;
	}

	return next;
}
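/*
 * Illustrative sketch (hypothetical caller, not part of this file): a parent
 * scheduler is expected to charge the time consumed since its last decision
 * through env->ops->update_time() and then ask env->ops->dispatch() for the
 * next task on the local CPU; the returned time_slice bounds how long the
 * decision stays valid. The function below is purely an assumption about how
 * these two callbacks are meant to be combined.
 */
#if 0
static struct task_struct* example_pick_next(
	struct reservation_environment* env, lt_t delta, int cpu)
{
	lt_t slice = ULLONG_MAX;

	env->ops->update_time(env, delta, cpu);	/* drain the scheduled reservation */
	return env->ops->dispatch(env, &slice, cpu);	/* NULL means idle */
}
#endif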
static void mtd_env_update_time(
	struct reservation_environment* env,
	lt_t how_much,
	int cpu)
{
	struct mtd_reservation_environment* mtd_env;
	struct mtd_cpu_entry* entry;
	unsigned long flags;

	mtd_env = container_of(env, struct mtd_reservation_environment, env);
	entry = &mtd_env->cpu_entries[cpu];

	/* Drain the budget of the scheduled reservation.
	 * If multiple reservations on this core share an execution frame, only
	 * one has its budget drained here. The others are scheduled at the end
	 * of the frame for an epsilon amount of time and immediately have their
	 * budgets drained before the reservation of the next frame is scheduled,
	 * so only one of them receives actual execution time. */
	if (entry->scheduled) {
		entry->scheduled->ops->drain_budget(entry->scheduled, how_much, cpu);
		/* if no more budget, replenish and requeue */
		if (!entry->scheduled->cur_budget) {
			entry->scheduled->ops->replenish_budget(entry->scheduled, cpu);

			raw_spin_lock_irqsave(&entry->domain.ready_lock, flags);
			requeue(entry, entry->scheduled);
			raw_spin_unlock_irqrestore(&entry->domain.ready_lock, flags);

			entry->linked = NULL;
		} else {
			entry->linked = entry->scheduled;
		}
	}
}

/* callback for how the domain will release jobs */
static void mtd_env_release_jobs(rt_domain_t* rt, struct bheap* res)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rt->ready_lock, flags);
	__merge_ready(rt, res);
	litmus_reschedule_local();
	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
}

static struct reservation_environment_ops mtd_env_ops = {
	.update_time = mtd_env_update_time,
	.dispatch = mtd_env_dispatch,
	.resume = mtd_env_resume,
	.suspend = mtd_env_suspend,
	.add_res = mtd_env_add_res,
	.remove_res = mtd_env_remove_res,
	.find_res_by_id = mtd_find_res_by_id,
	.is_np = mtd_env_is_np,
	.shutdown = mtd_env_shutdown
};

long alloc_mtd_reservation_environment(
	struct mtd_reservation_environment** _env,
	int num_cpus)
{
	struct mtd_reservation_environment* mtd_env;
	int i;

	mtd_env = kzalloc(sizeof(struct mtd_reservation_environment), GFP_KERNEL);
	if (!mtd_env)
		return -ENOMEM;

	/* set environment callback actions */
	mtd_env->env.ops = &mtd_env_ops;
	INIT_LIST_HEAD(&mtd_env->env.all_reservations);
	raw_spin_lock_init(&mtd_env->insert_lock);

	mtd_env->num_cpus = num_cpus;
	for (i = 0; i < num_cpus; i++) {
		mtd_env->cpu_entries[i].id = i;
		/* initialize per cpu domain */
		rt_domain_init(&mtd_env->cpu_entries[i].domain,
			mtd_ready_order, NULL, mtd_env_release_jobs);
	}

	*_env = mtd_env;
	return 0;
}
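/*
 * Illustrative sketch (not part of this file): wiring an MTD environment to a
 * reservation. add_res() is invoked once per core, passing that core's member
 * of mtd_res->res[], which matches how mtd_env_add_res() recovers the
 * containing struct mtd_reservation. The function name below is hypothetical;
 * only alloc_mtd_reservation_environment() and the env ops above are real.
 */
#if 0
static long example_attach(struct mtd_reservation* mtd_res, int num_cpus)
{
	struct mtd_reservation_environment* mtd_env;
	long err;
	int cpu;

	err = alloc_mtd_reservation_environment(&mtd_env, num_cpus);
	if (err)
		return err;

	/* register the per-CPU reservations with the environment */
	for (cpu = 0; cpu < num_cpus; cpu++)
		mtd_env->env.ops->add_res(&mtd_env->env, &mtd_res->res[cpu], cpu);

	return 0;
}
#endif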