#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <litmus/litmus.h>
#include <litmus/trace.h>
#include <litmus/sched_trace.h>
#include <litmus/event_group.h>
#if 1
#define VTRACE(fmt, args...) \
sched_trace_log_message("%d P%d [%s@%s:%d]: " fmt, \
TRACE_ARGS, ## args)
#else
#define VTRACE(fmt, args...)
#endif
/*
* Return event_queue slot for the given time.
*/
static unsigned int time2slot(lt_t time)
{
return (unsigned int) time2quanta(time, FLOOR) % EVENT_QUEUE_SLOTS;
}
/*
* Executes events from an event_list in priority order.
* Events can requeue themselves when they are called.
*/
static enum hrtimer_restart on_timer(struct hrtimer *timer)
{
unsigned long flags;
unsigned long num = 0;
struct event_list *el;
struct rt_event *e;
struct list_head *pos, *safe, list;
el = container_of(timer, struct event_list, timer);
raw_spin_lock_irqsave(&el->group->queue_lock, flags);
VTRACE("Removing event list 0x%p\n", el);
list_del_init(&el->list);
raw_spin_unlock_irqrestore(&el->group->queue_lock, flags);
/* Empty event list so this event can be requeued */
VTRACE("Emptying event list 0x%p\n", el);
list_replace_init(&el->events, &list);
/* Fire events */
list_for_each_safe(pos, safe, &list) {
num++;
e = list_entry(pos, struct rt_event, list);
VTRACE("Dequeueing event 0x%p with prio %d from 0x%p\n",
e, e->prio, el);
list_del_init(pos);
e->function(e);
}
VTRACE("Exhausted %d events from list 0x%p\n", num, el);
sched_trace_action(NULL, num);
return HRTIMER_NORESTART;
}
/*
* Insert event in event-list, respecting priority order.
*/
void insert_event(struct event_list *el, struct rt_event *e)
{
struct list_head *pos, *last = NULL;
struct rt_event *queued;
list_for_each(pos, &el->events) {
queued = list_entry(pos, struct rt_event, list);
last = pos;
if (e->prio < queued->prio) {
VTRACE("Inserting priority %d 0x%p before %d 0x%p "
"in 0x%p, pos 0x%p\n", e->prio, &e->list,
queued->prio, &queued->list, el, pos);
BUG_ON(!list_empty(&e->list));
list_add_tail(&e->list, pos);
return;
}
}
VTRACE("Inserting priority %d 0x%p at end of 0x%p, last 0x%p\n",
e->prio, &el->list, el, last);
BUG_ON(!list_empty(&e->list));
list_add(&e->list, (last) ? last : pos);
}
/*
* Return event_list for the given event and time. If no event_list
* is being used yet and use_event_heap is 1, will create the list
* and return it. Otherwise it will return NULL.
*/
static struct event_list* get_event_list(struct event_group *group,
struct rt_event *e,
lt_t fire,
int use_event_list)
{
struct list_head* pos;
struct event_list *el = NULL, *tmp;
unsigned int slot = time2slot(fire);
int remaining = 300;
VTRACE("Getting list for %llu, event 0x%p\n", fire, e);
/* Initialize pos for the case that the list is empty */
pos = group->event_queue[slot].next;
list_for_each(pos, &group->event_queue[slot]) {
BUG_ON(remaining-- < 0);
tmp = list_entry(pos, struct event_list, list);
if (lt_after_eq(fire, tmp->fire_time) &&
lt_before(fire, tmp->fire_time + group->res)) {
VTRACE("Found match at time %llu\n", tmp->fire_time);
el = tmp;
break;
} else if (lt_before(fire, tmp->fire_time)) {
/* We need to insert a new node since el is
* already in the future
*/
VTRACE("Time %llu was before %llu\n",
fire, tmp->fire_time);
break;
} else {
VTRACE("Time %llu was after %llu\n",
fire, tmp->fire_time + group->res);
}
}
if (!el && use_event_list) {
/* Use pre-allocated list */
tmp = e->event_list;
tmp->fire_time = fire;
tmp->group = group;
/* Add to queue */
VTRACE("Using list for priority %d and time %llu\n",
e->prio, fire);
BUG_ON(!list_empty(&tmp->list));
list_add(&tmp->list, pos->prev);
el = tmp;
}
return el;
}
/*
* Prepare a release list for a new set of events.
*/
static void reinit_event_list(struct rt_event *e)
{
struct event_list *el = e->event_list;
VTRACE("Reinitting 0x%p for event 0x%p\n", el, e);
BUG_ON(hrtimer_try_to_cancel(&el->timer) == 1);
INIT_LIST_HEAD(&el->events);
atomic_set(&el->info.state, HRTIMER_START_ON_INACTIVE);
}
/**
* add_event() - Add timer to event group.
*/
void add_event(struct event_group *group, struct rt_event *e, lt_t fire)
{
struct event_list *el;
VTRACE("Adding event 0x%p with priority %d for time %llu\n",
e, e->prio, fire);
/* A NULL group means use the group of the currently executing CPU */
if (NULL == group)
group = get_event_group_for(NO_CPU);
/* Saving the group is important for cancellations */
e->_event_group = group;
raw_spin_lock(&group->queue_lock);
el = get_event_list(group, e, fire, 0);
if (!el) {
/* Use our own, but drop lock first */
raw_spin_unlock(&group->queue_lock);
reinit_event_list(e);
raw_spin_lock(&group->queue_lock);
el = get_event_list(group, e, fire, 1);
}
/* Add event to sorted list */
insert_event(el, e);
raw_spin_unlock(&group->queue_lock);
/* Arm timer if we are the owner */
if (el == e->event_list) {
VTRACE("Arming timer on event 0x%p for %llu\n", e, fire);
if (group->cpu == smp_processor_id()) {
__hrtimer_start_range_ns(&el->timer,
ns_to_ktime(el->fire_time),
0, HRTIMER_MODE_ABS_PINNED, 0);
} else {
hrtimer_start_on(group->cpu, &el->info,
&el->timer, ns_to_ktime(el->fire_time),
HRTIMER_MODE_ABS_PINNED);
}
} else {
VTRACE("Not my timer @%llu\n", fire);
}
}
/**
* cancel_event() - Remove event from the group.
*/
void cancel_event(struct rt_event *e)
{
struct list_head *swap = NULL;
struct rt_event *swappy;
struct event_list *tmp;
struct event_group *group = e->_event_group;
if (e->list.next != &e->list) {
raw_spin_lock(&group->queue_lock);
VTRACE("Canceling event 0x%p with priority %d\n", e, e->prio);
/* If somebody else is hooked up to our event list, swap
* with their event list and leave our old event list
* to execute.
*/
if (!list_empty(&e->list)) {
swap = (e->list.next == &e->event_list->events) ?
(e->list.prev == &e->event_list->events) ?
NULL : e->list.prev : e->list.next;
list_del_init(&e->list);
}
if (swap) {
swappy = list_entry(swap, struct rt_event, list);
tmp = swappy->event_list;
swappy->event_list = e->event_list;
e->event_list = tmp;
VTRACE("Swapping with event 0x%p", swappy);
}
hrtimer_try_to_cancel(&e->event_list->timer);
list_del_init(&e->event_list->list);
e->_event_group = NULL;
raw_spin_unlock(&group->queue_lock);
}
}
struct kmem_cache *event_list_cache;
struct event_list* event_list_alloc(int gfp_flags)
{
struct event_list *el = kmem_cache_alloc(event_list_cache, gfp_flags);
if (el) {
hrtimer_init(&el->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
INIT_LIST_HEAD(&el->list);
el->timer.function = on_timer;
} else {
VTRACE("Failed to allocate event list!");
printk(KERN_CRIT "Failed to allocate event list.\n");
BUG();
}
return el;
}
void init_event(struct rt_event *e, int prio, fire_event_t function,
struct event_list *el)
{
e->prio = prio;
e->function = function;
e->event_list = el;
INIT_LIST_HEAD(&e->list);
}
/**
* init_event_group() - Prepare group for events.
* @group Group to prepare
* @res Timer resolution. Two events of @res distance will be merged
* @cpu Cpu on which to fire timers
*/
static void init_event_group(struct event_group *group, lt_t res, int cpu)
{
int i;
VTRACE("Creating group with resolution %llu on CPU %d", res, cpu);
group->res = res;
group->cpu = cpu;
for (i = 0; i < EVENT_QUEUE_SLOTS; i++)
INIT_LIST_HEAD(&group->event_queue[i]);
raw_spin_lock_init(&group->queue_lock);
}
DEFINE_PER_CPU(struct event_group, _event_groups);
struct event_group *get_event_group_for(const int cpu)
{
return &per_cpu(_event_groups,
(NO_CPU == cpu) ? smp_processor_id() : cpu);
}
static int __init _init_event_groups(void)
{
int cpu;
printk("Initializing LITMUS^RT event groups.\n");
for_each_online_cpu(cpu) {
init_event_group(get_event_group_for(cpu),
CONFIG_MERGE_TIMERS_WINDOW, cpu);
}
return 0;
}
module_init(_init_event_groups);