#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <litmus/litmus.h>
#include <litmus/jobs.h>
#include <litmus/preempt.h>
#include <litmus/sched_plugin.h>
#include <litmus/edf_common.h>
#include <litmus/sched_trace.h>
#include <litmus/trace.h>
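/* PDUMB: a minimal, partitioned "dumb" scheduler plugin.
 *
 * Each CPU is assigned at most one real-time task. The schedule()
 * callback then alternates arbitrarily between running that task and
 * running nothing, which makes this plugin useless for real work but
 * a compact tour of the plugin interface.
 */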
/* This is the per-cpu state of the plugin */
struct dumb_cpu {
        struct task_struct* dumb_task; /* the one admitted task, if any */
        int scheduled;                 /* is dumb_task currently running? */
        int blocked;                   /* is dumb_task currently asleep? */
        raw_spinlock_t lock;           /* protects all of the above */
};
/* Define AND initialize one struct dumb_cpu per CPU.
 * Use this macro rather than a plain array: per-cpu data is laid out
 * so that CPUs do not contend for each other's cache lines.
 * Beware: this macro creates a global variable. If two plugins pick
 * the same name here, the kernel will not build.
 */
DEFINE_PER_CPU(struct dumb_cpu, pdumb_cpus);
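/* Two ways of reaching this variable appear below:
 *   per_cpu(pdumb_cpus, i)    -- the instance belonging to CPU i
 *   __get_cpu_var(pdumb_cpus) -- the instance of the current CPU
 */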
/**
 * Called when Litmus's active plugin is set to PDUMB.
 */
static long pdumb_activate_plugin(void)
{
        int i;
        struct dumb_cpu* cpu;
        /* Reset per-cpu state. Always assume you have to wipe the
         * slate clean in this method: someone evil may have been
         * tampering with your data structures. CPU ids need not be
         * contiguous, so walk the online mask rather than counting
         * up to num_online_cpus().
         */
        for_each_online_cpu(i) {
                cpu = &per_cpu(pdumb_cpus, i);
                cpu->dumb_task = NULL;
                cpu->scheduled = 0;
                cpu->blocked = 0;
        }
        printk(KERN_INFO "Activated a dumb plugin!\n");
        return 0;
}
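/* From userspace, the switch that triggers this callback is made by
 * plugin name, e.g. (assuming the stock LITMUS^RT proc interface):
 *
 *      echo PDUMB > /proc/litmus/active_plugin
 */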
/**
 * Returns 0 if the plugin will admit the task.
 * This plugin allows only one task per CPU.
 */
static long pdumb_admit_task(struct task_struct *t)
{
        long ret;
        unsigned long flags;
        int cpu_num = get_partition(t);
        /* Per-cpu variables have to be accessed through special
         * macros. per_cpu() accesses the instance of a given CPU,
         * selected by an integer index.
         */
        struct dumb_cpu *cpu = &per_cpu(pdumb_cpus, cpu_num);
        /* We access this state atomically: take the lock and disable
         * irqs, since the same lock is also taken from irq-disabled
         * context (see pdumb_schedule()).
         */
        raw_spin_lock_irqsave(&cpu->lock, flags);
        if (cpu->dumb_task) {
                /* Reject the task, causing a failure in userspace */
                printk(KERN_ERR "Already have a dumb task on %d!\n", cpu_num);
                ret = -EINVAL;
        } else {
                /* Assign our dumb task */
                printk(KERN_INFO "Taking your dumb task on %d!\n", cpu_num);
                cpu->dumb_task = t;
                ret = 0;
        }
        raw_spin_unlock_irqrestore(&cpu->lock, flags);
        return ret;
}
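/* admit_task() is reached when a task switches into real-time mode.
 * A minimal userspace sketch with liblitmus (hypothetical parameter
 * values; assumes the standard liblitmus API):
 *
 *      struct rt_task param;
 *      init_rt_task_param(&param);
 *      param.exec_cost = ms2ns(10);    // budget: 10 ms ...
 *      param.period    = ms2ns(100);   // ... every 100 ms
 *      param.cpu       = 0;            // partition read by get_partition()
 *      set_rt_task_param(gettid(), &param);
 *      task_mode(LITMUS_RT_TASK);      // kernel now calls pdumb_admit_task()
 */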
/**
 * Called when an ADMITTED task joins the real-time world.
 */
static void pdumb_task_new(struct task_struct *t, int on_rq, int running)
{
        /* Needed to disable interrupts */
        unsigned long flags;
        struct dumb_cpu *cpu = &per_cpu(pdumb_cpus, get_partition(t));
        raw_spin_lock_irqsave(&cpu->lock, flags);
        /* We only admit one task per cpu, so this had better be it */
        BUG_ON(cpu->dumb_task != t);
        /* The task could already be running in Linux. For example, it
         * could have been running and then switched into real-time
         * mode; this is how all real-time tasks begin execution.
         * Let's just say it's running here too.
         */
        if (running) {
                cpu->scheduled = 1;
        }
        /* Re-enable irqs and unlock */
        raw_spin_unlock_irqrestore(&cpu->lock, flags);
}
/**
 * Called when a task leaves the real-time world.
 */
static void pdumb_task_exit(struct task_struct *t)
{
        unsigned long flags;
        /* Exit may run on any CPU, so look up the task's partition
         * explicitly instead of using the local CPU's state.
         */
        struct dumb_cpu *cpu = &per_cpu(pdumb_cpus, get_partition(t));
        raw_spin_lock_irqsave(&cpu->lock, flags);
        cpu->dumb_task = NULL;
        cpu->scheduled = 0;
        cpu->blocked = 0;
        raw_spin_unlock_irqrestore(&cpu->lock, flags);
}
/**
 * Called when Litmus needs to figure out what to run next.
 * This plugin just swaps between nothing and the dumb task.
 * If there is no dumb task, it returns nothing.
 */
static struct task_struct* pdumb_schedule(struct task_struct *prev)
{
        /* The macro __get_cpu_var() returns this CPU's instance of
         * the per-cpu variable.
         */
        struct dumb_cpu *cpu = &__get_cpu_var(pdumb_cpus);
        struct task_struct *sched;
        /* We are accessing shared state, so we need the lock. We do
         * not need to disable irqs because they are already disabled
         * when this method is called.
         */
        raw_spin_lock(&cpu->lock);
        /* Switch between dumb_task and nothing arbitrarily */
        if (cpu->scheduled || cpu->blocked || !cpu->dumb_task) {
                cpu->scheduled = 0;
                sched = NULL;
                TRACE("Nothing scheduled!\n");
        } else {
                cpu->scheduled = 1;
                sched = cpu->dumb_task;
                TRACE_TASK(cpu->dumb_task, "Scheduled!\n");
        }
        /* This must be called once the cpu's state has been
         * atomically chosen, while the lock is still held. It lets
         * Litmus manage the race conditions with tasks blocking,
         * preempting, and releasing without any work on the
         * plugin's part.
         */
        sched_state_task_picked();
        raw_spin_unlock(&cpu->lock);
        return sched;
}
/**
 * Called when a task sleeps.
 * This method MUST remove the task from all plugin data structures,
 * or insanity ensues: there can be no chance that this task is ever
 * returned by the _schedule method until it wakes up.
 */
static void pdumb_task_block(struct task_struct *t)
{
        unsigned long flags;
        struct dumb_cpu *cpu = &per_cpu(pdumb_cpus, get_partition(t));
        TRACE_TASK(t, "Blocked!\n");
        raw_spin_lock_irqsave(&cpu->lock, flags);
        cpu->blocked = 1;
        cpu->scheduled = 0;
        raw_spin_unlock_irqrestore(&cpu->lock, flags);
}
/**
 * Called when a sleeping task resumes.
 */
static void pdumb_task_wake_up(struct task_struct *t)
{
        unsigned long flags;
        struct dumb_cpu *cpu = &per_cpu(pdumb_cpus, get_partition(t));
        TRACE_TASK(t, "Awoken!\n");
        raw_spin_lock_irqsave(&cpu->lock, flags);
        cpu->blocked = 0;
        cpu->scheduled = 0;
        raw_spin_unlock_irqrestore(&cpu->lock, flags);
}
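/* Together, task_block() and task_wake_up() gate the decision in
 * pdumb_schedule(): while blocked is set, schedule() keeps returning
 * NULL for this partition, which satisfies the rule that a sleeping
 * task must never be handed back to Litmus.
 */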
/* The actual plugin structure */
static struct sched_plugin pdumb_plugin __cacheline_aligned_in_smp = {
        .plugin_name            = "PDUMB",
        /* Initialization (when someone switches to this plugin) */
        .activate_plugin        = pdumb_activate_plugin,
        /* Scheduling methods */
        .schedule               = pdumb_schedule,
        .task_wake_up           = pdumb_task_wake_up,
        .task_block             = pdumb_task_block,
        /* Task management methods */
        .admit_task             = pdumb_admit_task,
        .task_new               = pdumb_task_new,
        .task_exit              = pdumb_task_exit,
};
/**
 * Called when the system boots up.
 */
static int __init init_pdumb(void)
{
        int i;
        struct dumb_cpu* cpu;
        /* Initialize CPU state. Use the *possible* mask so that the
         * locks exist even for CPUs that come online after boot.
         */
        for_each_possible_cpu(i) {
                cpu = &per_cpu(pdumb_cpus, i);
                raw_spin_lock_init(&cpu->lock);
        }
        return register_sched_plugin(&pdumb_plugin);
}
module_init(init_pdumb);
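/* Note: Litmus plugins are built into the kernel rather than loaded
 * as modules, so module_init() here runs once during boot and the
 * plugin cannot be unloaded afterwards.
 */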