#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <litmus/litmus.h>
#include <litmus/jobs.h>
#include <litmus/preempt.h>
#include <litmus/sched_plugin.h>
#include <litmus/edf_common.h>
#include <litmus/sched_trace.h>
#include <litmus/trace.h>

/* This is the per-cpu state of the plugin. */
struct dumb_cpu {
	struct task_struct *dumb_task;
	int scheduled;
	int blocked;
	raw_spinlock_t lock;
};

/* Define AND initialize one dumb_cpu per CPU. The DEFINE_PER_CPU
 * macro gives each CPU its own statically allocated copy, which is
 * both safe (no accidental sharing between CPUs) and fast (no
 * cache-line bouncing between processors).
 */
DEFINE_PER_CPU(struct dumb_cpu, pdumb_cpus);
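
/* Quick reference: per-CPU variables are reached through accessor
 * macros rather than plain C syntax, e.g.
 *
 *	struct dumb_cpu *remote = &per_cpu(pdumb_cpus, 2);    // CPU 2's copy
 *	struct dumb_cpu *local = &__get_cpu_var(pdumb_cpus);  // this CPU's copy
 *
 * Both forms appear in the callbacks below.
 */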

/**
 * Called when LITMUS^RT's active plugin is set to PDUMB.
 */
static long pdumb_activate_plugin(void)
{
	int i;
	struct dumb_cpu *cpu;

	/* Reset per-cpu state. Always assume you have to wipe the
	 * slate clean in this method. Someone evil may have been
	 * tampering with your data structures. Note that CPU ids are
	 * not guaranteed to be contiguous, hence the iterator.
	 */
	for_each_online_cpu(i) {
		cpu = &per_cpu(pdumb_cpus, i);
		cpu->dumb_task = NULL;
		cpu->scheduled = 0;
		cpu->blocked = 0;
	}

	printk(KERN_ERR "Activated a dumb plugin!\n");
	return 0;
}
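
/* For reference: a plugin switch is normally triggered from userspace,
 * e.g. with the setsched tool shipped with liblitmus (assuming a
 * standard LITMUS^RT installation):
 *
 *	$ setsched PDUMB
 *
 * which writes the plugin name to /proc/litmus/active_plugin.
 */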

/**
 * Returns 0 if the plugin will admit the task.
 * In this case, we only allow one task per CPU.
 */
static long pdumb_admit_task(struct task_struct *t)
{
	long ret;
	int cpu_num = get_partition(t);

	/* Per-CPU variables have to be accessed through special
	 * macros; per_cpu() accesses the instance belonging to the
	 * CPU given by an integer index.
	 */
	struct dumb_cpu *cpu = &per_cpu(pdumb_cpus, cpu_num);

	raw_spin_lock(&cpu->lock);

	if (cpu->dumb_task) {
		printk(KERN_ERR "Already have a dumb task on %d!\n",
		       cpu_num);
		ret = -EINVAL;
	} else {
		printk(KERN_ERR "Taking your dumb task on %d!\n",
		       cpu_num);
		/* Assign our dumb task! */
		cpu->dumb_task = t;
		ret = 0;
	}

	raw_spin_unlock(&cpu->lock);
	return ret;
}
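
/* A task reaches admit_task() by entering real-time mode. A minimal
 * userspace sketch using liblitmus follows; the exact field and
 * function names vary between liblitmus versions, so treat this as
 * an illustration rather than copy-paste code:
 *
 *	struct rt_task param;
 *	init_rt_task_param(&param);
 *	param.exec_cost = ms2ns(10);   // worst-case execution time
 *	param.period = ms2ns(100);     // period
 *	param.cpu = 0;                 // partition; read back via get_partition()
 *	set_rt_task_param(gettid(), &param);
 *	task_mode(LITMUS_RT_TASK);     // admission happens here
 */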

/**
 * Called when an ADMITTED task joins the real-time world.
 */
static void pdumb_task_new(struct task_struct *t, int on_rq, int running)
{
	/* Needed to disable interrupts */
	unsigned long flags;
	/* Look up the state of the task's partition with per_cpu(). */
	struct dumb_cpu *cpu = &per_cpu(pdumb_cpus, get_partition(t));

	/* We are accessing state atomically: we need the lock and we
	 * need to disable irqs.
	 */
	raw_spin_lock_irqsave(&cpu->lock, flags);

	/* The task could already be running in Linux.
	 * Let's just say it's running here too.
	 */
	if (running)
		cpu->scheduled = 1;

	/* Re-enable irqs and unlock */
	raw_spin_unlock_irqrestore(&cpu->lock, flags);
}

/**
 * Called when a task leaves the real-time world.
 */
static void pdumb_task_exit(struct task_struct *t)
{
	unsigned long flags;
	/* Use the task's partition: a task need not exit on the CPU
	 * it was assigned to.
	 */
	struct dumb_cpu *cpu = &per_cpu(pdumb_cpus, get_partition(t));

	raw_spin_lock_irqsave(&cpu->lock, flags);
	cpu->dumb_task = NULL;
	cpu->scheduled = 0;
	/* Clear blocked too: if the task exits while asleep, a stale
	 * flag would keep any future dumb task off this CPU.
	 */
	cpu->blocked = 0;
	raw_spin_unlock_irqrestore(&cpu->lock, flags);
}

/**
 * Called when LITMUS^RT needs to figure out what to run.
 * This plugin just swaps between nothing and the dumb task.
 * If there is no dumb task, it always returns nothing.
 */
static struct task_struct *pdumb_schedule(struct task_struct *prev)
{
	/* __get_cpu_var() returns this CPU's instance of a per-CPU
	 * variable; the schedule method always runs on the CPU it
	 * is scheduling.
	 */
	struct dumb_cpu *cpu = &__get_cpu_var(pdumb_cpus);
	struct task_struct *sched;

	/* Accessing state, need lock. We don't need to disable irqs
	 * because they are already disabled when this method is
	 * called.
	 */
	raw_spin_lock(&cpu->lock);

	if (cpu->scheduled || cpu->blocked || !cpu->dumb_task) {
		cpu->scheduled = 0;
		sched = NULL;
		TRACE("Nothing scheduled!\n");
	} else {
		cpu->scheduled = 1;
		sched = cpu->dumb_task;
		TRACE_TASK(cpu->dumb_task, "Scheduled!\n");
	}

	/* Plugins must call this to acknowledge that a scheduling
	 * decision has been made.
	 */
	sched_state_task_picked();

	raw_spin_unlock(&cpu->lock);
	return sched;
}
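
/* A worked example of the swap above: with one admitted, unblocked
 * dumb task T on this CPU, consecutive calls to pdumb_schedule()
 * return T, NULL, T, NULL, ... because cpu->scheduled toggles on
 * every invocation; the dumb task alternates with whatever Linux
 * itself wants to run.
 */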

/**
 * Called when a task sleeps.
 * This method MUST remove the task from all plugin data structures,
 * or insanity ensues. There can be no chance that this task is ever
 * returned by the _schedule method until it wakes up.
 */
static void pdumb_task_block(struct task_struct *t)
{
	unsigned long flags;
	struct dumb_cpu *cpu = &per_cpu(pdumb_cpus, get_partition(t));

	TRACE_TASK(t, "Blocked!\n");

	raw_spin_lock_irqsave(&cpu->lock, flags);
	cpu->blocked = 1;
	cpu->scheduled = 0;
	raw_spin_unlock_irqrestore(&cpu->lock, flags);
}

/**
 * Called when a sleeping task resumes.
 */
static void pdumb_task_wake_up(struct task_struct *t)
{
	unsigned long flags;
	struct dumb_cpu *cpu = &per_cpu(pdumb_cpus, get_partition(t));

	TRACE_TASK(t, "Awoken!\n");

	raw_spin_lock_irqsave(&cpu->lock, flags);
	cpu->blocked = 0;
	cpu->scheduled = 0;
	raw_spin_unlock_irqrestore(&cpu->lock, flags);
}

/* The actual plugin structure */
static struct sched_plugin pdumb_plugin __cacheline_aligned_in_smp = {
	.plugin_name = "PDUMB",

	/* Initialization (when someone switches to this plugin) */
	.activate_plugin = pdumb_activate_plugin,

	/* Scheduling methods */
	.schedule = pdumb_schedule,
	.task_wake_up = pdumb_task_wake_up,
	.task_block = pdumb_task_block,

	/* Task management methods */
	.admit_task = pdumb_admit_task,
	.task_new = pdumb_task_new,
	.task_exit = pdumb_task_exit,
};

/**
 * Called when the system boots up.
 */
static int __init init_pdumb(void)
{
	int i;
	struct dumb_cpu *cpu;

	/* Initialize the locks for every possible CPU, so they exist
	 * even for CPUs brought online later; everything else is
	 * (re)initialized in pdumb_activate_plugin().
	 */
	for_each_possible_cpu(i) {
		cpu = &per_cpu(pdumb_cpus, i);
		raw_spin_lock_init(&cpu->lock);
	}

	return register_sched_plugin(&pdumb_plugin);
}

module_init(init_pdumb);
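
/* Kernel code built around module.h should declare its license;
 * LITMUS^RT is distributed under the GPL.
 */
MODULE_LICENSE("GPL");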