1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
|
/* sched_plugin.c -- core infrastructure for the scheduler plugin system
*
* This file includes the initialization of the plugin system, the no-op Linux
* scheduler plugin, some dummy functions, and some helper functions.
*/
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <litmus/litmus.h>
#include <litmus/sched_plugin.h>
#include <litmus/preempt.h>
#include <litmus/jobs.h>
#ifdef CONFIG_LITMUS_NVIDIA
#include <litmus/nvidia_info.h>
#endif
/*
 * Generic function to trigger preemption on either the local or a remote
 * CPU from scheduler plugins.  The key feature is that it is aware of
 * non-preemptive sections: it does not invoke the scheduler / send IPIs
 * if the to-be-preempted task is currently non-preemptive.
 */
void preempt_if_preemptable(struct task_struct* t, int cpu)
{
	/* t is the real-time task executing on CPU cpu; if t is NULL, then
	 * cpu is currently scheduling background work. */
	int need_resched;

	if (!t) {
		/* Non-real-time background work never blocks preemption;
		 * just move it out of the way. */
		need_resched = 1;
	} else if (smp_processor_id() == cpu) {
		/* Local CPU case. */
		if (is_user_np(t)) {
			/* Poke userspace to leave its np-section.  This does
			 * not have to be atomic since the task is definitely
			 * not executing right now. */
			request_exit_np(t);
			need_resched = 0;
		} else {
			/* Preempt only if the currently-executing task is not
			 * inside a kernel non-preemptive section. */
			need_resched = !is_kernel_np(t);
		}
	} else {
		/* Remote CPU case.  Only notify if it is neither a kernel
		 * np-section nor did we manage to set the userspace
		 * exit-np flag. */
		need_resched = !is_kernel_np(t) && !request_exit_np_atomic(t);
	}

	if (likely(need_resched))
		litmus_reschedule(cpu);
}
/*************************************************************
 *                  Dummy plugin functions                   *
 *************************************************************/

/* No-op context-switch completion hook. */
static void litmus_dummy_finish_switch(struct task_struct * prev)
{
}

/* Never picks a real-time task: returning NULL lets Linux schedule
 * normally.  sched_state_task_picked() acknowledges the scheduling
 * decision to the preemption state machine (see litmus/preempt.h). */
static struct task_struct* litmus_dummy_schedule(struct task_struct * prev)
{
	sched_state_task_picked();
	return NULL;
}

/* No-op scheduler-tick hook. */
static void litmus_dummy_tick(struct task_struct* tsk)
{
}

/* Reject every real-time task: the default (Linux) plugin admits none. */
static long litmus_dummy_admit_task(struct task_struct* tsk)
{
	printk(KERN_CRIT "LITMUS^RT: Linux plugin rejects %s/%d.\n",
		tsk->comm, tsk->pid);
	return -EINVAL;
}

/* No-op task-lifecycle hooks. */
static void litmus_dummy_task_new(struct task_struct *t, int on_rq, int running)
{
}

static void litmus_dummy_task_wake_up(struct task_struct *task)
{
}

static void litmus_dummy_task_block(struct task_struct *task)
{
}

static void litmus_dummy_task_exit(struct task_struct *task)
{
}

/* Job completion is not implemented outside a real plugin. */
static long litmus_dummy_complete_job(void)
{
	return -ENOSYS;
}

/* Activating the default plugin tears down any NVIDIA interrupt-handling
 * state left behind by a previously active plugin. */
static long litmus_dummy_activate_plugin(void)
{
#ifdef CONFIG_LITMUS_NVIDIA
	shutdown_nvidia_info();
#endif
	return 0;
}

static long litmus_dummy_deactivate_plugin(void)
{
	return 0;
}

/* Fallback priority comparison: reports no ordering (always 0) and
 * warns, since a real plugin should have provided its own. */
static int litmus_dummy_compare(struct task_struct* a, struct task_struct* b)
{
	TRACE_CUR("WARNING: Dummy compare function called!\n");
	return 0;
}
#ifdef CONFIG_LITMUS_LOCKING
/* Lock allocation is unsupported by the default plugin. */
static long litmus_dummy_allocate_lock(struct litmus_lock **lock, int type,
				 void* __user config)
{
	return -ENXIO;
}

/* Priority inheritance is a no-op without a real plugin. */
static void litmus_dummy_increase_prio(struct task_struct* t, struct task_struct* prio_inh)
{
}

static void litmus_dummy_decrease_prio(struct task_struct* t, struct task_struct* prio_inh)
{
}
#endif
#ifdef CONFIG_LITMUS_SOFTIRQD
/* klitirqd priority inheritance is a no-op without a real plugin.
 * NOTE: renamed from litmus_dummy_increase_prio_klitirq (missing 'd'),
 * which did not match the references in linux_sched_plugin and
 * CHECK(increase_prio_klitirqd) and would not compile. */
static void litmus_dummy_increase_prio_klitirqd(struct task_struct* klitirqd,
				struct task_struct* old_owner,
				struct task_struct* new_owner)
{
}

static void litmus_dummy_decrease_prio_klitirqd(struct task_struct* klitirqd,
				struct task_struct* old_owner)
{
}
#endif
#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
/* PAI tasklets are unsupported; returning 0 signals enqueue failure. */
static int litmus_dummy_enqueue_pai_tasklet(struct tasklet_struct* t)
{
	TRACE("%s: PAI Tasklet unsupported in this plugin!!!!!!\n", __FUNCTION__);
	return(0); // failure.
}

/* No-op: there are no PAI tasklets whose priority could change. */
static void litmus_dummy_change_prio_pai_tasklet(struct task_struct *old_prio,
				 struct task_struct *new_prio)
{
	TRACE("%s: PAI Tasklet unsupported in this plugin!!!!!!\n", __FUNCTION__);
}

/* No-op: nothing enqueued, so nothing to run.  (Trace deliberately
 * disabled here -- this hook is called frequently.) */
static void litmus_dummy_run_tasklets(struct task_struct* t)
{
	//TRACE("%s: PAI Tasklet unsupported in this plugin!!!!!!\n", __FUNCTION__);
}
#endif
#ifdef CONFIG_LITMUS_NESTED_LOCKING
/* Nested priority propagation is a no-op without a real plugin.
 * NOTE(review): to_unlock/irqflags are presumably passed so a real
 * implementation can drop the lock while walking the inheritance
 * chain -- confirm against an actual plugin implementation. */
static void litmus_dummy_nested_increase_prio(struct task_struct* t, struct task_struct* prio_inh,
				raw_spinlock_t *to_unlock, unsigned long irqflags)
{
}

static void litmus_dummy_nested_decrease_prio(struct task_struct* t, struct task_struct* prio_inh,
				raw_spinlock_t *to_unlock, unsigned long irqflags)
{
}

/* Fallback mode-aware comparison: reports no ordering (always 0) and
 * warns.  (Parameter renamed a_mod -> a_mode for consistency with
 * b_mode; static function, so no caller is affected.) */
static int litmus_dummy___compare(struct task_struct* a, comparison_mode_t a_mode,
				struct task_struct* b, comparison_mode_t b_mode)
{
	TRACE_CUR("WARNING: Dummy compare function called!\n");
	return 0;
}
#endif
#ifdef CONFIG_LITMUS_DGL_SUPPORT
/* No DGL (dynamic group lock) spinlock exists in the default plugin. */
static raw_spinlock_t* litmus_dummy_get_dgl_spinlock(struct task_struct *t)
{
	return NULL;
}
#endif

#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
/* Affinity-observer allocation is unsupported by the default plugin. */
static long litmus_dummy_allocate_aff_obs(struct affinity_observer **aff_obs,
				int type,
				void* __user config)
{
	return -ENXIO;
}
#endif
/* The default scheduler plugin.  It doesn't do anything and lets Linux do
 * its job: every hook is wired to the corresponding litmus_dummy_* stub.
 */
struct sched_plugin linux_sched_plugin = {
	.plugin_name = "Linux",
	/* core scheduling hooks -- all no-ops */
	.tick = litmus_dummy_tick,
	.task_new = litmus_dummy_task_new,
	.task_exit = litmus_dummy_task_exit,
	.task_wake_up = litmus_dummy_task_wake_up,
	.task_block = litmus_dummy_task_block,
	.complete_job = litmus_dummy_complete_job,
	.schedule = litmus_dummy_schedule,
	.finish_switch = litmus_dummy_finish_switch,
	.activate_plugin = litmus_dummy_activate_plugin,
	.deactivate_plugin = litmus_dummy_deactivate_plugin,
	.compare = litmus_dummy_compare,
	/* optional feature hooks, compiled in per-config */
#ifdef CONFIG_LITMUS_LOCKING
	.allocate_lock = litmus_dummy_allocate_lock,
	.increase_prio = litmus_dummy_increase_prio,
	.decrease_prio = litmus_dummy_decrease_prio,
#endif
#ifdef CONFIG_LITMUS_NESTED_LOCKING
	.nested_increase_prio = litmus_dummy_nested_increase_prio,
	.nested_decrease_prio = litmus_dummy_nested_decrease_prio,
	.__compare = litmus_dummy___compare,
#endif
#ifdef CONFIG_LITMUS_SOFTIRQD
	.increase_prio_klitirqd = litmus_dummy_increase_prio_klitirqd,
	.decrease_prio_klitirqd = litmus_dummy_decrease_prio_klitirqd,
#endif
#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
	.enqueue_pai_tasklet = litmus_dummy_enqueue_pai_tasklet,
	.change_prio_pai_tasklet = litmus_dummy_change_prio_pai_tasklet,
	.run_tasklets = litmus_dummy_run_tasklets,
#endif
#ifdef CONFIG_LITMUS_DGL_SUPPORT
	.get_dgl_spinlock = litmus_dummy_get_dgl_spinlock,
#endif
#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
	.allocate_aff_obs = litmus_dummy_allocate_aff_obs,
#endif
	.admit_task = litmus_dummy_admit_task
};
/*
 * Reference to the plugin currently in charge of scheduling tasks within
 * the system.  It stores references to the actual function implementations.
 * Should be switched by calling the appropriate init_***_plugin() routine.
 */
struct sched_plugin *litmus = &linux_sched_plugin;

/* the list of registered scheduling plugins, protected by sched_plugins_lock */
static LIST_HEAD(sched_plugins);
static DEFINE_RAW_SPINLOCK(sched_plugins_lock);
/* Fill in any hook the plugin left NULL with the corresponding
 * litmus_dummy_* stub so hooks can be invoked unconditionally.
 * do { } while (0) makes the macro expand safely as a single
 * statement (e.g. inside an unbraced if/else). */
#define CHECK(func) do {				\
	if (!plugin->func)				\
		plugin->func = litmus_dummy_ ## func;	\
} while (0)
/* FIXME: get reference to module */
/*
 * Register a scheduler plugin so that it can later be selected at runtime.
 * Any hook the plugin leaves NULL is filled in with the corresponding
 * litmus_dummy_* no-op via CHECK().  Always returns 0.
 */
int register_sched_plugin(struct sched_plugin* plugin)
{
	printk(KERN_INFO "Registering LITMUS^RT plugin %s.\n",
		plugin->plugin_name);

	/* make sure we don't trip over null pointers later */
	CHECK(finish_switch);
	CHECK(schedule);
	CHECK(tick);
	CHECK(task_wake_up);
	CHECK(task_exit);
	CHECK(task_block);
	CHECK(task_new);
	CHECK(complete_job);
	CHECK(activate_plugin);
	CHECK(deactivate_plugin);
	CHECK(compare);
#ifdef CONFIG_LITMUS_LOCKING
	CHECK(allocate_lock);
	CHECK(increase_prio);
	CHECK(decrease_prio);
#endif
#ifdef CONFIG_LITMUS_NESTED_LOCKING
	CHECK(nested_increase_prio);
	CHECK(nested_decrease_prio);
	CHECK(__compare);
#endif
#ifdef CONFIG_LITMUS_SOFTIRQD
	CHECK(increase_prio_klitirqd);
	CHECK(decrease_prio_klitirqd);
#endif
#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
	CHECK(enqueue_pai_tasklet);
	CHECK(change_prio_pai_tasklet);
	CHECK(run_tasklets);
#endif
#ifdef CONFIG_LITMUS_DGL_SUPPORT
	CHECK(get_dgl_spinlock);
#endif
#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
	CHECK(allocate_aff_obs);
#endif
	CHECK(admit_task);

	/* release_at has a generic default (from litmus/jobs.h), not a
	 * litmus_dummy_* stub, hence no CHECK() for it */
	if (!plugin->release_at)
		plugin->release_at = release_at;

	raw_spin_lock(&sched_plugins_lock);
	list_add(&plugin->list, &sched_plugins);
	raw_spin_unlock(&sched_plugins_lock);

	return 0;
}
/* FIXME: reference counting, etc. */
/*
 * Look up a registered plugin by name.
 * Returns the plugin, or NULL if no plugin with that name is registered.
 */
struct sched_plugin* find_sched_plugin(const char* name)
{
	struct sched_plugin *found = NULL;
	struct list_head *iter;

	raw_spin_lock(&sched_plugins_lock);
	list_for_each(iter, &sched_plugins) {
		struct sched_plugin *p =
			list_entry(iter, struct sched_plugin, list);
		if (strcmp(p->plugin_name, name) == 0) {
			found = p;
			break;
		}
	}
	raw_spin_unlock(&sched_plugins_lock);

	return found;
}
/*
 * Write the newline-separated names of all registered plugins into buf.
 * At most max bytes (including the terminating NUL) are stored; output
 * is truncated if buf is too small.  Returns the number of characters
 * actually stored, excluding the terminating NUL.
 *
 * Fix: snprintf() returns the length the output *would* have had, so
 * the old code could advance count past max on truncation and report a
 * count larger than the buffer.  Clamp on truncation instead.
 */
int print_sched_plugins(char* buf, int max)
{
	int count = 0;
	struct list_head *pos;
	struct sched_plugin *plugin;

	if (!buf || max <= 0)
		return 0;

	raw_spin_lock(&sched_plugins_lock);
	list_for_each(pos, &sched_plugins) {
		int n;
		plugin = list_entry(pos, struct sched_plugin, list);
		n = snprintf(buf + count, max - count, "%s\n",
			     plugin->plugin_name);
		if (n < 0 || n >= max - count) {
			/* truncated: only max - count - 1 chars (plus NUL)
			 * were stored, so clamp and stop */
			count = max - 1;
			break;
		}
		count += n;
	}
	raw_spin_unlock(&sched_plugins_lock);

	return count;
}
|