#ifndef __LITMUS_SOFTIRQ_H
#define __LITMUS_SOFTIRQ_H

#include <linux/interrupt.h>
/*
 * Threaded tasklet handling for Litmus. Tasklets are scheduled with the
 * priority of the tasklet's owner, that is, the RT task on whose behalf
 * the tasklet runs.
 *
 * Tasklets are currently scheduled in FIFO order with NO priority
 * inheritance for "blocked" tasklets. klitirqd assumes the priority of
 * the tasklet's owner when the tasklet is next to execute.
 *
 * Currently, hi-tasklets are scheduled before low-tasklets, regardless of
 * the priority of the low-tasklets. This priority inversion probably needs
 * to be fixed, though it is not an issue in our work with GPUs: GPUs (and
 * their associated klitirqds) are owned for exclusive time periods, so no
 * inversions can occur.
 *
 * FIXME: Let low-tasklets with higher Litmus priority be scheduled before
 * hi-tasklets of lower Litmus priority.
 *
 * TODO: Decide if tasklets should really be scheduled FIFO. If not, we
 * should probably ensure that tasklets with the same owner still execute
 * in FIFO order, lest we confuse drivers with out-of-order execution
 * (though by tasklet-processing design they should be able to handle it).
 */
#define NR_LITMUS_SOFTIRQD CONFIG_NR_LITMUS_SOFTIRQD
//void trigger_litirqs(struct task_struct*);
/* Spawns NR_LITMUS_SOFTIRQD klitirqd daemons.
   The actual launch of the threads is deferred to kworker's workqueue, so
   the daemons will likely not yet be running when this function returns,
   though the required data will have been initialized. */
void spawn_klitirqd(void);
/* Raises a flag telling the klitirqds to terminate.
   Termination is asynchronous, so some threads may still be running when
   this function returns. */
void kill_klitirqd(void);
/* Returns 1 if all NR_LITMUS_SOFTIRQD klitirqds are ready to handle
   tasklets; 0 otherwise. */
int klitirqd_is_ready(void);
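
/*
 * Example use of the lifecycle API above: a minimal sketch, assuming it
 * runs in module init/exit context. Because thread launch is deferred to
 * a workqueue, we poll klitirqd_is_ready() before handing the daemons any
 * work. The function names here are hypothetical.
 */
#if 0 /* illustrative sketch only; not compiled */
static int __init my_klitirqd_user_init(void)
{
	spawn_klitirqd();

	/* Daemons come up asynchronously; wait until all are ready. */
	while (!klitirqd_is_ready())
		msleep(10); /* needs <linux/delay.h> */

	return 0;
}

static void __exit my_klitirqd_user_exit(void)
{
	/* Asynchronous: some daemons may still be running on return. */
	kill_klitirqd();
}
#endif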
extern void __litmus_tasklet_schedule(
	struct tasklet_struct *t,
	unsigned int k_id);

/* schedule a tasklet on klitirqd #k_id */
static inline void litmus_tasklet_schedule(
	struct tasklet_struct *t,
	unsigned int k_id)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__litmus_tasklet_schedule(t, k_id);
}
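
/*
 * Example use of litmus_tasklet_schedule() from an interrupt handler: a
 * sketch only. my_tasklet_func(), my_isr(), my_rt_task, and the choice of
 * klitirqd #0 are hypothetical, and the `owner' assignment assumes
 * tasklet_struct carries a pointer to the owning RT task, as the comment
 * at the top of this file implies (the field name is an assumption, not
 * confirmed by this header).
 */
#if 0 /* illustrative sketch only; not compiled */
static void my_tasklet_func(unsigned long data)
{
	/* Bottom-half work; runs in klitirqd thread context at the
	 * priority of the tasklet's owner. */
}

static DECLARE_TASKLET(my_tasklet, my_tasklet_func, 0);

/* Hypothetical: the RT task on whose behalf the tasklet runs. */
static struct task_struct *my_rt_task;

static irqreturn_t my_isr(int irq, void *dev)
{
	my_tasklet.owner = my_rt_task;	/* hypothetical owner field */

	/* TASKLET_STATE_SCHED ensures at most one pending instance,
	 * exactly as with the stock tasklet_schedule(). */
	litmus_tasklet_schedule(&my_tasklet, 0);
	return IRQ_HANDLED;
}
#endif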
extern void __litmus_tasklet_hi_schedule(struct tasklet_struct *t,
					 unsigned int k_id);

/* schedule a hi tasklet on klitirqd #k_id */
static inline void litmus_tasklet_hi_schedule(struct tasklet_struct *t,
					      unsigned int k_id)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__litmus_tasklet_hi_schedule(t, k_id);
}
extern void __litmus_tasklet_hi_schedule_first(
	struct tasklet_struct *t,
	unsigned int k_id);

/* schedule a hi tasklet on klitirqd #k_id on the next go-around */
/* PRECONDITION: interrupts must be disabled. */
static inline void litmus_tasklet_hi_schedule_first(
	struct tasklet_struct *t,
	unsigned int k_id)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__litmus_tasklet_hi_schedule_first(t, k_id);
}
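
/*
 * Example honoring the interrupts-disabled precondition of
 * litmus_tasklet_hi_schedule_first() when calling from a context where
 * interrupts may be enabled. A sketch only; enqueue_urgent() and the
 * klitirqd id are hypothetical.
 */
#if 0 /* illustrative sketch only; not compiled */
static void enqueue_urgent(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	/* Queued to run before other pending hi-tasklets on klitirqd #0. */
	litmus_tasklet_hi_schedule_first(t, 0);
	local_irq_restore(flags);
}
#endif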
#endif /* __LITMUS_SOFTIRQ_H */