#ifndef __LITMUS_SOFTIRQ_H
#define __LITMUS_SOFTIRQ_H
#include <linux/interrupt.h>
#include <linux/workqueue.h>
/*
Threaded tasklet handling for Litmus.  Tasklets
are scheduled with the priority of the tasklet's
owner---that is, the RT task on whose behalf the
tasklet runs.

Tasklets are currently scheduled in FIFO order with
NO priority inheritance for "blocked" tasklets.
klitirqd assumes the priority of the owner of the
tasklet when the tasklet is next to execute.

Currently, hi-tasklets are scheduled before
low-tasklets, regardless of the priority of the
low-tasklets.  Likewise, low-tasklets are scheduled
before work queue objects.  This priority inversion
probably needs to be fixed, though it is not an issue
in our work with GPUs, since GPUs (and their associated
klitirqds) are owned for exclusive time periods, so no
inversions can occur.
*/
#define NR_LITMUS_SOFTIRQD CONFIG_NR_LITMUS_SOFTIRQD
/* Spawns NR_LITMUS_SOFTIRQD klitirqd daemons.
The actual launch of the threads is deferred to kworker's
workqueue, so the daemons will likely not yet be running
when this function returns, though the required data
will have been initialized.

@affinity: an array expressing the processor affinity
for each of the NR_LITMUS_SOFTIRQD daemons.  May be set
to NULL for global scheduling.

- Examples -
8-CPU system with two CPU clusters:
	affinity[] = {0, 0, 0, 0, 3, 3, 3, 3}
	NOTE: Daemons are not actually bound to the specified CPU,
	but rather to the cluster in which that CPU resides.

8-CPU system, partitioned:
	affinity[] = {0, 1, 2, 3, 4, 5, 6, 7}

FIXME: change array to a CPU topology or array of cpumasks
*/
void spawn_klitirqd(int* affinity);
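/* Usage sketch (illustrative, not part of this API): spawning the daemons
 * on the clustered 8-CPU system from the example above.  This assumes
 * NR_LITMUS_SOFTIRQD is configured to 8 for that system; the CPU values
 * merely select the cluster each daemon is associated with.
 *
 *	int affinity[NR_LITMUS_SOFTIRQD] = {0, 0, 0, 0, 3, 3, 3, 3};
 *	spawn_klitirqd(affinity);
 *
 * For global scheduling, simply pass NULL: spawn_klitirqd(NULL);
 */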
/* Raises a flag telling the klitirqd daemons to terminate.
Termination is asynchronous, so some threads may still be
running after this function returns. */
void kill_klitirqd(void);
/* Returns 1 if all NR_LITMUS_SOFTIRQD klitirqd daemons are ready
to handle tasklets; 0 otherwise. */
int klitirqd_is_ready(void);
/* Returns 1 if none of the NR_LITMUS_SOFTIRQD klitirqd daemons are
ready to handle tasklets; 0 otherwise. */
int klitirqd_is_dead(void);
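/* Lifecycle sketch (illustrative): once spawn_klitirqd() has been called,
 * a caller that needs the daemons may poll for readiness, and later tear
 * them down, roughly as follows.  Assumes a context that may sleep; the
 * polling interval below is arbitrary.
 *
 *	while (!klitirqd_is_ready())
 *		schedule_timeout_uninterruptible(HZ / 100);
 *	...
 *	kill_klitirqd();
 *	while (!klitirqd_is_dead())
 *		schedule_timeout_uninterruptible(HZ / 100);
 */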
/* Flushes all pending work of the specified 'owner' out to the OS
 * for regular tasklet/work processing.
 *
 * PRECOND: klitirqd_thread must have a cleared entry in the GPU
 * registry; otherwise this call effectively becomes a no-op, as the
 * work will simply loop back to klitirqd_thread.
 *
 * Pass NULL for owner to flush ALL pending items.
 */
void flush_pending(struct task_struct* klitirqd_thread,
struct task_struct* owner);
struct task_struct* get_klitirqd(unsigned int k_id);
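/* Illustrative sketch: pushing ALL items still pending on klitirqd #0 back
 * to the regular Linux tasklet/workqueue paths (daemon index 0 is an
 * arbitrary choice; note the GPU-registry precondition above).
 *
 *	struct task_struct* thread = get_klitirqd(0);
 *	flush_pending(thread, NULL);
 */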
extern int __litmus_tasklet_schedule(
struct tasklet_struct *t,
unsigned int k_id);
/* schedule a tasklet on klitirqd #k_id */
static inline int litmus_tasklet_schedule(
struct tasklet_struct *t,
unsigned int k_id)
{
int ret = 0;
if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
ret = __litmus_tasklet_schedule(t, k_id);
return(ret);
}
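/* Illustrative sketch: initializing a tasklet and handing it to klitirqd #0
 * instead of the regular softirq path.  my_handler and my_data are
 * hypothetical; associating an RT owner with the tasklet is handled
 * elsewhere and not shown here.
 *
 *	struct tasklet_struct my_tasklet;
 *	tasklet_init(&my_tasklet, my_handler, (unsigned long)my_data);
 *	litmus_tasklet_schedule(&my_tasklet, 0);
 */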
/* for use by __tasklet_schedule() */
static inline int _litmus_tasklet_schedule(
struct tasklet_struct *t,
unsigned int k_id)
{
return(__litmus_tasklet_schedule(t, k_id));
}
extern int __litmus_tasklet_hi_schedule(struct tasklet_struct *t,
unsigned int k_id);
/* schedule a hi tasklet on klitirqd #k_id */
static inline int litmus_tasklet_hi_schedule(struct tasklet_struct *t,
unsigned int k_id)
{
int ret = 0;
if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
ret = __litmus_tasklet_hi_schedule(t, k_id);
return(ret);
}
/* for use by __tasklet_hi_schedule() */
static inline int _litmus_tasklet_hi_schedule(struct tasklet_struct *t,
unsigned int k_id)
{
return(__litmus_tasklet_hi_schedule(t, k_id));
}
extern int __litmus_tasklet_hi_schedule_first(
struct tasklet_struct *t,
unsigned int k_id);
/* schedule a hi tasklet on klitirqd #k_id on next go-around */
/* PRECONDITION: Interrupts must be disabled. */
static inline int litmus_tasklet_hi_schedule_first(
struct tasklet_struct *t,
unsigned int k_id)
{
int ret = 0;
if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
ret = __litmus_tasklet_hi_schedule_first(t, k_id);
return(ret);
}
/* for use by __tasklet_hi_schedule_first() */
static inline int _litmus_tasklet_hi_schedule_first(
struct tasklet_struct *t,
unsigned int k_id)
{
return(__litmus_tasklet_hi_schedule_first(t, k_id));
}
//////////////
extern int __litmus_schedule_work(
struct work_struct* w,
unsigned int k_id);
static inline int litmus_schedule_work(
struct work_struct* w,
unsigned int k_id)
{
return(__litmus_schedule_work(w, k_id));
}
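/* Illustrative sketch: queueing a work item on klitirqd #0 rather than on a
 * system workqueue.  my_work_fn is a hypothetical
 * void my_work_fn(struct work_struct*) handler.
 *
 *	struct work_struct my_work;
 *	INIT_WORK(&my_work, my_work_fn);
 *	litmus_schedule_work(&my_work, 0);
 */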
///////////// mutex operations for client threads.
void down_and_set_stat(struct task_struct* t,
enum klitirqd_sem_status to_set,
struct mutex* sem);
void __down_and_reset_and_set_stat(struct task_struct* t,
enum klitirqd_sem_status to_reset,
enum klitirqd_sem_status to_set,
struct mutex* sem);
void up_and_set_stat(struct task_struct* t,
enum klitirqd_sem_status to_set,
struct mutex* sem);
void release_klitirqd_lock(struct task_struct* t);
int reacquire_klitirqd_lock(struct task_struct* t);
#endif /* __LITMUS_SOFTIRQ_H */