Diffstat (limited to 'kernel/rtmutex-tester.c')
 -rw-r--r--  kernel/rtmutex-tester.c  436
 1 file changed, 436 insertions, 0 deletions
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
new file mode 100644
index 000000000000..fe211ba3a5b5
--- /dev/null
+++ b/kernel/rtmutex-tester.c
@@ -0,0 +1,436 @@
/*
 * RT-Mutex-tester: scriptable tester for rt mutexes
 *
 * started by Thomas Gleixner:
 *
 *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 */
#include <linux/config.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/sysdev.h>
#include <linux/timer.h>

#include "rtmutex.h"

#define MAX_RT_TEST_THREADS	8
#define MAX_RT_TEST_MUTEXES	8

static spinlock_t rttest_lock;
static atomic_t rttest_event;

struct test_thread_data {
	int			opcode;
	int			opdata;
	int			mutexes[MAX_RT_TEST_MUTEXES];
	int			bkl;
	int			event;
	struct sys_device	sysdev;
};

static struct test_thread_data thread_data[MAX_RT_TEST_THREADS];
static task_t *threads[MAX_RT_TEST_THREADS];
static struct rt_mutex mutexes[MAX_RT_TEST_MUTEXES];

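/*
 * Annotation: per-mutex state values kept in test_thread_data->mutexes[],
 * as driven by handle_op() and schedule_rt_mutex_test() below. Roughly:
 * 0 = unlocked, 1 = lock requested, 2 = blocked on the lock, 3 = woken up
 * and waiting for RTTEST_LOCKCONT, 4 = lock held.
 */
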
enum test_opcodes {
	RTTEST_NOP = 0,
	RTTEST_SCHEDOT,		/* 1 Sched other, data = nice */
	RTTEST_SCHEDRT,		/* 2 Sched fifo, data = prio */
	RTTEST_LOCK,		/* 3 Lock uninterruptible, data = lockindex */
	RTTEST_LOCKNOWAIT,	/* 4 Lock uninterruptible no wait in wakeup, data = lockindex */
	RTTEST_LOCKINT,		/* 5 Lock interruptible, data = lockindex */
	RTTEST_LOCKINTNOWAIT,	/* 6 Lock interruptible no wait in wakeup, data = lockindex */
	RTTEST_LOCKCONT,	/* 7 Continue locking after the wakeup delay */
	RTTEST_UNLOCK,		/* 8 Unlock, data = lockindex */
	RTTEST_LOCKBKL,		/* 9 Lock BKL */
	RTTEST_UNLOCKBKL,	/* 10 Unlock BKL */
	RTTEST_SIGNAL,		/* 11 Signal other test thread, data = thread id */
	RTTEST_RESETEVENT = 98,	/* 98 Reset event counter */
	RTTEST_RESET = 99,	/* 99 Reset all pending operations */
};
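
/*
 * Annotation: commands reach a test thread via sysfs (see
 * sysfs_test_command() below) as an "opcode:data" pair, e.g. "3:0"
 * requests RTTEST_LOCK on mutex 0 and "8:0" the matching RTTEST_UNLOCK.
 */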
55 | |||
56 | static int handle_op(struct test_thread_data *td, int lockwakeup) | ||
57 | { | ||
58 | struct sched_param schedpar; | ||
59 | int i, id, ret = -EINVAL; | ||
60 | |||
61 | switch(td->opcode) { | ||
62 | |||
63 | case RTTEST_NOP: | ||
64 | return 0; | ||
65 | |||
66 | case RTTEST_SCHEDOT: | ||
67 | schedpar.sched_priority = 0; | ||
68 | ret = sched_setscheduler(current, SCHED_NORMAL, &schedpar); | ||
69 | if (!ret) | ||
70 | set_user_nice(current, 0); | ||
71 | return ret; | ||
72 | |||
73 | case RTTEST_SCHEDRT: | ||
74 | schedpar.sched_priority = td->opdata; | ||
75 | return sched_setscheduler(current, SCHED_FIFO, &schedpar); | ||
76 | |||
77 | case RTTEST_LOCKCONT: | ||
78 | td->mutexes[td->opdata] = 1; | ||
79 | td->event = atomic_add_return(1, &rttest_event); | ||
80 | return 0; | ||
81 | |||
82 | case RTTEST_RESET: | ||
83 | for (i = 0; i < MAX_RT_TEST_MUTEXES; i++) { | ||
84 | if (td->mutexes[i] == 4) { | ||
85 | rt_mutex_unlock(&mutexes[i]); | ||
86 | td->mutexes[i] = 0; | ||
87 | } | ||
88 | } | ||
89 | |||
90 | if (!lockwakeup && td->bkl == 4) { | ||
91 | unlock_kernel(); | ||
92 | td->bkl = 0; | ||
93 | } | ||
94 | return 0; | ||
95 | |||
96 | case RTTEST_RESETEVENT: | ||
97 | atomic_set(&rttest_event, 0); | ||
98 | return 0; | ||
99 | |||
100 | default: | ||
101 | if (lockwakeup) | ||
102 | return ret; | ||
103 | } | ||
104 | |||
105 | switch(td->opcode) { | ||
106 | |||
107 | case RTTEST_LOCK: | ||
108 | case RTTEST_LOCKNOWAIT: | ||
109 | id = td->opdata; | ||
110 | if (id < 0 || id >= MAX_RT_TEST_MUTEXES) | ||
111 | return ret; | ||
112 | |||
113 | td->mutexes[id] = 1; | ||
114 | td->event = atomic_add_return(1, &rttest_event); | ||
115 | rt_mutex_lock(&mutexes[id]); | ||
116 | td->event = atomic_add_return(1, &rttest_event); | ||
117 | td->mutexes[id] = 4; | ||
118 | return 0; | ||
119 | |||
120 | case RTTEST_LOCKINT: | ||
121 | case RTTEST_LOCKINTNOWAIT: | ||
122 | id = td->opdata; | ||
123 | if (id < 0 || id >= MAX_RT_TEST_MUTEXES) | ||
124 | return ret; | ||
125 | |||
126 | td->mutexes[id] = 1; | ||
127 | td->event = atomic_add_return(1, &rttest_event); | ||
128 | ret = rt_mutex_lock_interruptible(&mutexes[id], 0); | ||
129 | td->event = atomic_add_return(1, &rttest_event); | ||
130 | td->mutexes[id] = ret ? 0 : 4; | ||
131 | return ret ? -EINTR : 0; | ||
132 | |||
133 | case RTTEST_UNLOCK: | ||
134 | id = td->opdata; | ||
135 | if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4) | ||
136 | return ret; | ||
137 | |||
138 | td->event = atomic_add_return(1, &rttest_event); | ||
139 | rt_mutex_unlock(&mutexes[id]); | ||
140 | td->event = atomic_add_return(1, &rttest_event); | ||
141 | td->mutexes[id] = 0; | ||
142 | return 0; | ||
143 | |||
144 | case RTTEST_LOCKBKL: | ||
145 | if (td->bkl) | ||
146 | return 0; | ||
147 | td->bkl = 1; | ||
148 | lock_kernel(); | ||
149 | td->bkl = 4; | ||
150 | return 0; | ||
151 | |||
152 | case RTTEST_UNLOCKBKL: | ||
153 | if (td->bkl != 4) | ||
154 | break; | ||
155 | unlock_kernel(); | ||
156 | td->bkl = 0; | ||
157 | return 0; | ||
158 | |||
159 | default: | ||
160 | break; | ||
161 | } | ||
162 | return ret; | ||
163 | } | ||

/*
 * Schedule replacement for rtsem_down(). Only called for threads with
 * PF_MUTEX_TESTER set.
 *
 * This allows us to have fine-grained control over the event flow.
 */
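/*
 * Flow note: for RTTEST_LOCK/RTTEST_LOCKINT the blocked thread stays in
 * this function after wakeup (state 3) and only proceeds to take the lock
 * once RTTEST_LOCKCONT is issued; the *NOWAIT variants drop back to
 * state 1 and return to the rtmutex code immediately.
 */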
void schedule_rt_mutex_test(struct rt_mutex *mutex)
{
	int tid, op, dat;
	struct test_thread_data *td;

	/* We have to lookup the task */
	for (tid = 0; tid < MAX_RT_TEST_THREADS; tid++) {
		if (threads[tid] == current)
			break;
	}

	BUG_ON(tid == MAX_RT_TEST_THREADS);

	td = &thread_data[tid];

	op = td->opcode;
	dat = td->opdata;

	switch (op) {
	case RTTEST_LOCK:
	case RTTEST_LOCKINT:
	case RTTEST_LOCKNOWAIT:
	case RTTEST_LOCKINTNOWAIT:
		if (mutex != &mutexes[dat])
			break;

		if (td->mutexes[dat] != 1)
			break;

		td->mutexes[dat] = 2;
		td->event = atomic_add_return(1, &rttest_event);
		break;

	case RTTEST_LOCKBKL:
	default:
		break;
	}

	schedule();

	switch (op) {
	case RTTEST_LOCK:
	case RTTEST_LOCKINT:
		if (mutex != &mutexes[dat])
			return;

		if (td->mutexes[dat] != 2)
			return;

		td->mutexes[dat] = 3;
		td->event = atomic_add_return(1, &rttest_event);
		break;

	case RTTEST_LOCKNOWAIT:
	case RTTEST_LOCKINTNOWAIT:
		if (mutex != &mutexes[dat])
			return;

		if (td->mutexes[dat] != 2)
			return;

		td->mutexes[dat] = 1;
		td->event = atomic_add_return(1, &rttest_event);
		return;

	case RTTEST_LOCKBKL:
		return;
	default:
		return;
	}

	td->opcode = 0;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (td->opcode > 0) {
			int ret;

			set_current_state(TASK_RUNNING);
			ret = handle_op(td, 1);
			set_current_state(TASK_INTERRUPTIBLE);
			if (td->opcode == RTTEST_LOCKCONT)
				break;
			td->opcode = ret;
		}

		/* Wait for the next command to be executed */
		schedule();
	}

	/* Restore previous command and data */
	td->opcode = op;
	td->opdata = dat;
}

static int test_func(void *data)
{
	struct test_thread_data *td = data;
	int ret;

	current->flags |= PF_MUTEX_TESTER;
	allow_signal(SIGHUP);

	for (;;) {

		set_current_state(TASK_INTERRUPTIBLE);

		if (td->opcode > 0) {
			set_current_state(TASK_RUNNING);
			ret = handle_op(td, 0);
			set_current_state(TASK_INTERRUPTIBLE);
			td->opcode = ret;
		}

		/* Wait for the next command to be executed */
		schedule();

		if (signal_pending(current))
			flush_signals(current);

		if (kthread_should_stop())
			break;
	}
	return 0;
}

/**
 * sysfs_test_command - interface for test commands
 * @dev: thread reference
 * @buf: command for the current step
 * @count: length of buffer
 *
 * command syntax:
 *
 * opcode:data
 */
static ssize_t sysfs_test_command(struct sys_device *dev, const char *buf,
				  size_t count)
{
	struct test_thread_data *td;
	char cmdbuf[32];
	int op, dat, tid;

	td = container_of(dev, struct test_thread_data, sysdev);
	tid = td->sysdev.id;

	/* strings from sysfs write are not 0 terminated! */
	if (count >= sizeof(cmdbuf))
		return -EINVAL;

	/* strip off the \n: */
	if (buf[count-1] == '\n')
		count--;
	if (count < 1)
		return -EINVAL;

	memcpy(cmdbuf, buf, count);
	cmdbuf[count] = 0;

	if (sscanf(cmdbuf, "%d:%d", &op, &dat) != 2)
		return -EINVAL;

	switch (op) {
	case RTTEST_SIGNAL:
		send_sig(SIGHUP, threads[tid], 0);
		break;

	default:
		if (td->opcode > 0)
			return -EBUSY;
		td->opdata = dat;
		td->opcode = op;
		wake_up_process(threads[tid]);
	}

	return count;
}
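
/*
 * Illustrative usage from userspace (the sysfs path is an assumption
 * based on the "rttest" sysdev class registered below; verify it on the
 * running system):
 *
 *   # ask test thread 0 to take mutex 0, then check its state
 *   echo "3:0" > /sys/devices/system/rttest/rttest0/command
 *   cat /sys/devices/system/rttest/rttest0/status
 */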

/**
 * sysfs_test_status - sysfs interface for rt tester
 * @dev: thread to query
 * @buf: char buffer to be filled with thread status info
 */
static ssize_t sysfs_test_status(struct sys_device *dev, char *buf)
{
	struct test_thread_data *td;
	char *curr = buf;
	task_t *tsk;
	int i;

	td = container_of(dev, struct test_thread_data, sysdev);
	tsk = threads[td->sysdev.id];

	spin_lock(&rttest_lock);

	curr += sprintf(curr,
		"O: %4d, E:%8d, S: 0x%08lx, P: %4d, N: %4d, B: %p, K: %d, M:",
		td->opcode, td->event, tsk->state,
		(MAX_RT_PRIO - 1) - tsk->prio,
		(MAX_RT_PRIO - 1) - tsk->normal_prio,
		tsk->pi_blocked_on, td->bkl);

	for (i = MAX_RT_TEST_MUTEXES - 1; i >= 0; i--)
		curr += sprintf(curr, "%d", td->mutexes[i]);

	spin_unlock(&rttest_lock);

	curr += sprintf(curr, ", T: %p, R: %p\n", tsk,
		mutexes[td->sysdev.id].owner);

	return curr - buf;
}
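
/*
 * Annotation: status line fields as built above: O = opcode, E = event
 * counter, S = task state, P = current prio and N = normal prio (both
 * relative to MAX_RT_PRIO-1), B = pi_blocked_on, K = BKL state, M =
 * per-mutex state digits (highest index first), T = task pointer, R =
 * owner of the rt-mutex with this thread's index.
 */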

static SYSDEV_ATTR(status, 0600, sysfs_test_status, NULL);
static SYSDEV_ATTR(command, 0600, NULL, sysfs_test_command);

static struct sysdev_class rttest_sysclass = {
	set_kset_name("rttest"),
};

static int init_test_thread(int id)
{
	thread_data[id].sysdev.cls = &rttest_sysclass;
	thread_data[id].sysdev.id = id;

	threads[id] = kthread_run(test_func, &thread_data[id], "rt-test-%d", id);
	if (IS_ERR(threads[id]))
		return PTR_ERR(threads[id]);

	return sysdev_register(&thread_data[id].sysdev);
}

static int init_rttest(void)
{
	int ret, i;

	spin_lock_init(&rttest_lock);

	for (i = 0; i < MAX_RT_TEST_MUTEXES; i++)
		rt_mutex_init(&mutexes[i]);

	ret = sysdev_class_register(&rttest_sysclass);
	if (ret)
		return ret;

	for (i = 0; i < MAX_RT_TEST_THREADS; i++) {
		ret = init_test_thread(i);
		if (ret)
			break;
		ret = sysdev_create_file(&thread_data[i].sysdev, &attr_status);
		if (ret)
			break;
		ret = sysdev_create_file(&thread_data[i].sysdev, &attr_command);
		if (ret)
			break;
	}

	printk("Initializing RT-Tester: %s\n", ret ? "Failed" : "OK");

	return ret;
}

device_initcall(init_rttest);