author | Peter Zijlstra <peterz@infradead.org> | 2013-10-31 13:18:19 -0400
---|---|---
committer | Ingo Molnar <mingo@kernel.org> | 2013-11-06 03:23:59 -0500
commit | 1696a8bee390929fed05c6297164816ae2ced280 (patch) |
tree | 59866e14aa5f8bf6f5ac0fa69211be6a5e05afd3 /kernel/rtmutex-tester.c |
parent | e25a64c4017e3a3cda17454b040737e410a12991 (diff) |
locking: Move the rtmutex code to kernel/locking/
Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-p9ijt8div0hwldexwfm4nlhj@git.kernel.org
[ Fixed build failure in kernel/rcu/tree_plugin.h. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/rtmutex-tester.c')
-rw-r--r-- | kernel/rtmutex-tester.c | 420
1 file changed, 0 insertions(+), 420 deletions(-)
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
deleted file mode 100644
index 1d96dd0d93c1..000000000000
--- a/kernel/rtmutex-tester.c
+++ /dev/null
@@ -1,420 +0,0 @@
/*
 * RT-Mutex-tester: scriptable tester for rt mutexes
 *
 * started by Thomas Gleixner:
 *
 * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 */
#include <linux/device.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/stat.h>

#include "rtmutex.h"

#define MAX_RT_TEST_THREADS	8
#define MAX_RT_TEST_MUTEXES	8

static spinlock_t rttest_lock;
static atomic_t rttest_event;

struct test_thread_data {
	int			opcode;
	int			opdata;
	int			mutexes[MAX_RT_TEST_MUTEXES];
	int			event;
	struct device		dev;
};
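
/*
 * Per-mutex state codes kept in test_thread_data->mutexes[] (inferred
 * from the handlers below): 0 = free, 1 = lock requested, 2 = blocked
 * in schedule_rt_mutex_test(), 3 = woken up and waiting to continue
 * the acquisition, 4 = lock held.
 */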

static struct test_thread_data thread_data[MAX_RT_TEST_THREADS];
static struct task_struct *threads[MAX_RT_TEST_THREADS];
static struct rt_mutex mutexes[MAX_RT_TEST_MUTEXES];

enum test_opcodes {
	RTTEST_NOP = 0,
	RTTEST_SCHEDOT,		/* 1 Sched other, data = nice */
	RTTEST_SCHEDRT,		/* 2 Sched fifo, data = prio */
	RTTEST_LOCK,		/* 3 Lock uninterruptible, data = lockindex */
	RTTEST_LOCKNOWAIT,	/* 4 Lock uninterruptible no wait in wakeup, data = lockindex */
	RTTEST_LOCKINT,		/* 5 Lock interruptible, data = lockindex */
	RTTEST_LOCKINTNOWAIT,	/* 6 Lock interruptible no wait in wakeup, data = lockindex */
	RTTEST_LOCKCONT,	/* 7 Continue locking after the wakeup delay */
	RTTEST_UNLOCK,		/* 8 Unlock, data = lockindex */
	/* 9, 10 - reserved for BKL commemoration */
	RTTEST_SIGNAL = 11,	/* 11 Signal other test thread, data = thread id */
	RTTEST_RESETEVENT = 98,	/* 98 Reset event counter */
	RTTEST_RESET = 99,	/* 99 Reset all pending operations */
};
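
/*
 * Illustrative example only: writing "2:80" to a test thread's command
 * file switches it to SCHED_FIFO prio 80 (RTTEST_SCHEDRT), "3:0" makes
 * it acquire test mutex 0 (RTTEST_LOCK) and "8:0" releases it again
 * (RTTEST_UNLOCK); see sysfs_test_command() below for the parser.
 */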

static int handle_op(struct test_thread_data *td, int lockwakeup)
{
	int i, id, ret = -EINVAL;

	switch (td->opcode) {

	case RTTEST_NOP:
		return 0;

	case RTTEST_LOCKCONT:
		td->mutexes[td->opdata] = 1;
		td->event = atomic_add_return(1, &rttest_event);
		return 0;

	case RTTEST_RESET:
		for (i = 0; i < MAX_RT_TEST_MUTEXES; i++) {
			if (td->mutexes[i] == 4) {
				rt_mutex_unlock(&mutexes[i]);
				td->mutexes[i] = 0;
			}
		}
		return 0;

	case RTTEST_RESETEVENT:
		atomic_set(&rttest_event, 0);
		return 0;

	default:
		if (lockwakeup)
			return ret;
	}

	switch (td->opcode) {

	case RTTEST_LOCK:
	case RTTEST_LOCKNOWAIT:
		id = td->opdata;
		if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
			return ret;

		td->mutexes[id] = 1;
		td->event = atomic_add_return(1, &rttest_event);
		rt_mutex_lock(&mutexes[id]);
		td->event = atomic_add_return(1, &rttest_event);
		td->mutexes[id] = 4;
		return 0;

	case RTTEST_LOCKINT:
	case RTTEST_LOCKINTNOWAIT:
		id = td->opdata;
		if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
			return ret;

		td->mutexes[id] = 1;
		td->event = atomic_add_return(1, &rttest_event);
		ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
		td->event = atomic_add_return(1, &rttest_event);
		td->mutexes[id] = ret ? 0 : 4;
		return ret ? -EINTR : 0;

	case RTTEST_UNLOCK:
		id = td->opdata;
		if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
			return ret;

		td->event = atomic_add_return(1, &rttest_event);
		rt_mutex_unlock(&mutexes[id]);
		td->event = atomic_add_return(1, &rttest_event);
		td->mutexes[id] = 0;
		return 0;

	default:
		break;
	}
	return ret;
}

/*
 * Schedule replacement for rtsem_down(). Only called for threads with
 * PF_MUTEX_TESTER set.
 *
 * This allows us to have fine-grained control over the event flow.
 */
void schedule_rt_mutex_test(struct rt_mutex *mutex)
{
	int tid, op, dat;
	struct test_thread_data *td;

	/* We have to look up the task */
	for (tid = 0; tid < MAX_RT_TEST_THREADS; tid++) {
		if (threads[tid] == current)
			break;
	}

	BUG_ON(tid == MAX_RT_TEST_THREADS);

	td = &thread_data[tid];

	op = td->opcode;
	dat = td->opdata;

	switch (op) {
	case RTTEST_LOCK:
	case RTTEST_LOCKINT:
	case RTTEST_LOCKNOWAIT:
	case RTTEST_LOCKINTNOWAIT:
		if (mutex != &mutexes[dat])
			break;

		if (td->mutexes[dat] != 1)
			break;

		td->mutexes[dat] = 2;
		td->event = atomic_add_return(1, &rttest_event);
		break;

	default:
		break;
	}

	schedule();

	switch (op) {
	case RTTEST_LOCK:
	case RTTEST_LOCKINT:
		if (mutex != &mutexes[dat])
			return;

		if (td->mutexes[dat] != 2)
			return;

		td->mutexes[dat] = 3;
		td->event = atomic_add_return(1, &rttest_event);
		break;

	case RTTEST_LOCKNOWAIT:
	case RTTEST_LOCKINTNOWAIT:
		if (mutex != &mutexes[dat])
			return;

		if (td->mutexes[dat] != 2)
			return;

		td->mutexes[dat] = 1;
		td->event = atomic_add_return(1, &rttest_event);
		return;

	default:
		return;
	}

	td->opcode = 0;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (td->opcode > 0) {
			int ret;

			set_current_state(TASK_RUNNING);
			ret = handle_op(td, 1);
			set_current_state(TASK_INTERRUPTIBLE);
			if (td->opcode == RTTEST_LOCKCONT)
				break;
			td->opcode = ret;
		}

		/* Wait for the next command to be executed */
		schedule();
	}

	/* Restore previous command and data */
	td->opcode = op;
	td->opdata = dat;
}

static int test_func(void *data)
{
	struct test_thread_data *td = data;
	int ret;

	current->flags |= PF_MUTEX_TESTER;
	set_freezable();
	allow_signal(SIGHUP);

	for (;;) {

		set_current_state(TASK_INTERRUPTIBLE);

		if (td->opcode > 0) {
			set_current_state(TASK_RUNNING);
			ret = handle_op(td, 0);
			set_current_state(TASK_INTERRUPTIBLE);
			td->opcode = ret;
		}

		/* Wait for the next command to be executed */
		schedule();
		try_to_freeze();

		if (signal_pending(current))
			flush_signals(current);

		if (kthread_should_stop())
			break;
	}
	return 0;
}

/**
 * sysfs_test_command - interface for test commands
 * @dev:	thread reference
 * @attr:	the command device attribute
 * @buf:	command for actual step
 * @count:	length of buffer
 *
 * command syntax:
 *
 * opcode:data
 */
static ssize_t sysfs_test_command(struct device *dev, struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct sched_param schedpar;
	struct test_thread_data *td;
	char cmdbuf[32];
	int op, dat, tid, ret;

	td = container_of(dev, struct test_thread_data, dev);
	tid = td->dev.id;

	/* strings from sysfs write are not 0 terminated! */
	if (count >= sizeof(cmdbuf))
		return -EINVAL;

	/* strip off the \n: */
	if (buf[count-1] == '\n')
		count--;
	if (count < 1)
		return -EINVAL;

	memcpy(cmdbuf, buf, count);
	cmdbuf[count] = 0;

	if (sscanf(cmdbuf, "%d:%d", &op, &dat) != 2)
		return -EINVAL;

	switch (op) {
	case RTTEST_SCHEDOT:
		schedpar.sched_priority = 0;
		ret = sched_setscheduler(threads[tid], SCHED_NORMAL, &schedpar);
		if (ret)
			return ret;
		set_user_nice(current, 0);
		break;

	case RTTEST_SCHEDRT:
		schedpar.sched_priority = dat;
		ret = sched_setscheduler(threads[tid], SCHED_FIFO, &schedpar);
		if (ret)
			return ret;
		break;

	case RTTEST_SIGNAL:
		send_sig(SIGHUP, threads[tid], 0);
		break;

	default:
		if (td->opcode > 0)
			return -EBUSY;
		td->opdata = dat;
		td->opcode = op;
		wake_up_process(threads[tid]);
	}

	return count;
}
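
/*
 * Illustrative only: with the sysfs layout implied by the "rttest"
 * system subsystem registered below, a single test step for thread 0
 * would be driven by something like
 *
 *	echo "3:0" > /sys/devices/system/rttest/rttest0/command
 *
 * which makes thread 0 acquire test mutex 0 (RTTEST_LOCK).
 */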

/**
 * sysfs_test_status - sysfs interface for rt tester
 * @dev:	thread to query
 * @attr:	the status device attribute
 * @buf:	char buffer to be filled with thread status info
 */
static ssize_t sysfs_test_status(struct device *dev, struct device_attribute *attr,
				 char *buf)
{
	struct test_thread_data *td;
	struct task_struct *tsk;
	char *curr = buf;
	int i;

	td = container_of(dev, struct test_thread_data, dev);
	tsk = threads[td->dev.id];

	spin_lock(&rttest_lock);

	curr += sprintf(curr,
		"O: %4d, E:%8d, S: 0x%08lx, P: %4d, N: %4d, B: %p, M:",
		td->opcode, td->event, tsk->state,
		(MAX_RT_PRIO - 1) - tsk->prio,
		(MAX_RT_PRIO - 1) - tsk->normal_prio,
		tsk->pi_blocked_on);

	for (i = MAX_RT_TEST_MUTEXES - 1; i >= 0; i--)
		curr += sprintf(curr, "%d", td->mutexes[i]);

	spin_unlock(&rttest_lock);

	curr += sprintf(curr, ", T: %p, R: %p\n", tsk,
		mutexes[td->dev.id].owner);

	return curr - buf;
}

static DEVICE_ATTR(status, S_IRUSR, sysfs_test_status, NULL);
static DEVICE_ATTR(command, S_IWUSR, NULL, sysfs_test_command);

static struct bus_type rttest_subsys = {
	.name = "rttest",
	.dev_name = "rttest",
};

static int init_test_thread(int id)
{
	thread_data[id].dev.bus = &rttest_subsys;
	thread_data[id].dev.id = id;

	threads[id] = kthread_run(test_func, &thread_data[id], "rt-test-%d", id);
	if (IS_ERR(threads[id]))
		return PTR_ERR(threads[id]);

	return device_register(&thread_data[id].dev);
}

static int init_rttest(void)
{
	int ret, i;

	spin_lock_init(&rttest_lock);

	for (i = 0; i < MAX_RT_TEST_MUTEXES; i++)
		rt_mutex_init(&mutexes[i]);

	ret = subsys_system_register(&rttest_subsys, NULL);
	if (ret)
		return ret;

	for (i = 0; i < MAX_RT_TEST_THREADS; i++) {
		ret = init_test_thread(i);
		if (ret)
			break;
		ret = device_create_file(&thread_data[i].dev, &dev_attr_status);
		if (ret)
			break;
		ret = device_create_file(&thread_data[i].dev, &dev_attr_command);
		if (ret)
			break;
	}

	printk("Initializing RT-Tester: %s\n", ret ? "Failed" : "OK");

	return ret;
}

device_initcall(init_rttest);
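
For reference, the removed tester was driven entirely through the per-thread sysfs files registered above. Below is a minimal user-space sketch of that protocol; the `/sys/devices/system/rttest/rttestN/` paths are an assumption based on the `rttest` system subsystem and device ids registered in init_rttest(), and `rt_cmd()`/`rt_status()` are hypothetical helpers, not part of any kernel or libc API.

```c
/* Minimal sketch of scripting the rt-mutex tester from user space.
 * Paths are assumed from the "rttest" subsystem registration above;
 * rt_cmd() and rt_status() are hypothetical helper names. */
#include <stdio.h>

/* Write an "opcode:data" command to one test thread's command file. */
static int rt_cmd(int tid, int op, int dat)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/rttest/rttest%d/command", tid);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d:%d\n", op, dat);
	return fclose(f);
}

/* Read back the one-line record produced by sysfs_test_status(). */
static int rt_status(int tid, char *buf, size_t len)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/rttest/rttest%d/status", tid);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(buf, len, f)) {
		fclose(f);
		return -1;
	}
	return fclose(f);
}

int main(void)
{
	char status[256];

	rt_cmd(0, 2, 80);	/* RTTEST_SCHEDRT: SCHED_FIFO, prio 80 */
	rt_cmd(0, 3, 0);	/* RTTEST_LOCK: acquire test mutex 0 */
	rt_cmd(0, 8, 0);	/* RTTEST_UNLOCK: release test mutex 0 */
	rt_cmd(0, 99, 0);	/* RTTEST_RESET: drop anything still held */

	if (rt_status(0, status, sizeof(status)) == 0)
		fputs(status, stdout);
	return 0;
}
```

The kernel side keeps all state in `thread_data[]` and advances only on `wake_up_process()`, which is what lets a user-space script like this single-step the event flow one opcode at a time.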