author      Bjoern Brandenburg <bbb@mpi-sws.org>    2013-06-23 06:00:35 -0400
committer   Bjoern Brandenburg <bbb@mpi-sws.org>    2013-08-07 03:16:44 -0400
commit      a20a7105a2206eb4b9c791aa276f4233bb453214 (patch)
tree        07323bf1ab014ef53239be76e4f12d380295fa17 /litmus
parent      fe983e95cab78aee27211a56f4ac6708b35f424c (diff)
Feather-Trace: add generic ftdev device driver
This patch adds the ftdev device driver, which is used to export samples collected with Feather-Trace to userspace.
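For orientation, a userspace consumer of such a device might look roughly like the sketch below. The device path (/dev/litmus/ft_trace0), the event ID, and the record size are illustrative assumptions; the FTDEV_ENABLE_CMD/FTDEV_DISABLE_CMD ioctl values come from litmus/ftdev.h, which is not part of this patch and is assumed here to be visible to userspace.

/* Hypothetical userspace reader (not part of this patch): enable one
 * Feather-Trace event via ioctl(), then stream fixed-size records from
 * the ftdev minor device. Path, event ID, and slot size are assumptions. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <litmus/ftdev.h>       /* FTDEV_ENABLE_CMD, FTDEV_DISABLE_CMD (assumed exported) */

#define SLOT_SIZE 16            /* assumption: must match the buffer's slot_size */
#define EVENT_ID  100           /* illustrative Feather-Trace event ID */

int main(void)
{
        char slot[SLOT_SIZE];
        ssize_t got;
        int i;
        int fd = open("/dev/litmus/ft_trace0", O_RDONLY);       /* hypothetical node name */

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, FTDEV_ENABLE_CMD, EVENT_ID) < 0) {         /* handled by ftdev_ioctl() */
                perror("ioctl");
                return 1;
        }
        /* ftdev_read() hands out whole slots only and polls (sleeps) while
         * the enabled event has not yet produced data */
        for (i = 0; i < 1000 && (got = read(fd, slot, sizeof(slot))) > 0; i++)
                fwrite(slot, 1, got, stdout);

        ioctl(fd, FTDEV_DISABLE_CMD, EVENT_ID);
        close(fd);
        return 0;
}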
Diffstat (limited to 'litmus')
-rw-r--r--   litmus/Makefile     2
-rw-r--r--   litmus/ftdev.c    432
2 files changed, 433 insertions, 1 deletion
diff --git a/litmus/Makefile b/litmus/Makefile
index 4c6130b58bae..bca61e6deb71 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -2,4 +2,4 @@
 # Makefile for LITMUS^RT
 #
 
-obj-$(CONFIG_FEATHER_TRACE) += ft_event.o
+obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
diff --git a/litmus/ftdev.c b/litmus/ftdev.c
new file mode 100644
index 000000000000..a3c679d1c5c7
--- /dev/null
+++ b/litmus/ftdev.c
@@ -0,0 +1,432 @@
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/cdev.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/vmalloc.h>

#include <litmus/feather_trace.h>
#include <litmus/ftdev.h>

struct ft_buffer* alloc_ft_buffer(unsigned int count, size_t size)
{
        struct ft_buffer* buf;
        /* layout: count slots of `size' bytes, followed by one one-byte
         * slot marker per slot => (size + 1) * count bytes in total */
        size_t total = (size + 1) * count;
        char* mem;

        buf = kmalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return NULL;

        mem = vmalloc(total);
        if (!mem) {
                kfree(buf);
                return NULL;
        }

        if (!init_ft_buffer(buf, count, size,
                            mem + (count * size), /* markers at the end */
                            mem)) {               /* buffer objects */
                vfree(mem);
                kfree(buf);
                return NULL;
        }
        return buf;
}

void free_ft_buffer(struct ft_buffer* buf)
{
        if (buf) {
                vfree(buf->buffer_mem);
                kfree(buf);
        }
}

struct ftdev_event {
        int id;
        struct ftdev_event* next;
};

static int activate(struct ftdev_event** chain, int id)
{
        struct ftdev_event* ev = kmalloc(sizeof(*ev), GFP_KERNEL);
        if (ev) {
                printk(KERN_INFO
                       "Enabling feather-trace event %d.\n", (int) id);
                ft_enable_event(id);
                ev->id = id;
                ev->next = *chain;
                *chain = ev;
        }
        return ev ? 0 : -ENOMEM;
}

static void deactivate(struct ftdev_event** chain, int id)
{
        struct ftdev_event **cur = chain;
        struct ftdev_event *nxt;
        while (*cur) {
                if ((*cur)->id == id) {
                        nxt = (*cur)->next;
                        kfree(*cur);
                        *cur = nxt;
                        printk(KERN_INFO
                               "Disabling feather-trace event %d.\n", (int) id);
                        ft_disable_event(id);
                        break;
                }
                cur = &(*cur)->next;
        }
}

static int ftdev_open(struct inode *in, struct file *filp)
{
        struct ftdev* ftdev;
        struct ftdev_minor* ftdm;
        unsigned int buf_idx = iminor(in);
        int err = 0;

        ftdev = container_of(in->i_cdev, struct ftdev, cdev);

        if (buf_idx >= ftdev->minor_cnt) {
                err = -ENODEV;
                goto out;
        }
        if (ftdev->can_open && (err = ftdev->can_open(ftdev, buf_idx)))
                goto out;

        ftdm = ftdev->minor + buf_idx;
        ftdm->ftdev = ftdev;
        filp->private_data = ftdm;

        if (mutex_lock_interruptible(&ftdm->lock)) {
                err = -ERESTARTSYS;
                goto out;
        }

        if (!ftdm->readers && ftdev->alloc)
                err = ftdev->alloc(ftdev, buf_idx);
        if (0 == err)
                ftdm->readers++;

        mutex_unlock(&ftdm->lock);
out:
        return err;
}

static int ftdev_release(struct inode *in, struct file *filp)
{
        struct ftdev* ftdev;
        struct ftdev_minor* ftdm;
        unsigned int buf_idx = iminor(in);
        int err = 0;

        ftdev = container_of(in->i_cdev, struct ftdev, cdev);

        if (buf_idx >= ftdev->minor_cnt) {
                err = -ENODEV;
                goto out;
        }
        ftdm = ftdev->minor + buf_idx;

        if (mutex_lock_interruptible(&ftdm->lock)) {
                err = -ERESTARTSYS;
                goto out;
        }

        if (ftdm->readers == 1) {
                while (ftdm->events)
                        deactivate(&ftdm->events, ftdm->events->id);

                /* wait for any pending events to complete */
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(HZ);

                printk(KERN_ALERT "Failed trace writes: %u\n",
                       ftdm->buf->failed_writes);

                if (ftdev->free)
                        ftdev->free(ftdev, buf_idx);
        }

        ftdm->readers--;
        mutex_unlock(&ftdm->lock);
out:
        return err;
}

/* based on ft_buffer_read
 * @returns < 0 : page fault
 *          = 0 : no data available
 *          = 1 : one slot copied
 */
static int ft_buffer_copy_to_user(struct ft_buffer* buf, char __user *dest)
{
        unsigned int idx;
        int err = 0;
        if (buf->free_count != buf->slot_count) {
                /* data available */
                idx = buf->read_idx % buf->slot_count;
                if (buf->slots[idx] == SLOT_READY) {
                        if (copy_to_user(dest, ((char*) buf->buffer_mem) +
                                         idx * buf->slot_size,
                                         buf->slot_size)) {
                                /* partial copy => fault in userspace */
                                err = -EFAULT;
                        } else {
                                /* copy ok, release the slot */
                                buf->slots[idx] = SLOT_FREE;
                                buf->read_idx++;
                                fetch_and_inc(&buf->free_count);
                                err = 1;
                        }
                }
        }
        return err;
}

static ssize_t ftdev_read(struct file *filp,
                          char __user *to, size_t len, loff_t *f_pos)
{
        /* we ignore f_pos, this is strictly sequential */

        ssize_t err = 0;
        size_t chunk;
        int copied;
        struct ftdev_minor* ftdm = filp->private_data;

        if (mutex_lock_interruptible(&ftdm->lock)) {
                err = -ERESTARTSYS;
                goto out;
        }

        chunk = ftdm->buf->slot_size;
        while (len >= chunk) {
                copied = ft_buffer_copy_to_user(ftdm->buf, to);
                if (copied == 1) {
                        len -= chunk;
                        to  += chunk;
                        err += chunk;
                } else if (err == 0 && copied == 0 && ftdm->events) {
                        /* Only wait if there are any events enabled and only
                         * if we haven't copied some data yet. We cannot wait
                         * here with copied data because that data would get
                         * lost if the task is interrupted (e.g., killed).
                         */
                        mutex_unlock(&ftdm->lock);
                        set_current_state(TASK_INTERRUPTIBLE);

                        schedule_timeout(50);

                        if (signal_pending(current)) {
                                if (err == 0)
                                        /* nothing read yet, signal problem */
                                        err = -ERESTARTSYS;
                                goto out;
                        }
                        if (mutex_lock_interruptible(&ftdm->lock)) {
                                err = -ERESTARTSYS;
                                goto out;
                        }
                } else if (copied < 0) {
                        /* page fault */
                        err = copied;
                        break;
                } else
                        /* nothing left to get, return to user space */
                        break;
        }
        mutex_unlock(&ftdm->lock);
out:
        return err;
}

static long ftdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        long err = -ENOIOCTLCMD;
        struct ftdev_minor* ftdm = filp->private_data;

        if (mutex_lock_interruptible(&ftdm->lock)) {
                err = -ERESTARTSYS;
                goto out;
        }

        /* FIXME: check id against list of acceptable events */

        switch (cmd) {
        case FTDEV_ENABLE_CMD:
                if (activate(&ftdm->events, arg))
                        err = -ENOMEM;
                else
                        err = 0;
                break;

        case FTDEV_DISABLE_CMD:
                deactivate(&ftdm->events, arg);
                err = 0;
                break;

        default:
                printk(KERN_DEBUG "ftdev: strange ioctl (%u, %lu)\n", cmd, arg);
        }

        mutex_unlock(&ftdm->lock);
out:
        return err;
}

static ssize_t ftdev_write(struct file *filp, const char __user *from,
                           size_t len, loff_t *f_pos)
{
        struct ftdev_minor* ftdm = filp->private_data;
        ssize_t err = -EINVAL;
        struct ftdev* ftdev = ftdm->ftdev;

        /* dispatch write to buffer-specific code, if available */
        if (ftdev->write)
                err = ftdev->write(ftdm->buf, len, from);

        return err;
}

struct file_operations ftdev_fops = {
        .owner = THIS_MODULE,
        .open  = ftdev_open,
        .release = ftdev_release,
        .write = ftdev_write,
        .read  = ftdev_read,
        .unlocked_ioctl = ftdev_ioctl,
};

int ftdev_init(struct ftdev* ftdev, struct module* owner,
               const int minor_cnt, const char* name)
{
        int i, err;

        BUG_ON(minor_cnt < 1);

        cdev_init(&ftdev->cdev, &ftdev_fops);
        ftdev->name = name;
        ftdev->minor_cnt = minor_cnt;
        ftdev->cdev.owner = owner;
        ftdev->cdev.ops = &ftdev_fops;
        ftdev->alloc = NULL;
        ftdev->free = NULL;
        ftdev->can_open = NULL;
        ftdev->write = NULL;

        ftdev->minor = kcalloc(ftdev->minor_cnt, sizeof(*ftdev->minor),
                               GFP_KERNEL);
        if (!ftdev->minor) {
                printk(KERN_WARNING "ftdev(%s): Could not allocate memory\n",
                       ftdev->name);
                err = -ENOMEM;
                goto err_out;
        }

        for (i = 0; i < ftdev->minor_cnt; i++) {
                mutex_init(&ftdev->minor[i].lock);
                ftdev->minor[i].readers = 0;
                ftdev->minor[i].buf = NULL;
                ftdev->minor[i].events = NULL;
        }

        ftdev->class = class_create(owner, ftdev->name);
        if (IS_ERR(ftdev->class)) {
                err = PTR_ERR(ftdev->class);
                printk(KERN_WARNING "ftdev(%s): "
                       "Could not create device class.\n", ftdev->name);
                goto err_dealloc;
        }

        return 0;

err_dealloc:
        kfree(ftdev->minor);
err_out:
        return err;
}

/*
 * Destroy minor devices up to, but not including, up_to. Out-of-range
 * values (e.g., (unsigned int) -1, as passed by ftdev_exit()) destroy
 * all minors.
 */
static void ftdev_device_destroy(struct ftdev* ftdev, unsigned int up_to)
{
        dev_t minor_cntr;

        if (up_to < 1 || up_to > ftdev->minor_cnt)
                up_to = ftdev->minor_cnt;

        for (minor_cntr = 0; minor_cntr < up_to; ++minor_cntr)
                device_destroy(ftdev->class, MKDEV(ftdev->major, minor_cntr));
}

void ftdev_exit(struct ftdev* ftdev)
{
        printk("ftdev(%s): Exiting\n", ftdev->name);
        ftdev_device_destroy(ftdev, -1);
        cdev_del(&ftdev->cdev);
        unregister_chrdev_region(MKDEV(ftdev->major, 0), ftdev->minor_cnt);
        class_destroy(ftdev->class);
        kfree(ftdev->minor);
}

int register_ftdev(struct ftdev* ftdev)
{
        struct device **device;
        dev_t trace_dev_tmp, minor_cntr;
        int err;

        err = alloc_chrdev_region(&trace_dev_tmp, 0, ftdev->minor_cnt,
                                  ftdev->name);
        if (err) {
                printk(KERN_WARNING "ftdev(%s): "
                       "Could not allocate char. device region (%d minors)\n",
                       ftdev->name, ftdev->minor_cnt);
                goto err_out;
        }

        ftdev->major = MAJOR(trace_dev_tmp);

        err = cdev_add(&ftdev->cdev, trace_dev_tmp, ftdev->minor_cnt);
        if (err) {
                printk(KERN_WARNING "ftdev(%s): "
                       "Could not add cdev for major %u with %u minor(s).\n",
                       ftdev->name, ftdev->major, ftdev->minor_cnt);
                goto err_unregister;
        }

        /* create the minor device(s) */
        for (minor_cntr = 0; minor_cntr < ftdev->minor_cnt; ++minor_cntr)
        {
                trace_dev_tmp = MKDEV(ftdev->major, minor_cntr);
                device = &ftdev->minor[minor_cntr].device;

                *device = device_create(ftdev->class, NULL, trace_dev_tmp, NULL,
                                        "litmus/%s%d", ftdev->name, minor_cntr);
                if (IS_ERR(*device)) {
                        err = PTR_ERR(*device);
                        printk(KERN_WARNING "ftdev(%s): "
                               "Could not create device major/minor number "
                               "%u/%u\n", ftdev->name, ftdev->major,
                               minor_cntr);
                        printk(KERN_WARNING "ftdev(%s): "
                               "will attempt deletion of allocated devices.\n",
                               ftdev->name);
                        goto err_minors;
                }
        }

        return 0;

err_minors:
        ftdev_device_destroy(ftdev, minor_cntr);
        cdev_del(&ftdev->cdev);
err_unregister:
        unregister_chrdev_region(MKDEV(ftdev->major, 0), ftdev->minor_cnt);
err_out:
        return err;
}
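
As a usage sketch for the interface added here: a client embeds a struct ftdev, initializes it with ftdev_init(), installs its buffer-management callbacks, and then calls register_ftdev(). Everything below is illustrative and not part of this commit; the module name and buffer geometry are assumptions, and the exact callback typedefs live in litmus/ftdev.h and in later patches that add real users.

/* Hypothetical ftdev client (illustrative only): a single minor whose
 * ft_buffer is allocated on first open and freed on last release. */
#include <linux/module.h>
#include <linux/errno.h>

#include <litmus/ftdev.h>

static struct ftdev example_ftdev;

static int example_alloc(struct ftdev* ftdev, unsigned int idx)
{
        /* 4096 slots of 16 bytes each -- buffer geometry is an assumption */
        ftdev->minor[idx].buf = alloc_ft_buffer(4096, 16);
        return ftdev->minor[idx].buf ? 0 : -ENOMEM;
}

static void example_free(struct ftdev* ftdev, unsigned int idx)
{
        free_ft_buffer(ftdev->minor[idx].buf);
        ftdev->minor[idx].buf = NULL;
}

static int __init example_trace_init(void)
{
        int err;

        err = ftdev_init(&example_ftdev, THIS_MODULE, 1, "example_trace");
        if (err)
                return err;

        /* install lazy buffer management before the device goes live */
        example_ftdev.alloc = example_alloc;
        example_ftdev.free  = example_free;

        /* creates /dev/litmus/example_trace0 (via udev) */
        err = register_ftdev(&example_ftdev);
        /* full unwinding on failure (class_destroy() etc.) elided for brevity */
        return err;
}

static void __exit example_trace_exit(void)
{
        ftdev_exit(&example_ftdev);
}

module_init(example_trace_init);
module_exit(example_trace_exit);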