aboutsummaryrefslogtreecommitdiffstats
path: root/litmus/ftdev.c
diff options
context:
space:
mode:
Diffstat (limited to 'litmus/ftdev.c')
-rw-r--r--litmus/ftdev.c446
1 files changed, 446 insertions, 0 deletions
diff --git a/litmus/ftdev.c b/litmus/ftdev.c
new file mode 100644
index 00000000000..99bc39ffbce
--- /dev/null
+++ b/litmus/ftdev.c
@@ -0,0 +1,446 @@
1#include <linux/sched.h>
2#include <linux/fs.h>
3#include <linux/slab.h>
4#include <linux/cdev.h>
5#include <asm/uaccess.h>
6#include <linux/module.h>
7#include <linux/device.h>
8
9#include <litmus/litmus.h>
10#include <litmus/feather_trace.h>
11#include <litmus/ftdev.h>
12
13struct ft_buffer* alloc_ft_buffer(unsigned int count, size_t size)
14{
15 struct ft_buffer* buf;
16 size_t total = (size + 1) * count;
17 char* mem;
18 int order = 0, pages = 1;
19
20 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
21 if (!buf)
22 return NULL;
23
24 total = (total / PAGE_SIZE) + (total % PAGE_SIZE != 0);
25 while (pages < total) {
26 order++;
27 pages *= 2;
28 }
29
30 mem = (char*) __get_free_pages(GFP_KERNEL, order);
31 if (!mem) {
32 kfree(buf);
33 return NULL;
34 }
35
36 if (!init_ft_buffer(buf, count, size,
37 mem + (count * size), /* markers at the end */
38 mem)) { /* buffer objects */
39 free_pages((unsigned long) mem, order);
40 kfree(buf);
41 return NULL;
42 }
43 return buf;
44}
45
46void free_ft_buffer(struct ft_buffer* buf)
47{
48 int order = 0, pages = 1;
49 size_t total;
50
51 if (buf) {
52 total = (buf->slot_size + 1) * buf->slot_count;
53 total = (total / PAGE_SIZE) + (total % PAGE_SIZE != 0);
54 while (pages < total) {
55 order++;
56 pages *= 2;
57 }
58 free_pages((unsigned long) buf->buffer_mem, order);
59 kfree(buf);
60 }
61}
62
/* Singly-linked list node recording one enabled feather-trace event
 * for a minor device; maintained by activate()/deactivate() below. */
struct ftdev_event {
	int id;                   /* feather-trace event id */
	struct ftdev_event* next; /* next enabled event, NULL-terminated */
};
67
68static int activate(struct ftdev_event** chain, int id)
69{
70 struct ftdev_event* ev = kmalloc(sizeof(*ev), GFP_KERNEL);
71 if (ev) {
72 printk(KERN_INFO
73 "Enabling feather-trace event %d.\n", (int) id);
74 ft_enable_event(id);
75 ev->id = id;
76 ev->next = *chain;
77 *chain = ev;
78 }
79 return ev ? 0 : -ENOMEM;
80}
81
82static void deactivate(struct ftdev_event** chain, int id)
83{
84 struct ftdev_event **cur = chain;
85 struct ftdev_event *nxt;
86 while (*cur) {
87 if ((*cur)->id == id) {
88 nxt = (*cur)->next;
89 kfree(*cur);
90 *cur = nxt;
91 printk(KERN_INFO
92 "Disabling feather-trace event %d.\n", (int) id);
93 ft_disable_event(id);
94 break;
95 }
96 cur = &(*cur)->next;
97 }
98}
99
/* Character-device ->open(): each minor number maps to one
 * ftdev_minor (one trace buffer).  The first opener triggers the
 * optional alloc() hook; ftdm->readers counts concurrent openers. */
static int ftdev_open(struct inode *in, struct file *filp)
{
	struct ftdev* ftdev;
	struct ftdev_minor* ftdm;
	unsigned int buf_idx = iminor(in);
	int err = 0;

	/* recover the containing ftdev from the embedded cdev */
	ftdev = container_of(in->i_cdev, struct ftdev, cdev);

	if (buf_idx >= ftdev->minor_cnt) {
		err = -ENODEV;
		goto out;
	}
	/* give the backend a chance to veto this open */
	if (ftdev->can_open && (err = ftdev->can_open(ftdev, buf_idx)))
		goto out;

	ftdm = ftdev->minor + buf_idx;
	ftdm->ftdev = ftdev;
	filp->private_data = ftdm;

	if (mutex_lock_interruptible(&ftdm->lock)) {
		err = -ERESTARTSYS;
		goto out;
	}

	/* first reader allocates the buffer (if the hook is provided);
	 * on hook failure the reader count is left untouched */
	if (!ftdm->readers && ftdev->alloc)
		err = ftdev->alloc(ftdev, buf_idx);
	if (0 == err)
		ftdm->readers++;

	mutex_unlock(&ftdm->lock);
out:
	return err;
}
134
135static int ftdev_release(struct inode *in, struct file *filp)
136{
137 struct ftdev* ftdev;
138 struct ftdev_minor* ftdm;
139 unsigned int buf_idx = iminor(in);
140 int err = 0;
141
142 ftdev = container_of(in->i_cdev, struct ftdev, cdev);
143
144 if (buf_idx >= ftdev->minor_cnt) {
145 err = -ENODEV;
146 goto out;
147 }
148 ftdm = ftdev->minor + buf_idx;
149
150 if (mutex_lock_interruptible(&ftdm->lock)) {
151 err = -ERESTARTSYS;
152 goto out;
153 }
154
155 if (ftdm->readers == 1) {
156 while (ftdm->events)
157 deactivate(&ftdm->events, ftdm->events->id);
158
159 /* wait for any pending events to complete */
160 set_current_state(TASK_UNINTERRUPTIBLE);
161 schedule_timeout(HZ);
162
163 printk(KERN_ALERT "Failed trace writes: %u\n",
164 ftdm->buf->failed_writes);
165
166 if (ftdev->free)
167 ftdev->free(ftdev, buf_idx);
168 }
169
170 ftdm->readers--;
171 mutex_unlock(&ftdm->lock);
172out:
173 return err;
174}
175
176/* based on ft_buffer_read
177 * @returns < 0 : page fault
178 * = 0 : no data available
179 * = 1 : one slot copied
180 */
181static int ft_buffer_copy_to_user(struct ft_buffer* buf, char __user *dest)
182{
183 unsigned int idx;
184 int err = 0;
185 if (buf->free_count != buf->slot_count) {
186 /* data available */
187 idx = buf->read_idx % buf->slot_count;
188 if (buf->slots[idx] == SLOT_READY) {
189 err = copy_to_user(dest, ((char*) buf->buffer_mem) +
190 idx * buf->slot_size,
191 buf->slot_size);
192 if (err == 0) {
193 /* copy ok */
194 buf->slots[idx] = SLOT_FREE;
195 buf->read_idx++;
196 fetch_and_inc(&buf->free_count);
197 err = 1;
198 }
199 }
200 }
201 return err;
202}
203
/* Character-device ->read(): drain whole slots from the trace buffer
 * into user space.  Only multiples of the slot size are transferred
 * (a residual len < slot_size is never partially filled).  While
 * events are enabled and nothing has been copied yet, the call naps
 * in 50-jiffy intervals waiting for data.  Returns bytes copied,
 * 0 at end-of-data, or a negative error. */
static ssize_t ftdev_read(struct file *filp,
			  char __user *to, size_t len, loff_t *f_pos)
{
	/* we ignore f_pos, this is strictly sequential */

	ssize_t err = 0;
	size_t chunk;
	int copied;
	struct ftdev_minor* ftdm = filp->private_data;

	if (mutex_lock_interruptible(&ftdm->lock)) {
		err = -ERESTARTSYS;
		goto out;
	}


	chunk = ftdm->buf->slot_size;
	while (len >= chunk) {
		copied = ft_buffer_copy_to_user(ftdm->buf, to);
		if (copied == 1) {
			/* one slot transferred; advance the user pointer
			 * and account for it in the return value */
			len -= chunk;
			to += chunk;
			err += chunk;
		} else if (err == 0 && copied == 0 && ftdm->events) {
			/* Only wait if there are any events enabled and only
			 * if we haven't copied some data yet. We cannot wait
			 * here with copied data because that data would get
			 * lost if the task is interrupted (e.g., killed).
			 */
			mutex_unlock(&ftdm->lock);
			set_current_state(TASK_INTERRUPTIBLE);

			schedule_timeout(50);

			if (signal_pending(current)) {
				if (err == 0)
					/* nothing read yet, signal problem */
					err = -ERESTARTSYS;
				goto out;
			}
			/* re-acquire before looping; bail on signal */
			if (mutex_lock_interruptible(&ftdm->lock)) {
				err = -ERESTARTSYS;
				goto out;
			}
		} else if (copied < 0) {
			/* page fault */
			err = copied;
			break;
		} else
			/* nothing left to get, return to user space */
			break;
	}
	mutex_unlock(&ftdm->lock);
out:
	return err;
}
260
261static long ftdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
262{
263 long err = -ENOIOCTLCMD;
264 struct ftdev_minor* ftdm = filp->private_data;
265
266 if (mutex_lock_interruptible(&ftdm->lock)) {
267 err = -ERESTARTSYS;
268 goto out;
269 }
270
271 /* FIXME: check id against list of acceptable events */
272
273 switch (cmd) {
274 case FTDEV_ENABLE_CMD:
275 if (activate(&ftdm->events, arg))
276 err = -ENOMEM;
277 else
278 err = 0;
279 break;
280
281 case FTDEV_DISABLE_CMD:
282 deactivate(&ftdm->events, arg);
283 err = 0;
284 break;
285
286 default:
287 printk(KERN_DEBUG "ftdev: strange ioctl (%u, %lu)\n", cmd, arg);
288 };
289
290 mutex_unlock(&ftdm->lock);
291out:
292 return err;
293}
294
295static ssize_t ftdev_write(struct file *filp, const char __user *from,
296 size_t len, loff_t *f_pos)
297{
298 struct ftdev_minor* ftdm = filp->private_data;
299 ssize_t err = -EINVAL;
300 struct ftdev* ftdev = ftdm->ftdev;
301
302 /* dispatch write to buffer-specific code, if available */
303 if (ftdev->write)
304 err = ftdev->write(ftdm->buf, len, from);
305
306 return err;
307}
308
/* VFS entry points shared by every ftdev character device.
 * NOTE(review): non-static and non-const -- presumably exported via
 * litmus/ftdev.h for use elsewhere; confirm before narrowing. */
struct file_operations ftdev_fops = {
	.owner          = THIS_MODULE,
	.open           = ftdev_open,
	.release        = ftdev_release,
	.write          = ftdev_write,
	.read           = ftdev_read,
	.unlocked_ioctl = ftdev_ioctl,
};
317
318int ftdev_init( struct ftdev* ftdev, struct module* owner,
319 const int minor_cnt, const char* name)
320{
321 int i, err;
322
323 BUG_ON(minor_cnt < 1);
324
325 cdev_init(&ftdev->cdev, &ftdev_fops);
326 ftdev->name = name;
327 ftdev->minor_cnt = minor_cnt;
328 ftdev->cdev.owner = owner;
329 ftdev->cdev.ops = &ftdev_fops;
330 ftdev->alloc = NULL;
331 ftdev->free = NULL;
332 ftdev->can_open = NULL;
333 ftdev->write = NULL;
334
335 ftdev->minor = kcalloc(ftdev->minor_cnt, sizeof(*ftdev->minor),
336 GFP_KERNEL);
337 if (!ftdev->minor) {
338 printk(KERN_WARNING "ftdev(%s): Could not allocate memory\n",
339 ftdev->name);
340 err = -ENOMEM;
341 goto err_out;
342 }
343
344 for (i = 0; i < ftdev->minor_cnt; i++) {
345 mutex_init(&ftdev->minor[i].lock);
346 ftdev->minor[i].readers = 0;
347 ftdev->minor[i].buf = NULL;
348 ftdev->minor[i].events = NULL;
349 }
350
351 ftdev->class = class_create(owner, ftdev->name);
352 if (IS_ERR(ftdev->class)) {
353 err = PTR_ERR(ftdev->class);
354 printk(KERN_WARNING "ftdev(%s): "
355 "Could not create device class.\n", ftdev->name);
356 goto err_dealloc;
357 }
358
359 return 0;
360
361err_dealloc:
362 kfree(ftdev->minor);
363err_out:
364 return err;
365}
366
367/*
368 * Destroy minor devices up to, but not including, up_to.
369 */
370static void ftdev_device_destroy(struct ftdev* ftdev, unsigned int up_to)
371{
372 dev_t minor_cntr;
373
374 if (up_to < 1)
375 up_to = (ftdev->minor_cnt < 1) ? 0 : ftdev->minor_cnt;
376
377 for (minor_cntr = 0; minor_cntr < up_to; ++minor_cntr)
378 device_destroy(ftdev->class, MKDEV(ftdev->major, minor_cntr));
379}
380
/* Tear down everything register_ftdev()/ftdev_init() set up: device
 * nodes, cdev, chrdev region, device class, and per-minor state.
 * NOTE(review): the -1 below becomes UINT_MAX in the unsigned up_to
 * parameter of ftdev_device_destroy() -- verify that function treats
 * it as "all minors" rather than looping to UINT_MAX. */
void ftdev_exit(struct ftdev* ftdev)
{
	printk("ftdev(%s): Exiting\n", ftdev->name);
	ftdev_device_destroy(ftdev, -1);
	cdev_del(&ftdev->cdev);
	unregister_chrdev_region(MKDEV(ftdev->major, 0), ftdev->minor_cnt);
	class_destroy(ftdev->class);
	kfree(ftdev->minor);
}
390
/* Make an initialized ftdev visible to user space: allocate a char
 * device region, add the cdev, and create one device node per minor
 * under "litmus/<name><minor>".  On partial failure, every device
 * node created so far is destroyed and the region is released.
 * Returns 0 on success or a negative errno. */
int register_ftdev(struct ftdev* ftdev)
{
	struct device **device;
	dev_t trace_dev_tmp, minor_cntr;
	int err;

	err = alloc_chrdev_region(&trace_dev_tmp, 0, ftdev->minor_cnt,
				  ftdev->name);
	if (err) {
		printk(KERN_WARNING "ftdev(%s): "
		       "Could not allocate char. device region (%d minors)\n",
		       ftdev->name, ftdev->minor_cnt);
		goto err_out;
	}

	ftdev->major = MAJOR(trace_dev_tmp);

	err = cdev_add(&ftdev->cdev, trace_dev_tmp, ftdev->minor_cnt);
	if (err) {
		printk(KERN_WARNING "ftdev(%s): "
		       "Could not add cdev for major %u with %u minor(s).\n",
		       ftdev->name, ftdev->major, ftdev->minor_cnt);
		goto err_unregister;
	}

	/* create the minor device(s) */
	for (minor_cntr = 0; minor_cntr < ftdev->minor_cnt; ++minor_cntr)
	{
		/* NOTE(review): minor_cntr is dev_t but formatted with %d
		 * below -- presumably dev_t is a 32-bit unsigned here;
		 * confirm the format specifier matches. */
		trace_dev_tmp = MKDEV(ftdev->major, minor_cntr);
		device = &ftdev->minor[minor_cntr].device;

		*device = device_create(ftdev->class, NULL, trace_dev_tmp, NULL,
					"litmus/%s%d", ftdev->name, minor_cntr);
		if (IS_ERR(*device)) {
			err = PTR_ERR(*device);
			printk(KERN_WARNING "ftdev(%s): "
			       "Could not create device major/minor number "
			       "%u/%u\n", ftdev->name, ftdev->major,
			       minor_cntr);
			printk(KERN_WARNING "ftdev(%s): "
			       "will attempt deletion of allocated devices.\n",
			       ftdev->name);
			goto err_minors;
		}
	}

	return 0;

err_minors:
	/* destroy only the nodes created before the failure */
	ftdev_device_destroy(ftdev, minor_cntr);
	cdev_del(&ftdev->cdev);
err_unregister:
	unregister_chrdev_region(MKDEV(ftdev->major, 0), ftdev->minor_cnt);
err_out:
	return err;
}