author    Bjoern B. Brandenburg <bbb@cs.unc.edu>  2007-05-15 23:29:07 -0400
committer Bjoern B. Brandenburg <bbb@cs.unc.edu>  2007-05-15 23:29:07 -0400
commit    7960d976f81206f7fa98965bd57998c71c9d59c8 (patch)
tree      cc3699eaf5cb2c786a32d28dd60f465894a24125 /include/linux/feather_buffer.h
parent    93c8ca4362c6b275cc89130ae7eb5f9cbdffef80 (diff)

Merged Feather-Trace.

Diffstat (limited to 'include/linux/feather_buffer.h')
 -rw-r--r--  include/linux/feather_buffer.h  108
 1 file changed, 108 insertions, 0 deletions
diff --git a/include/linux/feather_buffer.h b/include/linux/feather_buffer.h
new file mode 100644
index 0000000000..c477772335
--- /dev/null
+++ b/include/linux/feather_buffer.h
@@ -0,0 +1,108 @@
#ifndef _FEATHER_BUFFER_H_
#define _FEATHER_BUFFER_H_

/* requires UINT_MAX and memcpy */

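/* Atomic fetch-and-add primitives built on the x86 "lock xaddl" instruction.
 * Both return the value of *val as it was before the update, so callers can
 * use the old value to decide whether their reservation succeeded.
 */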
static inline int fetch_and_inc(int *val)
{
	int ret = 1;
	__asm__ __volatile__("lock; xaddl %0, %1" : "+r" (ret), "+m" (*val) : : "memory" );
	return ret;
}

static inline int fetch_and_dec(int *val)
{
	int ret = -1;
	__asm__ __volatile__("lock; xaddl %0, %1" : "+r" (ret), "+m" (*val) : : "memory" );
	return ret;
}

#define SLOT_FREE  0
#define SLOT_BUSY  1
#define SLOT_READY 2

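/* A multi-writer, single-reader FIFO buffer: writers reserve a slot through
 * the atomic free_count/write_idx counters, fill it, and mark it SLOT_READY;
 * the single reader drains ready slots in order via read_idx. The caller
 * supplies both the per-slot state array (slots) and the payload storage
 * (buffer_mem). failed_writes counts reservations rejected because the
 * buffer was full.
 */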
struct ft_buffer {
	unsigned int	slot_count;
	unsigned int	slot_size;

	int		free_count;
	unsigned int	write_idx;
	unsigned int	read_idx;

	char*		slots;
	void*		buffer_mem;
	unsigned int	failed_writes;
};

static inline int init_ft_buffer(struct ft_buffer* buf,
				 unsigned int slot_count,
				 unsigned int slot_size,
				 char* slots,
				 void* buffer_mem)
{
	unsigned int i = 0;
	if (!slot_count || UINT_MAX % slot_count != slot_count - 1) {
		/* The slot count must evenly divide UINT_MAX + 1 (i.e., it
		 * must be a power of two) so that when write_idx wraps around
		 * the derived slot index correctly continues at 0.
		 */
		return 0;
	} else {
		buf->slot_count = slot_count;
		buf->slot_size  = slot_size;
		buf->slots      = slots;
		buf->buffer_mem = buffer_mem;
		buf->free_count = slot_count;
		buf->write_idx  = 0;
		buf->read_idx   = 0;
		buf->failed_writes = 0;
		for (i = 0; i < slot_count; i++)
			buf->slots[i] = SLOT_FREE;
		return 1;
	}
}
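/* Worked example of the check above (hypothetical values, assuming a 32-bit
 * unsigned int): slot_count = 256 is accepted, since UINT_MAX % 256 == 255,
 * whereas slot_count = 100 is rejected because 2^32 is not a multiple of 100.
 */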

/* A writer reserves a slot by decrementing free_count and claiming the next
 * write_idx position; *ptr is set to the slot's payload memory. Returns 0 and
 * counts a failed write if the buffer is currently full.
 */
static inline int ft_buffer_start_write(struct ft_buffer* buf, void **ptr)
{
	int free = fetch_and_dec(&buf->free_count);
	unsigned int idx;
	if (free <= 0) {
		/* buffer is full; undo the reservation */
		fetch_and_inc(&buf->free_count);
		*ptr = 0;
		fetch_and_inc((int*) &buf->failed_writes);
		return 0;
	} else {
		idx = fetch_and_inc((int*) &buf->write_idx) % buf->slot_count;
		buf->slots[idx] = SLOT_BUSY;
		*ptr = ((char*) buf->buffer_mem) + idx * buf->slot_size;
		return 1;
	}
}

/* Marks a previously reserved slot as ready for the reader; the slot index is
 * recovered from the payload pointer returned by ft_buffer_start_write().
 */
static inline void ft_buffer_finish_write(struct ft_buffer* buf, void *ptr)
{
	unsigned int idx = ((char*) ptr - (char*) buf->buffer_mem) / buf->slot_size;
	buf->slots[idx] = SLOT_READY;
}

/* exclusive reader access is assumed */
static inline int ft_buffer_read(struct ft_buffer* buf, void* dest)
{
	unsigned int idx;
	if (buf->free_count == buf->slot_count)
		/* nothing available */
		return 0;
	idx = buf->read_idx % buf->slot_count;
	if (buf->slots[idx] == SLOT_READY) {
		memcpy(dest, ((char*) buf->buffer_mem) + idx * buf->slot_size,
		       buf->slot_size);
		buf->slots[idx] = SLOT_FREE;
		buf->read_idx++;
		fetch_and_inc(&buf->free_count);
		return 1;
	} else
		return 0;
}

#endif
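
For orientation, a minimal usage sketch of the new API follows. The record
type, storage sizes, and function names below (trace_event, trace_record,
trace_drain) are illustrative assumptions, not part of this commit; only the
ft_buffer functions themselves come from the header above.

#include <linux/feather_buffer.h>	/* the header added by this commit */

struct trace_event {
	unsigned long timestamp;
	unsigned long id;
};

/* caller-provided backing storage: one state byte and one record per slot */
static char               event_slots[256];
static struct trace_event event_mem[256];
static struct ft_buffer   event_buf;

static void trace_setup(void)
{
	/* 256 divides UINT_MAX + 1, so init_ft_buffer() accepts it */
	init_ft_buffer(&event_buf, 256, sizeof(struct trace_event),
		       event_slots, event_mem);
}

/* writer side: may be called concurrently */
static void trace_record(unsigned long id, unsigned long timestamp)
{
	void *slot;
	struct trace_event *ev;

	if (ft_buffer_start_write(&event_buf, &slot)) {
		ev            = slot;
		ev->id        = id;
		ev->timestamp = timestamp;
		ft_buffer_finish_write(&event_buf, slot);
	}
	/* otherwise the event is dropped and failed_writes is incremented */
}

/* reader side: a single reader drains records in FIFO order */
static int trace_drain(struct trace_event *dest)
{
	return ft_buffer_read(&event_buf, dest);
}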