Diffstat (limited to 'include/litmus/feather_buffer.h')
-rw-r--r--  include/litmus/feather_buffer.h  |  118
1 file changed, 118 insertions, 0 deletions
diff --git a/include/litmus/feather_buffer.h b/include/litmus/feather_buffer.h
new file mode 100644
index 000000000000..38de95b73553
--- /dev/null
+++ b/include/litmus/feather_buffer.h
@@ -0,0 +1,118 @@
#ifndef _FEATHER_BUFFER_H_
#define _FEATHER_BUFFER_H_

/* requires UINT_MAX, memcpy(), and the fetch_and_inc()/fetch_and_dec()/
 * ft_atomic_dec() primitives used below */

#define SLOT_FREE	0
#define SLOT_BUSY	1
#define SLOT_READY	2

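/* Ring buffer of fixed-size records, shared by one or more writers and a
 * single reader.  A slot moves from SLOT_FREE to SLOT_BUSY when a writer
 * claims it, to SLOT_READY once the record is complete, and back to
 * SLOT_FREE after the reader has copied it out.
 */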
struct ft_buffer {
	unsigned int	slot_count;
	unsigned int	slot_size;

	int		free_count;
	unsigned int	write_idx;
	unsigned int	read_idx;

	char*		slots;
	void*		buffer_mem;
	unsigned int	failed_writes;
};

static inline int init_ft_buffer(struct ft_buffer* buf,
				 unsigned int slot_count,
				 unsigned int slot_size,
				 char* slots,
				 void* buffer_mem)
{
	unsigned int i;
	if (!slot_count || UINT_MAX % slot_count != slot_count - 1) {
		/* The slot count must evenly divide UINT_MAX + 1 so that the
		 * write/read indices still map to slot 0 when they wrap
		 * around; in practice this means slot_count must be a power
		 * of two.
		 */
		return 0;
	} else {
		buf->slot_count = slot_count;
		buf->slot_size  = slot_size;
		buf->slots      = slots;
		buf->buffer_mem = buffer_mem;
		buf->free_count = slot_count;
		buf->write_idx  = 0;
		buf->read_idx   = 0;
		buf->failed_writes = 0;
		for (i = 0; i < slot_count; i++)
			buf->slots[i] = SLOT_FREE;
		return 1;
	}
}

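/* Claim a free slot for writing and return a pointer to its payload via
 * *ptr.  Safe for concurrent writers: free_count is decremented atomically
 * to reserve a slot; if the buffer is full, the reservation is rolled back,
 * the failure is counted in failed_writes, and 0 is returned.
 */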
static inline int ft_buffer_start_write(struct ft_buffer* buf, void **ptr)
{
	int free = fetch_and_dec(&buf->free_count);
	unsigned int idx;
	if (free <= 0) {
		fetch_and_inc(&buf->free_count);
		*ptr = 0;
		fetch_and_inc(&buf->failed_writes);
		return 0;
	} else {
		idx = fetch_and_inc((int*) &buf->write_idx) % buf->slot_count;
		buf->slots[idx] = SLOT_BUSY;
		*ptr = ((char*) buf->buffer_mem) + idx * buf->slot_size;
		return 1;
	}
}

/* For single writer scenarios, with fewer atomic ops. */
static inline int ft_buffer_start_single_write(struct ft_buffer* buf, void **ptr)
{
	unsigned int idx;

	if (buf->free_count <= 0) {
		*ptr = 0;
		/* single writer: no atomicity needed */
		buf->failed_writes++;
		return 0;
	} else {
		/* free_count is positive, and can only increase since we are
		 * (by assumption) the only writer accessing the buffer.
		 */

		idx = buf->write_idx++ % buf->slot_count;
		buf->slots[idx] = SLOT_BUSY;
		*ptr = ((char*) buf->buffer_mem) + idx * buf->slot_size;

		ft_atomic_dec(&buf->free_count);
		return 1;
	}
}

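/* Publish a record: mark the slot obtained from ft_buffer_start_write() or
 * ft_buffer_start_single_write() as ready for the reader.
 */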
static inline void ft_buffer_finish_write(struct ft_buffer* buf, void *ptr)
{
	unsigned int idx = ((char*) ptr - (char*) buf->buffer_mem) / buf->slot_size;
	buf->slots[idx] = SLOT_READY;
}


/* Copy the oldest complete record into dest and free its slot.  Returns 1
 * on success, 0 if the buffer is empty or the next record is still being
 * written.  Exclusive reader access is assumed.
 */
static inline int ft_buffer_read(struct ft_buffer* buf, void* dest)
{
	unsigned int idx;
	if (buf->free_count == buf->slot_count)
		/* nothing available */
		return 0;
	idx = buf->read_idx % buf->slot_count;
	if (buf->slots[idx] == SLOT_READY) {
		memcpy(dest, ((char*) buf->buffer_mem) + idx * buf->slot_size,
		       buf->slot_size);
		buf->slots[idx] = SLOT_FREE;
		buf->read_idx++;
		fetch_and_inc(&buf->free_count);
		return 1;
	} else
		return 0;
}


#endif /* _FEATHER_BUFFER_H_ */
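
For context, here is a minimal usage sketch of the API added by this file. It is illustrative only and not part of the patch: the record type, buffer size, and surrounding function names (trace_record, NUM_SLOTS, trace_init(), trace_event(), trace_drain_one()) are hypothetical, and it assumes a single writer and a single reader as well as the fetch_and_inc()/fetch_and_dec()/ft_atomic_dec() primitives that this header expects the including code to provide.

	/* Record type stored in each slot (example only). */
	struct trace_record {
		unsigned long timestamp;
		unsigned int  event_id;
	};

	#define NUM_SLOTS 64	/* must be a power of two */

	static struct ft_buffer    trace_buf;
	static char                trace_slots[NUM_SLOTS];
	static struct trace_record trace_mem[NUM_SLOTS];

	static void trace_init(void)
	{
		init_ft_buffer(&trace_buf, NUM_SLOTS, sizeof(struct trace_record),
			       trace_slots, trace_mem);
	}

	/* Writer side: reserve a slot, fill it, publish it. */
	static void trace_event(unsigned int id, unsigned long now)
	{
		struct trace_record *rec;

		if (ft_buffer_start_single_write(&trace_buf, (void**) &rec)) {
			rec->timestamp = now;
			rec->event_id  = id;
			ft_buffer_finish_write(&trace_buf, rec);
		}
		/* otherwise the buffer was full; the drop is counted in failed_writes */
	}

	/* Reader side: returns 1 if a record was copied into *dest, 0 if none ready. */
	static int trace_drain_one(struct trace_record *dest)
	{
		return ft_buffer_read(&trace_buf, dest);
	}

If several writers (e.g. different CPUs) could log concurrently, ft_buffer_start_write() would be used in place of ft_buffer_start_single_write().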