aboutsummaryrefslogtreecommitdiffstats
path: root/litmus/fdso.c
diff options
context:
space:
mode:
authorJeremy Erickson <jerickso@cs.unc.edu>2012-08-30 21:01:47 -0400
committerJeremy Erickson <jerickso@cs.unc.edu>2012-08-30 21:01:47 -0400
commitb1e1fea67bca3796d5f9133a92c300ec4fa93a4f (patch)
tree5cc1336e1fe1d6f93b1067e73e43381dd20db690 /litmus/fdso.c
parentf6f94e2ab1b33f0082ac22d71f66385a60d8157f (diff)
Bjoern's Dissertation Code with Priority Donationwip-splitting-omlp-jerickso
Diffstat (limited to 'litmus/fdso.c')
-rw-r--r--litmus/fdso.c297
1 files changed, 297 insertions, 0 deletions
diff --git a/litmus/fdso.c b/litmus/fdso.c
new file mode 100644
index 000000000000..2c629598e3c9
--- /dev/null
+++ b/litmus/fdso.c
@@ -0,0 +1,297 @@
1/* fdso.c - file descriptor attached shared objects
2 *
3 * (c) 2007 B. Brandenburg, LITMUS^RT project
4 *
5 * Notes:
6 * - objects descriptor (OD) tables are not cloned during a fork.
7 * - objects are created on-demand, and freed after the last reference
8 * is dropped.
9 * - for now, object types are hard coded.
10 * - As long as we have live objects, we keep a reference to the inode.
11 */
12
13#include <linux/errno.h>
14#include <linux/sched.h>
15#include <linux/mutex.h>
16#include <linux/file.h>
17#include <asm/uaccess.h>
18
19#include <litmus/fdso.h>
20
/* Generic lock ops — defined elsewhere in the LITMUS^RT tree
 * (presumably litmus/locking.c — TODO confirm). */
extern struct fdso_ops generic_lock_ops;

/* Per-type dispatch table, indexed by obj_type_t.  The order of the
 * entries must match the obj_type_t enumeration in litmus/fdso.h;
 * every currently supported type is a lock sharing the same ops. */
static const struct fdso_ops* fdso_ops[] = {
	&generic_lock_ops, /* FMLP_SEM */
	&generic_lock_ops, /* SRP_SEM */
	&generic_lock_ops, /* MPCP_SEM */
	&generic_lock_ops, /* MPCP_VS_SEM */
	&generic_lock_ops, /* DPCP_SEM */
	&generic_lock_ops, /* OMLP_SEM */
};
31
32static int fdso_create(void** obj_ref, obj_type_t type, void* __user config)
33{
34 if (fdso_ops[type]->create)
35 return fdso_ops[type]->create(obj_ref, type, config);
36 else
37 return -EINVAL;
38}
39
40static void fdso_destroy(obj_type_t type, void* obj)
41{
42 fdso_ops[type]->destroy(type, obj);
43}
44
45static int fdso_open(struct od_table_entry* entry, void* __user config)
46{
47 if (fdso_ops[entry->obj->type]->open)
48 return fdso_ops[entry->obj->type]->open(entry, config);
49 else
50 return 0;
51}
52
53static int fdso_close(struct od_table_entry* entry)
54{
55 if (fdso_ops[entry->obj->type]->close)
56 return fdso_ops[entry->obj->type]->close(entry);
57 else
58 return 0;
59}
60
/* inode must be locked already */
/* Allocate a new inode-attached object of (type, id), link it onto the
 * inode's object list, and pin the inode with an extra reference so it
 * stays live as long as the object does (released in put_inode_obj()).
 * On success stores the new object — with an initial refcount of 1
 * owned by the caller — in *obj_ref and returns 0; on failure returns
 * a negative errno and leaves *obj_ref untouched. */
static int alloc_inode_obj(struct inode_obj_id** obj_ref,
			   struct inode* inode,
			   obj_type_t type,
			   unsigned int id,
			   void* __user config)
{
	struct inode_obj_id* obj;
	void* raw_obj;
	int err;

	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj) {
		return -ENOMEM;
	}

	/* Create the type-specific payload before linking anything, so a
	 * failure here only needs to free the wrapper. */
	err = fdso_create(&raw_obj, type, config);
	if (err != 0) {
		kfree(obj);
		return err;
	}

	INIT_LIST_HEAD(&obj->list);
	/* count == 1: the reference handed back to the caller. */
	atomic_set(&obj->count, 1);
	obj->type = type;
	obj->id = id;
	obj->obj = raw_obj;
	obj->inode = inode;

	list_add(&obj->list, &inode->i_obj_list);
	/* Keep the inode alive while objects reference it. */
	atomic_inc(&inode->i_count);

	printk(KERN_DEBUG "alloc_inode_obj(%p, %d, %d): object created\n", inode, type, id);

	*obj_ref = obj;
	return 0;
}
98
99/* inode must be locked already */
100static struct inode_obj_id* get_inode_obj(struct inode* inode,
101 obj_type_t type,
102 unsigned int id)
103{
104 struct list_head* pos;
105 struct inode_obj_id* obj = NULL;
106
107 list_for_each(pos, &inode->i_obj_list) {
108 obj = list_entry(pos, struct inode_obj_id, list);
109 if (obj->id == id && obj->type == type) {
110 atomic_inc(&obj->count);
111 return obj;
112 }
113 }
114 printk(KERN_DEBUG "get_inode_obj(%p, %d, %d): couldn't find object\n", inode, type, id);
115 return NULL;
116}
117
/* Drop one reference to an inode-attached object.  When the count hits
 * zero, unlink and destroy the object and release the inode reference
 * taken in alloc_inode_obj().
 *
 * The decrement happens outside i_obj_mutex, so after winning
 * atomic_dec_and_test() the count is re-checked under the mutex:
 * get_inode_obj() runs with the mutex held and may have taken a new
 * reference in the meantime, in which case the object must survive. */
static void put_inode_obj(struct inode_obj_id* obj)
{
	struct inode* inode;
	int let_go = 0;

	inode = obj->inode;
	if (atomic_dec_and_test(&obj->count)) {

		mutex_lock(&inode->i_obj_mutex);
		/* no new references can be obtained */
		if (!atomic_read(&obj->count)) {
			list_del(&obj->list);
			fdso_destroy(obj->type, obj->obj);
			kfree(obj);
			let_go = 1;
		}
		mutex_unlock(&inode->i_obj_mutex);
		/* Release the inode only after dropping the mutex, since the
		 * mutex lives inside the inode being released. */
		if (let_go)
			iput(inode);
	}
}
140
141static struct od_table_entry* get_od_entry(struct task_struct* t)
142{
143 struct od_table_entry* table;
144 int i;
145
146
147 table = t->od_table;
148 if (!table) {
149 table = kzalloc(sizeof(*table) * MAX_OBJECT_DESCRIPTORS,
150 GFP_KERNEL);
151 t->od_table = table;
152 }
153
154 for (i = 0; table && i < MAX_OBJECT_DESCRIPTORS; i++)
155 if (!table[i].used) {
156 table[i].used = 1;
157 return table + i;
158 }
159 return NULL;
160}
161
162static int put_od_entry(struct od_table_entry* od)
163{
164 put_inode_obj(od->obj);
165 od->used = 0;
166 return 0;
167}
168
169void exit_od_table(struct task_struct* t)
170{
171 int i;
172
173 if (t->od_table) {
174 for (i = 0; i < MAX_OBJECT_DESCRIPTORS; i++)
175 if (t->od_table[i].used)
176 put_od_entry(t->od_table + i);
177 kfree(t->od_table);
178 t->od_table = NULL;
179 }
180}
181
/* Core of sys_od_open(): bind an object descriptor in the current
 * task's table to the (type, id) object attached to file's inode,
 * creating the object on demand.  Returns the descriptor index (>= 0)
 * on success or a negative errno.  Serializes object lookup/creation
 * with the inode's i_obj_mutex. */
static int do_sys_od_open(struct file* file, obj_type_t type, int id,
			  void* __user config)
{
	int idx = 0, err = 0;
	struct inode* inode;
	struct inode_obj_id* obj = NULL;
	struct od_table_entry* entry;

	inode = file->f_dentry->d_inode;

	/* Reserve a table slot before taking the mutex. */
	entry = get_od_entry(current);
	if (!entry)
		return -ENOMEM;

	mutex_lock(&inode->i_obj_mutex);
	obj = get_inode_obj(inode, type, id);
	if (!obj)
		err = alloc_inode_obj(&obj, inode, type, id, config);
	if (err != 0) {
		/* Creation failed: give the reserved slot back and return
		 * the error as the "index". */
		obj = NULL;
		idx = err;
		entry->used = 0;
	} else {
		entry->obj = obj;
		entry->class = fdso_ops[type];
		idx = entry - current->od_table;
	}

	mutex_unlock(&inode->i_obj_mutex);

	/* open only if creation succeeded */
	if (!err)
		err = fdso_open(entry, config);
	if (err < 0) {
		/* The class rejected the open call.
		 * We need to clean up and tell user space.
		 */
		/* obj != NULL means the slot still holds a reference that
		 * put_od_entry() must drop; obj == NULL means the slot was
		 * already released above. */
		if (obj)
			put_od_entry(entry);
		idx = err;
	}

	return idx;
}
226
227
228struct od_table_entry* get_entry_for_od(int od)
229{
230 struct task_struct *t = current;
231
232 if (!t->od_table)
233 return NULL;
234 if (od < 0 || od >= MAX_OBJECT_DESCRIPTORS)
235 return NULL;
236 if (!t->od_table[od].used)
237 return NULL;
238 return t->od_table + od;
239}
240
241
242asmlinkage long sys_od_open(int fd, int type, int obj_id, void* __user config)
243{
244 int ret = 0;
245 struct file* file;
246
247 /*
248 1) get file from fd, get inode from file
249 2) lock inode
250 3) try to lookup object
251 4) if not present create and enqueue object, inc inode refcnt
252 5) increment refcnt of object
253 6) alloc od_table_entry, setup ptrs
254 7) unlock inode
255 8) return offset in od_table as OD
256 */
257
258 if (type < MIN_OBJ_TYPE || type > MAX_OBJ_TYPE) {
259 ret = -EINVAL;
260 goto out;
261 }
262
263 file = fget(fd);
264 if (!file) {
265 ret = -EBADF;
266 goto out;
267 }
268
269 ret = do_sys_od_open(file, type, obj_id, config);
270
271 fput(file);
272
273out:
274 return ret;
275}
276
277
278asmlinkage long sys_od_close(int od)
279{
280 int ret = -EINVAL;
281 struct task_struct *t = current;
282
283 if (od < 0 || od >= MAX_OBJECT_DESCRIPTORS)
284 return ret;
285
286 if (!t->od_table || !t->od_table[od].used)
287 return ret;
288
289
290 /* give the class a chance to reject the close
291 */
292 ret = fdso_close(t->od_table + od);
293 if (ret == 0)
294 ret = put_od_entry(t->od_table + od);
295
296 return ret;
297}