author    Jeremy Erickson <jerickso@cs.unc.edu>  2012-08-30 21:01:47 -0400
committer Jeremy Erickson <jerickso@cs.unc.edu>  2012-08-30 21:01:47 -0400
commit    b1e1fea67bca3796d5f9133a92c300ec4fa93a4f (patch)
tree      5cc1336e1fe1d6f93b1067e73e43381dd20db690 /litmus/locking.c
parent    f6f94e2ab1b33f0082ac22d71f66385a60d8157f (diff)

    Bjoern's Dissertation Code with Priority Donation (wip-splitting-omlp-jerickso)
Diffstat (limited to 'litmus/locking.c')
-rw-r--r--  litmus/locking.c  186
1 file changed, 186 insertions, 0 deletions
diff --git a/litmus/locking.c b/litmus/locking.c
new file mode 100644
index 000000000000..84a1d8309699
--- /dev/null
+++ b/litmus/locking.c
@@ -0,0 +1,186 @@
#include <linux/sched.h>
#include <litmus/litmus.h>
#include <litmus/fdso.h>

#ifdef CONFIG_LITMUS_LOCKING

#include <litmus/sched_plugin.h>
#include <litmus/trace.h>
#include <litmus/wait.h>

static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg);
static int open_generic_lock(struct od_table_entry* entry, void* __user arg);
static int close_generic_lock(struct od_table_entry* entry);
static void destroy_generic_lock(obj_type_t type, void* sem);

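/* Generic fdso operations for locks: object creation is delegated to the
 * active scheduler plugin; open, close, and destroy forward to the callbacks
 * of the lock object itself. */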
struct fdso_ops generic_lock_ops = {
	.create  = create_generic_lock,
	.open    = open_generic_lock,
	.close   = close_generic_lock,
	.destroy = destroy_generic_lock
};

static inline bool is_lock(struct od_table_entry* entry)
{
	return entry->class == &generic_lock_ops;
}

static inline struct litmus_lock* get_lock(struct od_table_entry* entry)
{
	BUG_ON(!is_lock(entry));
	return (struct litmus_lock*) entry->obj->obj;
}

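/* Ask the currently active scheduler plugin to allocate a protocol-specific
 * lock object of the requested type. */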
static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg)
{
	struct litmus_lock* lock;
	int err;

	err = litmus->allocate_lock(&lock, type, arg);
	if (err == 0)
		*obj_ref = lock;
	return err;
}

static int open_generic_lock(struct od_table_entry* entry, void* __user arg)
{
	struct litmus_lock* lock = get_lock(entry);
	if (lock->ops->open)
		return lock->ops->open(lock, arg);
	else
		return 0; /* default: any task can open it */
}

static int close_generic_lock(struct od_table_entry* entry)
{
	struct litmus_lock* lock = get_lock(entry);
	if (lock->ops->close)
		return lock->ops->close(lock);
	else
		return 0; /* default: closing succeeds */
}

static void destroy_generic_lock(obj_type_t type, void* obj)
{
	struct litmus_lock* lock = (struct litmus_lock*) obj;
	lock->ops->deallocate(lock);
}

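/* Acquire the lock referenced by object descriptor lock_od by invoking the
 * protocol-specific lock() operation.  Returns -EINVAL if lock_od does not
 * refer to a lock object. */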
asmlinkage long sys_litmus_lock(int lock_od)
{
	long err = -EINVAL;
	struct od_table_entry* entry;
	struct litmus_lock* l;

	TS_SYSCALL_IN_START;

	TS_SYSCALL_IN_END;

	TS_LOCK_START;

	entry = get_entry_for_od(lock_od);
	if (entry && is_lock(entry)) {
		l = get_lock(entry);
		TRACE_CUR("attempts to lock 0x%p\n", l);
		err = l->ops->lock(l);
	}

	/* Note: the task may have been suspended or preempted in between! Take
	 * this into account when computing overheads. */
	TS_LOCK_END;

	TS_SYSCALL_OUT_START;

	return err;
}

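/* Release the lock referenced by object descriptor lock_od by invoking the
 * protocol-specific unlock() operation.  Returns -EINVAL if lock_od does not
 * refer to a lock object. */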
asmlinkage long sys_litmus_unlock(int lock_od)
{
	long err = -EINVAL;
	struct od_table_entry* entry;
	struct litmus_lock* l;

	TS_SYSCALL_IN_START;

	TS_SYSCALL_IN_END;

	TS_UNLOCK_START;

	entry = get_entry_for_od(lock_od);
	if (entry && is_lock(entry)) {
		l = get_lock(entry);
		TRACE_CUR("attempts to unlock 0x%p\n", l);
		err = l->ops->unlock(l);
	}

	/* Note: the task may have been preempted in between! Take this into
	 * account when computing overheads. */
	TS_UNLOCK_END;

	TS_SYSCALL_OUT_START;

	return err;
}

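/* Remove and return the first task waiting in the wait queue, or NULL if the
 * queue is empty.  Like other '__' wait-queue helpers, callers are expected
 * to provide their own locking. */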
struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq)
{
	wait_queue_t* q;
	struct task_struct* t = NULL;

	if (waitqueue_active(wq)) {
		q = list_entry(wq->task_list.next,
			       wait_queue_t, task_list);
		t = (struct task_struct*) q->private;
		__remove_wait_queue(wq, q);
	}
	return t;
}

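/* Insert 'new' as an exclusive entry in priority order (ties broken by the
 * tie_breaker field).  Returns the number of already-queued entries that
 * precede the new one. */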
unsigned int __add_wait_queue_prio_exclusive(
	wait_queue_head_t* head,
	prio_wait_queue_t *new)
{
	struct list_head *pos;
	unsigned int passed = 0;

	new->wq.flags |= WQ_FLAG_EXCLUSIVE;

	/* find a spot where the new entry is less than the next */
	list_for_each(pos, &head->task_list) {
		prio_wait_queue_t* queued = list_entry(pos, prio_wait_queue_t,
						       wq.task_list);

		if (unlikely(lt_before(new->priority, queued->priority) ||
			     (new->priority == queued->priority &&
			      new->tie_breaker < queued->tie_breaker))) {
			/* pos is not less than new, thus insert here */
			__list_add(&new->wq.task_list, pos->prev, pos);
			goto out;
		}
		passed++;
	}

	/* if we get to this point, either the list is empty or every queued
	 * element is less than new.
	 * Let's add new to the end. */
	list_add_tail(&new->wq.task_list, &head->task_list);
out:
	return passed;
}


#else

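/* CONFIG_LITMUS_LOCKING is disabled: provide an empty fdso ops table and
 * reject the locking system calls. */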
struct fdso_ops generic_lock_ops = {};

asmlinkage long sys_litmus_lock(int sem_od)
{
	return -ENOSYS;
}

asmlinkage long sys_litmus_unlock(int sem_od)
{
	return -ENOSYS;
}

#endif