about summary refs log tree commit diff stats
path: root/litmus/color_dev.c
diff options
context:
space:
mode:
author: Christopher Kenna <cjk@cs.unc.edu> 2012-04-01 19:24:46 -0400
committer: Christopher Kenna <cjk@cs.unc.edu> 2012-04-08 14:32:31 -0400
commit: ae95a4582d707de8a57a8159ea81b16ba7bddd54 (patch)
tree: 2d4af6e521e655e52cb51476ccf7c20d1d8cfe9c /litmus/color_dev.c
parent: db4d9fcd3dfbda54b351ef42c13d93a00009784f (diff)
Page reclaiming, control devices, cleanup.
Track allocated pages and add a proc handler to reclaim free pages and add control device for allocating colored memory with mmap.
Diffstat (limited to 'litmus/color_dev.c')
-rw-r--r--litmus/color_dev.c312
1 files changed, 312 insertions, 0 deletions
diff --git a/litmus/color_dev.c b/litmus/color_dev.c
new file mode 100644
index 000000000000..b8218b6d1d9c
--- /dev/null
+++ b/litmus/color_dev.c
@@ -0,0 +1,312 @@
1#include <linux/sched.h>
2#include <linux/mm.h>
3#include <linux/fs.h>
4#include <linux/miscdevice.h>
5#include <linux/spinlock.h>
6#include <linux/module.h>
7
8#include <litmus/litmus.h>
9#include <litmus/color.h>
10
11#define ALLOC_NAME "litmus/color_alloc"
12#define CTRL_NAME "litmus/color_ctrl"
13
/* Shared round-robin color pool for tasks that are not real-time:
 * `color` is the next color to hand out, advanced under `lock`. */
static struct non_rt_colors {
	spinlock_t lock;
	unsigned long color;	/* next color to allocate, in [0, nr_colors) */
} non_rt_colors;
18
19extern unsigned long nr_colors;
20
21/***********************************************************
22 * Control device
23***********************************************************/
24
/* VMA close callback for the control device. Purely diagnostic: it
 * traces the region being torn down. The control page itself is not
 * freed here — per alloc_color_ctrl_page(), it is released during
 * task teardown. */
static void litmus_color_ctrl_vm_close(struct vm_area_struct *vma)
{
	TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__,
			vma->vm_flags, pgprot_val(vma->vm_page_prot));

	TRACE_CUR(CTRL_NAME ": %p:%p vma:%p vma->vm_private_data:%p closed.\n",
			(void*) vma->vm_start, (void*) vma->vm_end, vma,
			vma->vm_private_data);
}
34
/* Fault handler for the control device VMA.
 * The single control page is mapped eagerly in mmap(), so any fault
 * here means userspace touched an address it should not have; report
 * it and deliver SIGBUS. */
static int litmus_color_ctrl_vm_fault(struct vm_area_struct *vma,
		struct vm_fault *vmf)
{
	/* This function should never be called, since
	 * all pages should have been mapped by mmap()
	 * already. */
	TRACE_CUR("%s flags=0x%lx\n", __FUNCTION__, vma->vm_flags);
	printk(KERN_WARNING "fault: %s flags=0x%lx\n", __FUNCTION__,
			vma->vm_flags);

	/* nope, you only get one page */
	return VM_FAULT_SIGBUS;
}
48
/* VM operations installed on every control-device mapping. */
static struct vm_operations_struct litmus_color_ctrl_vm_ops = {
	.close = litmus_color_ctrl_vm_close,
	.fault = litmus_color_ctrl_vm_fault,
};
53
54static int mmap_common_checks(struct vm_area_struct *vma)
55{
56 /* you can only map the "first" page */
57 if (vma->vm_pgoff != 0)
58 return -EINVAL;
59
60 /* you can't share it with anyone */
61 if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
62 return -EINVAL;
63
64 return 0;
65}
66
/* Lazily allocate the calling task's color control page.
 * Idempotent: a task that already owns a page keeps it unchanged.
 * Returns 0 on success (or if already allocated), -ENOMEM otherwise.
 * The page is freed during task teardown, not here. */
static int alloc_color_ctrl_page(void)
{
	struct task_struct *t;
	int err = 0;

	t = current;
	/* only allocate if the task doesn't have one yet */
	if (!tsk_rt(t)->color_ctrl_page) {
		/* zeroed so userspace starts from a well-defined state */
		tsk_rt(t)->color_ctrl_page = (void*) get_zeroed_page(GFP_KERNEL);
		if (!tsk_rt(t)->color_ctrl_page)
			err = -ENOMEM;
		/* will get de-allocated in task teardown */
		TRACE_TASK(t, "%s color_ctrl_page = %p\n", __FUNCTION__,
				tsk_rt(t)->color_ctrl_page);
	}
	return err;
}
84
85static int map_color_ctrl_page(struct vm_area_struct *vma)
86{
87 int err;
88 unsigned long pfn;
89 struct task_struct *t = current;
90 struct page *color_ctrl = virt_to_page(tsk_rt(t)->color_ctrl_page);
91
92 t = current;
93 /* Increase ref count. Is decreased when vma is destroyed. */
94 get_page(color_ctrl);
95 pfn = page_to_pfn(color_ctrl);
96
97 TRACE_CUR(CTRL_NAME
98 ": mapping %p (pfn:%lx, %lx) to 0x%lx (prot:%lx)\n",
99 tsk_rt(t)->color_ctrl_page, pfn, page_to_pfn(color_ctrl),
100 vma->vm_start, vma->vm_page_prot);
101
102 /* Map it into the vma. Make sure to use PAGE_SHARED, otherwise
103 * userspace actually gets a copy-on-write page. */
104 err = remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE, PAGE_SHARED);
105 TRACE_CUR("page shared: guess:0x1(63)...1??111 actual:0x%lx\n", PAGE_SHARED);
106 /* present, RW, user, accessed, NX=63 */
107
108 if (err)
109 TRACE_CUR(CTRL_NAME ": remap_pfn_range() failed (%d)\n", err);
110
111 return err;
112}
113
/* mmap handler for the control device: gives the calling task exactly
 * one page — its color control page — allocating it on first use.
 * Enforces length == PAGE_SIZE, offset 0, and a private mapping; the
 * mapping is not inherited across fork and cannot be expanded.
 * Returns 0 on success or a negative errno. */
static int litmus_color_ctrl_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int err = 0;

	/* you can only get one page */
	if (vma->vm_end - vma->vm_start != PAGE_SIZE) {
		err = -EINVAL;
		goto out;
	}

	err = mmap_common_checks(vma);
	if (err)
		goto out;

	vma->vm_ops = &litmus_color_ctrl_vm_ops;
	/* this mapping should not be kept across forks,
	 * and cannot be expanded */
	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;

	err = alloc_color_ctrl_page();
	if (!err)
		err = map_color_ctrl_page(vma);

	TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__, vma->vm_flags,
			pgprot_val(vma->vm_page_prot));
out:
	return err;
}
142
143
144/***********************************************************
145 * Allocation device
146***********************************************************/
147
148static int map_colored_pages_non_rt(struct vm_area_struct *vma)
149{
150 unsigned long color, mapped;
151 int err;
152 const unsigned long nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
153
154 spin_lock(&non_rt_colors.lock);
155 color = non_rt_colors.color;
156 non_rt_colors.color = (non_rt_colors.color + nr_pages) % nr_colors;
157 spin_unlock(&non_rt_colors.lock);
158
159 TRACE_CUR(ALLOC_NAME ": allocating %lu pages from color %lu.\n",
160 nr_pages, color);
161
162 for (mapped = 0; mapped < nr_pages;
163 mapped++, color = (color + 1) % nr_colors)
164 {
165 struct page *page = get_colored_page(color);
166 const unsigned long addr = vma->vm_start + PAGE_SIZE * mapped;
167
168 if (!page) {
169 TRACE_CUR(ALLOC_NAME ": Could not get page with "
170 " color %lu.\n", color);
171 /* TODO unmap mapped pages */
172 break;
173 }
174 TRACE_CUR(ALLOC_NAME ": insert page (pa:0x%llx, pfn:%lu, "
175 "color:%lu count:%d LRU:%d) at 0x%lx "
176 "(prot: 0x%lx)\n",
177 page_to_phys(page), page_to_pfn(page), color,
178 page_count(page), PageLRU(page), addr,
179 pgprot_val(vma->vm_page_prot));
180 err = vm_insert_page(vma, addr, page);
181 if (err) {
182 TRACE_CUR(ALLOC_NAME ": vm_insert_page() failed "
183 "(%d)\n", err);
184 /* TODO unmap mapped pages */
185 break;
186 }
187 add_page_to_alloced_list(page);
188 }
189 return err;
190}
191
/* Colored-page mapping for real-time tasks: not implemented yet,
 * so such requests are rejected with -EINVAL. */
static int map_colored_pages_rt(struct vm_area_struct *vma)
{
	/* TODO */
	return -EINVAL;
}
197
198static int map_colored_pages(struct vm_area_struct *vma)
199{
200 if (likely(is_realtime(current)))
201 return map_colored_pages_rt(vma);
202 return map_colored_pages_non_rt(vma);
203}
204
/* VMA close callback for the allocation device. Diagnostic only:
 * traces the region being torn down. Reclaiming the colored pages
 * themselves is handled elsewhere (see the alloced-page list). */
static void litmus_color_alloc_vm_close(struct vm_area_struct *vma)
{
	TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__,
			vma->vm_flags, pgprot_val(vma->vm_page_prot));

	TRACE_CUR(ALLOC_NAME ": %p:%p vma:%p vma->vm_private_data:%p closed.\n",
			(void*) vma->vm_start, (void*) vma->vm_end, vma,
			vma->vm_private_data);
}
214
/* Fault handler for the allocation device VMA.
 * All pages are inserted eagerly in mmap(), so a fault means
 * userspace touched an unmapped address; report it and SIGBUS. */
static int litmus_color_alloc_vm_fault(struct vm_area_struct *vma,
		struct vm_fault *vmf)
{
	/* This function should never be called, since
	 * all pages should have been mapped by mmap()
	 * already. */
	TRACE_CUR("%s flags=0x%lx\n", __FUNCTION__, vma->vm_flags);
	printk(KERN_WARNING "fault: %s flags=0x%lx\n", __FUNCTION__,
			vma->vm_flags);

	/* every requested page was mapped up front; faulting is an error */
	return VM_FAULT_SIGBUS;
}
228
/* VM operations installed on every allocation-device mapping. */
static struct vm_operations_struct litmus_color_alloc_vm_ops = {
	.close = litmus_color_alloc_vm_close,
	.fault = litmus_color_alloc_vm_fault,
};
233
/* mmap handler for the allocation device: backs the requested region
 * with colored pages. The length must be a whole number of pages and
 * the mapping private; it is not inherited across fork and cannot be
 * expanded. Returns 0 on success or a negative errno. */
static int litmus_color_alloc_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int err = 0;

	/* you may only request integer multiple of PAGE_SIZE */
	if (offset_in_page(vma->vm_end - vma->vm_start)) {
		err = -EINVAL;
		goto out;
	}

	err = mmap_common_checks(vma);
	if (err)
		goto out;

	vma->vm_ops = &litmus_color_alloc_vm_ops;
	/* this mapping should not be kept across forks,
	 * and cannot be expanded */
	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;

	err = map_colored_pages(vma);

	TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__, vma->vm_flags,
			pgprot_val(vma->vm_page_prot));
out:
	return err;
}
260
261/***********************************************************
262 * Initilization
263***********************************************************/
264
/* Control device: mmap-only; hands each task its color control page. */
static struct file_operations litmus_color_ctrl_fops = {
	.owner = THIS_MODULE,
	.mmap = litmus_color_ctrl_mmap,
};

static struct miscdevice litmus_color_ctrl_dev = {
	.name = CTRL_NAME,
	.minor = MISC_DYNAMIC_MINOR,
	.fops = &litmus_color_ctrl_fops,
};

/* Allocation device: mmap-only; backs regions with colored pages. */
static struct file_operations litmus_color_alloc_fops = {
	.owner = THIS_MODULE,
	.mmap = litmus_color_alloc_mmap,
};

static struct miscdevice litmus_color_alloc_dev = {
	.name = ALLOC_NAME,
	.minor = MISC_DYNAMIC_MINOR,
	.fops = &litmus_color_alloc_fops,
};
286
287static int __init init_dev(const char* name, struct miscdevice *dev)
288{
289 int err;
290 err = misc_register(dev);
291 if (err)
292 printk(KERN_WARNING "Could not allocate %s device (%d).\n",
293 name, err);
294 return err;
295}
296
297static int __init init_color_devices(void)
298{
299 int err;
300 spin_lock_init(&non_rt_colors.lock);
301 non_rt_colors.color = 0;
302
303 printk("Allocating LITMUS^RT color devices.\n");
304 err = init_dev(ALLOC_NAME, &litmus_color_alloc_dev);
305 if (err)
306 goto out;
307 err = init_dev(CTRL_NAME, &litmus_color_ctrl_dev);
308out:
309 return err;
310}
311
312module_init(init_color_devices);