1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
|
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <asm/io.h> /* page_to_phys on SPARC */
#include <litmus/litmus.h>
#include <litmus/color.h>
#define ALLOC_NAME "litmus/color_alloc"
#define CTRL_NAME "litmus/color_ctrl"
extern unsigned long nr_colors;
/***********************************************************
* Control device
***********************************************************/
/* VMA close handler for the color control device: trace-only teardown.
 *
 * The control page itself is NOT freed here -- per the comment in
 * alloc_color_ctrl_page(), it is released during task teardown.  The
 * extra reference taken via get_page() in map_color_ctrl_page() is
 * presumably dropped by generic vma/page teardown -- TODO confirm. */
static void litmus_color_ctrl_vm_close(struct vm_area_struct *vma)
{
	TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__,
			vma->vm_flags, pgprot_val(vma->vm_page_prot));
	TRACE_CUR(CTRL_NAME ": %p:%p vma:%p vma->vm_private_data:%p closed.\n",
			(void*) vma->vm_start, (void*) vma->vm_end, vma,
			vma->vm_private_data);
}
/* VMA fault handler for the color control device.
 *
 * The single control page is mapped eagerly by mmap() via
 * remap_pfn_range(), so this should never run; a fault means userspace
 * touched an address the mapping does not back.  Always SIGBUS. */
static int litmus_color_ctrl_vm_fault(struct vm_area_struct *vma,
		struct vm_fault *vmf)
{
	/* This function should never be called, since
	 * all pages should have been mapped by mmap()
	 * already. */
	TRACE_CUR("%s flags=0x%lx\n", __FUNCTION__, vma->vm_flags);
	printk(KERN_WARNING "fault: %s flags=0x%lx\n", __FUNCTION__,
			vma->vm_flags);
	/* everything was populated up front; nothing to fault in */
	return VM_FAULT_SIGBUS;
}
/* vm_ops for control-device mappings: trace on close, SIGBUS on fault
 * (the mapping is fully populated by mmap()). */
static struct vm_operations_struct litmus_color_ctrl_vm_ops = {
	.close = litmus_color_ctrl_vm_close,
	.fault = litmus_color_ctrl_vm_fault,
};
/* Validate properties common to mmap() on both color devices.
 *
 * Only offset 0 may be mapped: each device lays out its mapping from
 * the first page, so a non-zero pgoff is meaningless here.
 *
 * Shared mappings (VM_MAYSHARE/VM_SHARED) are deliberately allowed;
 * an earlier revision rejected them (removed dead #if 0 block).
 *
 * Returns 0 if the vma is acceptable, -EINVAL otherwise.
 */
static int mmap_common_checks(struct vm_area_struct *vma)
{
	/* you can only map the "first" page */
	if (vma->vm_pgoff != 0)
		return -EINVAL;

	return 0;
}
/* Lazily allocate the per-task color control page.
 *
 * The first time the current task maps the control device, a zeroed
 * kernel page is allocated and recorded in its rt params; later calls
 * are no-ops.  The page is freed during task teardown, not here.
 *
 * Returns 0 on success (or if the page already exists), -ENOMEM if
 * get_zeroed_page() fails.
 */
static int alloc_color_ctrl_page(void)
{
	struct task_struct *t = current;
	unsigned long page;

	/* nothing to do if this task already owns a control page */
	if (tsk_rt(t)->color_ctrl_page)
		return 0;

	page = get_zeroed_page(GFP_KERNEL);
	tsk_rt(t)->color_ctrl_page = (void*) page;
	/* will get de-allocated in task teardown */
	TRACE_TASK(t, "%s color_ctrl_page = %p\n", __FUNCTION__,
			tsk_rt(t)->color_ctrl_page);
	return page ? 0 : -ENOMEM;
}
/* Map the current task's color control page into @vma.
 *
 * Assumes alloc_color_ctrl_page() has already succeeded for this task;
 * the sole caller, litmus_color_ctrl_mmap(), guarantees this.
 *
 * Fixes: removed a redundant second `t = current;` assignment (t is
 * already initialized at declaration) and the duplicate pfn computation
 * in the trace message.
 *
 * Returns 0 on success or the negative errno from remap_pfn_range().
 */
static int map_color_ctrl_page(struct vm_area_struct *vma)
{
	int err;
	unsigned long pfn;
	struct task_struct *t = current;
	struct page *color_ctrl = virt_to_page(tsk_rt(t)->color_ctrl_page);

	/* Increase ref count. Is decreased when vma is destroyed. */
	get_page(color_ctrl);
	pfn = page_to_pfn(color_ctrl);
	TRACE_CUR(CTRL_NAME
		": mapping %p (pfn:%lx) to 0x%lx (flags:%lx prot:%lx)\n",
		tsk_rt(t)->color_ctrl_page, pfn,
		vma->vm_start, vma->vm_flags, pgprot_val(vma->vm_page_prot));
	/* Map it into the vma. Make sure to use PAGE_SHARED, otherwise
	 * userspace actually gets a copy-on-write page. */
	err = remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE, PAGE_SHARED);
	if (err)
		TRACE_CUR(CTRL_NAME ": remap_pfn_range() failed (%d)\n", err);
	return err;
}
/* mmap() handler for the color control device.
 *
 * Userspace maps exactly one page (the per-task color control page).
 * The page is allocated lazily and mapped eagerly with remap_pfn_range,
 * so the fault handler never fires in normal operation.
 *
 * Fix: the size-check trace message claimed "a multiple of PAGE_SIZE"
 * but the check requires exactly one page.
 *
 * Returns 0 on success or a negative errno.
 */
static int litmus_color_ctrl_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int err = 0;

	/* you can only get one page */
	if (vma->vm_end - vma->vm_start != PAGE_SIZE) {
		TRACE_CUR(CTRL_NAME ": must map exactly one page\n");
		err = -EINVAL;
		goto out;
	}

	err = mmap_common_checks(vma);
	if (err) {
		TRACE_CUR(CTRL_NAME ": failed common mmap checks.\n");
		goto out;
	}

	vma->vm_ops = &litmus_color_ctrl_vm_ops;
	/* this mapping should not be kept across forks,
	 * and cannot be expanded */
	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;

	err = alloc_color_ctrl_page();
	if (!err)
		err = map_color_ctrl_page(vma);

	TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__, vma->vm_flags,
			pgprot_val(vma->vm_page_prot));
out:
	return err;
}
/***********************************************************
* Allocation device
***********************************************************/
#define vma_nr_pages(vma) \
({unsigned long v = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); v;})
/* Populate @vma with physically colored pages.
 *
 * The task's color control page specifies, per entry i, a color
 * (colors[i]) and how many pages of that color to map (pages[i]).
 * Pages are mapped back-to-back from vma->vm_start with PAGE_SHARED
 * so userspace does not get copy-on-write copies.
 *
 * Error handling (replaces the old "TODO unmap mapped pages"): every
 * page handed to add_page_to_alloced_list() is reclaimed when the vma
 * is closed (reclaim_pages() in litmus_color_alloc_vm_close()).  The
 * page that fails remap_pfn_range() is now added to that list too, so
 * it is no longer leaked; previously it was neither listed nor freed.
 *
 * Returns 0 on success, -ENOMEM if no page of the requested color is
 * available, -EINVAL for a zero page count, or the negative errno from
 * remap_pfn_range() (no longer clobbered with -EINVAL).
 */
static int do_map_colored_pages(struct vm_area_struct *vma)
{
	const unsigned long nr_pages = vma_nr_pages(vma);
	struct color_ctrl_page *color_ctrl = tsk_rt(current)->color_ctrl_page;
	unsigned long nr_mapped;
	int i, err = 0;

	TRACE_CUR(ALLOC_NAME ": allocating %lu pages (flags:%lx prot:%lx)\n",
			nr_pages, vma->vm_flags, pgprot_val(vma->vm_page_prot));

	for (i = 0, nr_mapped = 0; nr_mapped < nr_pages; ++i) {
		const unsigned long color_no = color_ctrl->colors[i];
		unsigned int page_no = 0;

		for (; page_no < color_ctrl->pages[i]; ++page_no, ++nr_mapped) {
			const unsigned long addr = vma->vm_start +
					(nr_mapped << PAGE_SHIFT);
			struct page *page = get_colored_page(color_no);

			if (!page) {
				TRACE_CUR(ALLOC_NAME ": Could not get page with"
						" color %lu.\n", color_no);
				/* pages mapped so far are on the alloced list
				 * and are reclaimed on vma close */
				err = -ENOMEM;
				goto out;
			}
#ifdef CONFIG_SPARC
			/* zero via the user mapping -- presumably needed for
			 * SPARC's virtually indexed caches; TODO confirm */
			clear_user_highpage(page, addr);
#endif
			TRACE_CUR(ALLOC_NAME ": insert page (pa:0x%10llx, "
					"pfn:%8lu, color:%3lu) at 0x%lx (flags:%16lx "
					"prot:%16lx) PAGE_SHARED:0x%16lx\n",
					page_to_phys(page),
					page_to_pfn(page), color_no, addr,
					vma->vm_flags, pgprot_val(vma->vm_page_prot),
					PAGE_SHARED);
			err = remap_pfn_range(vma, addr, page_to_pfn(page),
					PAGE_SIZE, PAGE_SHARED);
			if (err) {
				TRACE_CUR(ALLOC_NAME ": remap_pfn_range() fail "
						"(%d) (flags:%lx prot:%lx)\n", err,
						vma->vm_flags,
						pgprot_val(vma->vm_page_prot));
				/* track the failing page so reclaim_pages()
				 * frees it on close instead of leaking it */
				add_page_to_alloced_list(page, vma);
				goto out;
			}
			add_page_to_alloced_list(page, vma);
		}
		if (!page_no) {
			/* a zero count would otherwise let the outer loop run
			 * off the end of the control-page arrays */
			TRACE_CUR(ALLOC_NAME ": 0 pages given for color %lu\n",
					color_no);
			err = -EINVAL;
			goto out;
		}
	}
out:
	return err;
}
/* Validate a colored-page mapping request and hand it off to
 * do_map_colored_pages().
 *
 * Rejects with -EINVAL if the calling task never mapped a color
 * control page, or if it requests more pages than a single control
 * page can describe (COLORS_PER_CONTROL_PAGE).
 */
static int map_colored_pages(struct vm_area_struct *vma)
{
	const unsigned long nr_pages = vma_nr_pages(vma);

	if (!tsk_rt(current)->color_ctrl_page) {
		TRACE_CUR("Process has no color control page.\n");
		return -EINVAL;
	}

	if (COLORS_PER_CONTROL_PAGE < nr_pages) {
		TRACE_CUR("Max page request %lu but want %lu.\n",
				COLORS_PER_CONTROL_PAGE, nr_pages);
		return -EINVAL;
	}

	return do_map_colored_pages(vma);
}
/* VMA close handler for the allocation device.
 *
 * Traces the teardown, then returns all colored pages that
 * do_map_colored_pages() recorded for this vma (via
 * add_page_to_alloced_list()) back to the color allocator through
 * reclaim_pages(). */
static void litmus_color_alloc_vm_close(struct vm_area_struct *vma)
{
	TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__,
			vma->vm_flags, pgprot_val(vma->vm_page_prot));
	TRACE_CUR(ALLOC_NAME ": %p:%p vma:%p vma->vm_private_data:%p closed.\n",
			(void*) vma->vm_start, (void*) vma->vm_end, vma,
			vma->vm_private_data);
	reclaim_pages(vma);
}
/* VMA fault handler for the allocation device.
 *
 * All requested pages are mapped up front by mmap() (see
 * do_map_colored_pages()), so a fault here means userspace touched an
 * unbacked address.  Always SIGBUS.  (The old "you only get one page"
 * note was copy-pasted from the control device; this device maps many
 * pages -- the point is simply that nothing is faulted in lazily.) */
static int litmus_color_alloc_vm_fault(struct vm_area_struct *vma,
		struct vm_fault *vmf)
{
	/* This function should never be called, since
	 * all pages should have been mapped by mmap()
	 * already. */
	TRACE_CUR("%s flags=0x%lx\n", __FUNCTION__, vma->vm_flags);
	printk(KERN_WARNING "fault: %s flags=0x%lx\n", __FUNCTION__,
			vma->vm_flags);
	/* nothing is mapped lazily; refuse */
	return VM_FAULT_SIGBUS;
}
/* vm_ops for allocation-device mappings: reclaim colored pages on
 * close, SIGBUS on fault (mapping is fully populated by mmap()). */
static struct vm_operations_struct litmus_color_alloc_vm_ops = {
	.close = litmus_color_alloc_vm_close,
	.fault = litmus_color_alloc_vm_fault,
};
/* mmap() handler for the color allocation device.
 *
 * Userspace requests a whole number of pages; each one is backed by a
 * physically colored frame chosen according to the task's color
 * control page.  Returns 0 on success or a negative errno.
 */
static int litmus_color_alloc_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int err;

	/* you may only request integer multiple of PAGE_SIZE */
	if (offset_in_page(vma->vm_end - vma->vm_start))
		return -EINVAL;

	err = mmap_common_checks(vma);
	if (err)
		return err;

	vma->vm_ops = &litmus_color_alloc_vm_ops;
	/* this mapping should not be kept across forks,
	 * and cannot be expanded */
	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;

	err = map_colored_pages(vma);

	TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__, vma->vm_flags,
			pgprot_val(vma->vm_page_prot));
	return err;
}
/***********************************************************
* Initilization
***********************************************************/
/* Control device: maps the per-task color control page. */
static struct file_operations litmus_color_ctrl_fops = {
	.owner = THIS_MODULE,
	.mmap  = litmus_color_ctrl_mmap,
};

/* Registered under CTRL_NAME with a dynamically assigned minor. */
static struct miscdevice litmus_color_ctrl_dev = {
	.name  = CTRL_NAME,
	.minor = MISC_DYNAMIC_MINOR,
	.fops  = &litmus_color_ctrl_fops,
};

/* Allocation device: maps physically colored pages. */
static struct file_operations litmus_color_alloc_fops = {
	.owner = THIS_MODULE,
	.mmap  = litmus_color_alloc_mmap,
};

/* Registered under ALLOC_NAME with a dynamically assigned minor. */
static struct miscdevice litmus_color_alloc_dev = {
	.name  = ALLOC_NAME,
	.minor = MISC_DYNAMIC_MINOR,
	.fops  = &litmus_color_alloc_fops,
};
/* Register @dev as a misc character device, logging @name on failure.
 *
 * Fix: the warning said "Could not allocate", but misc_register()
 * registers a device -- nothing is allocated on the caller's behalf.
 *
 * Returns 0 on success or the negative errno from misc_register().
 */
static int __init init_dev(const char* name, struct miscdevice *dev)
{
	int err;

	err = misc_register(dev);
	if (err)
		printk(KERN_WARNING "Could not register %s device (%d).\n",
				name, err);
	return err;
}
/* Register both color devices at boot.
 *
 * Fix: if the control device fails to register after the allocation
 * device succeeded, the allocation device is now deregistered again,
 * so a partial failure no longer leaves a stray device node behind.
 *
 * Returns 0 on success or the first registration error.
 */
static int __init init_color_devices(void)
{
	int err;

	printk("Allocating LITMUS^RT color devices.\n");

	err = init_dev(ALLOC_NAME, &litmus_color_alloc_dev);
	if (err)
		goto out;

	err = init_dev(CTRL_NAME, &litmus_color_ctrl_dev);
	if (err)
		/* undo the first registration on partial failure */
		misc_deregister(&litmus_color_alloc_dev);
out:
	return err;
}
module_init(init_color_devices);
|