-rw-r--r-- litmus/color_dev.c | 229
1 file changed, 125 insertions(+), 104 deletions(-)
diff --git a/litmus/color_dev.c b/litmus/color_dev.c
index 51760328418e..677905a2a6b1 100644
--- a/litmus/color_dev.c
+++ b/litmus/color_dev.c
@@ -15,16 +15,49 @@
 
 extern unsigned long nr_colors;
 
+/************************************************************
+ * Common functions.
+ ************************************************************/
+
+static int mmap_common_checks(struct vm_area_struct *vma)
+{
+        /* you can only map the "first" page */
+        if (vma->vm_pgoff != 0)
+                return -EINVAL;
+
+        /* you can't share it with anyone */
+        if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) {
+                TRACE_CUR("error: page had MAYSHARE or SHARED set\n");
+                return -EINVAL;
+        }
+
+        return 0;
+}
+
+static void mmap_common_vma_flags(struct vm_area_struct *vma)
+{
+        /* This mapping should not be kept across forks,
+         * cannot be expanded, and is not a "normal" page. */
+        vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_IO;
+
+        /* We don't want the first write access to trigger a "minor" page fault
+         * to mark the page as dirty. This is transient, private memory, we
+         * don't care if it was touched or not. __S011 means RW access, but not
+         * execute, and avoids copy-on-write behavior.
+         * See protection_map in mmap.c. */
+        vma->vm_page_prot = __S011;
+}
+
 /***********************************************************
  * Control device
  ***********************************************************/
 
 static void litmus_color_ctrl_vm_close(struct vm_area_struct *vma)
 {
-        TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__,
+        TRACE_CUR("flags=0x%lx prot=0x%lx\n",
                 vma->vm_flags, pgprot_val(vma->vm_page_prot));
 
-        TRACE_CUR(CTRL_NAME ": %p:%p vma:%p vma->vm_private_data:%p closed.\n",
+        TRACE_CUR("%p:%p vma:%p vma->vm_private_data:%p closed.\n",
                 (void*) vma->vm_start, (void*) vma->vm_end, vma,
                 vma->vm_private_data);
 }
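Editorial note on the `__S011` constant assigned above: kernels of this era built `protection_map[]` in `mm/mmap.c` from per-architecture `__P*`/`__S*` constants, indexed by the sharing bit plus the `PROT_READ`/`PROT_WRITE`/`PROT_EXEC` bits. A sketch of that table as a reading aid; the exact `pgprot_t` each entry expands to is architecture-specific (on x86 of this era, `__S011` was defined as `PAGE_SHARED`):

        /* From mm/mmap.c (kernels of this era): mmap() protections are
         * translated to page protections through this table.  The low
         * three index bits are read/write/execute; the high bit selects
         * the "shared" half.  __S011 is therefore the shared, read+write,
         * no-execute entry, which is why assigning it avoids
         * copy-on-write semantics. */
        pgprot_t protection_map[16] = {
                __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
                __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
        };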
@@ -35,11 +68,10 @@ static int litmus_color_ctrl_vm_fault(struct vm_area_struct *vma,
         /* This function should never be called, since
          * all pages should have been mapped by mmap()
          * already. */
-        TRACE_CUR("%s flags=0x%lx\n", __FUNCTION__, vma->vm_flags);
-        printk(KERN_WARNING "fault: %s flags=0x%lx\n", __FUNCTION__,
-                vma->vm_flags);
+        TRACE_CUR("flags=0x%lx (off:%ld)\n", vma->vm_flags, vmf->pgoff);
+
+        WARN(1, "Page fault in color ctrl page! flags=0x%lx\n", vma->vm_flags);
 
-        /* nope, you only get one page */
         return VM_FAULT_SIGBUS;
 }
 
@@ -48,63 +80,41 @@ static struct vm_operations_struct litmus_color_ctrl_vm_ops = {
         .fault = litmus_color_ctrl_vm_fault,
 };
 
-static int mmap_common_checks(struct vm_area_struct *vma)
-{
-        /* you can only map the "first" page */
-        if (vma->vm_pgoff != 0)
-                return -EINVAL;
-
-#if 0
-        /* you can't share it with anyone */
-        /* well, maybe you can... */
-        if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
-                return -EINVAL;
-#endif
-
-        return 0;
-}
-
 static int alloc_color_ctrl_page(void)
 {
-        struct task_struct *t;
+        struct task_struct *t = current;
         int err = 0;
 
-        t = current;
         /* only allocate if the task doesn't have one yet */
         if (!tsk_rt(t)->color_ctrl_page) {
                 tsk_rt(t)->color_ctrl_page = (void*) get_zeroed_page(GFP_KERNEL);
-                if (!tsk_rt(t)->color_ctrl_page)
+                if (!tsk_rt(t)->color_ctrl_page) {
+                        TRACE_TASK(t, "could not allocate control page\n");
                         err = -ENOMEM;
+                        goto out;
+                }
                 /* will get de-allocated in task teardown */
-                TRACE_TASK(t, "%s color_ctrl_page = %p\n", __FUNCTION__,
+                TRACE_TASK(t, "color_ctrl_page = %p\n",
                                 tsk_rt(t)->color_ctrl_page);
         }
+out:
         return err;
 }
 
 static int map_color_ctrl_page(struct vm_area_struct *vma)
 {
-        int err;
-        unsigned long pfn;
+        int err = 0;
         struct task_struct *t = current;
         struct page *color_ctrl = virt_to_page(tsk_rt(t)->color_ctrl_page);
 
-        t = current;
-        /* Increase ref count. Is decreased when vma is destroyed. */
-        get_page(color_ctrl);
-        pfn = page_to_pfn(color_ctrl);
-
-        TRACE_CUR(CTRL_NAME
-                ": mapping %p (pfn:%lx, %lx) to 0x%lx (flags:%lx prot:%lx)\n",
-                tsk_rt(t)->color_ctrl_page, pfn, page_to_pfn(color_ctrl),
+        TRACE_CUR("mapping %p (pfn:%lx) to 0x%lx (flags:%lx prot:%lx)\n",
+                tsk_rt(t)->color_ctrl_page, page_to_pfn(color_ctrl),
                 vma->vm_start, vma->vm_flags, pgprot_val(vma->vm_page_prot));
 
-        /* Map it into the vma. Make sure to use PAGE_SHARED, otherwise
-         * userspace actually gets a copy-on-write page. */
-        err = remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE, PAGE_SHARED);
-
+        /* Map it into the vma. */
+        err = vm_insert_page(vma, vma->vm_start, color_ctrl);
         if (err)
-                TRACE_CUR(CTRL_NAME ": remap_pfn_range() failed (%d)\n", err);
+                TRACE_CUR("vm_insert_page() failed (%d)\n", err);
 
         return err;
 }
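The functional change in this hunk is the swap from `remap_pfn_range()` to `vm_insert_page()`. A rough sketch of the difference, as a reading aid rather than part of the commit:

        /* Before: map a raw PFN.  The page table entry does not hold a
         * normal struct-page reference, so the driver pinned the page
         * itself (the get_page() this commit removes) and had to pass an
         * explicit protection to avoid handing userspace a
         * copy-on-write mapping. */
        get_page(color_ctrl);
        err = remap_pfn_range(vma, vma->vm_start, page_to_pfn(color_ctrl),
                        PAGE_SIZE, PAGE_SHARED);

        /* After: insert the struct page directly.  vm_insert_page()
         * takes its own reference on the page and uses
         * vma->vm_page_prot (set up by mmap_common_vma_flags() above),
         * so both workarounds disappear. */
        err = vm_insert_page(vma, vma->vm_start, color_ctrl);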
@@ -115,27 +125,25 @@ static int litmus_color_ctrl_mmap(struct file *filp, struct vm_area_struct *vma)
 
         /* you can only get one page */
         if (vma->vm_end - vma->vm_start != PAGE_SIZE) {
-                TRACE_CUR(CTRL_NAME ": must allocate a multiple of PAGE_SIZE\n");
+                TRACE_CUR("must allocate a multiple of PAGE_SIZE\n");
                 err = -EINVAL;
                 goto out;
         }
 
         err = mmap_common_checks(vma);
         if (err) {
-                TRACE_CUR(CTRL_NAME ": failed common mmap checks.\n");
+                TRACE_CUR("failed common mmap checks.\n");
                 goto out;
         }
 
         vma->vm_ops = &litmus_color_ctrl_vm_ops;
-        /* this mapping should not be kept across forks,
-         * and cannot be expanded */
-        vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+        mmap_common_vma_flags(vma);
 
         err = alloc_color_ctrl_page();
         if (!err)
                 err = map_color_ctrl_page(vma);
 
-        TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__, vma->vm_flags,
+        TRACE_CUR("flags=0x%lx prot=0x%lx\n", vma->vm_flags,
                 pgprot_val(vma->vm_page_prot));
 out:
         return err;
@@ -149,6 +157,51 @@ out:
 #define vma_nr_pages(vma) \
         ({unsigned long v = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); v;})
 
+static int do_map_colored_page_set(struct vm_area_struct *vma,
+                const unsigned long starting_at,
+                const unsigned long color_no,
+                const int nr_pages)
+{
+        int i, err = 0;
+
+        for (i = 0; i < nr_pages; ++i) {
+                const unsigned long addr = starting_at + (i << PAGE_SHIFT);
+                struct page *page = get_colored_page(color_no);
+
+                if (!page) {
+                        TRACE_CUR("Could not get page with color %lu.\n",
+                                        color_no);
+                        /* TODO unmap mapped pages? */
+                        err = -ENOMEM;
+                        goto out;
+                }
+
+#ifdef CONFIG_SPARC
+                clear_user_highpage(page, addr);
+#endif
+
+                TRACE_CUR("insert page (pa:0x%10llx, pfn:%8lu, color:%3lu) "
+                                "at 0x%lx vma:(flags:%16lx prot:%16lx)\n",
+                                page_to_phys(page),
+                                page_to_pfn(page),
+                                color_no,
+                                addr,
+                                vma->vm_flags,
+                                pgprot_val(vma->vm_page_prot));
+
+                err = vm_insert_page(vma, addr, page);
+                if (err) {
+                        TRACE_CUR("vm_insert_page() failed (%d)\n", err);
+                        /* TODO unmap mapped pages? */
+                        err = -EINVAL;
+                        goto out;
+                }
+                add_page_to_alloced_list(page, vma);
+        }
+out:
+        return err;
+}
+
 static int do_map_colored_pages(struct vm_area_struct *vma)
 {
         const unsigned long nr_pages = vma_nr_pages(vma);
@@ -156,67 +209,36 @@ static int do_map_colored_pages(struct vm_area_struct *vma)
         unsigned long nr_mapped;
         int i, err = 0;
 
-        TRACE_CUR(ALLOC_NAME ": allocating %lu pages (flags:%lx prot:%lx)\n",
+        TRACE_CUR("allocating %lu pages (flags:%lx prot:%lx)\n",
                         nr_pages, vma->vm_flags, pgprot_val(vma->vm_page_prot));
 
 #ifdef CONFIG_PLUGIN_COLOR_UNCACHABLE
+        /* Adds non-cached on x86 */
         vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 #endif
 
         for (i = 0, nr_mapped = 0; nr_mapped < nr_pages; ++i) {
                 const unsigned long color_no = color_ctrl->colors[i];
-                unsigned int page_no = 0;
+                const unsigned long color_quantity = color_ctrl->pages[i];
+                const unsigned long addr = vma->vm_start +
+                                (nr_mapped << PAGE_SHIFT);
 
-                for (; page_no < color_ctrl->pages[i]; ++page_no, ++nr_mapped) {
-                        const unsigned long addr = vma->vm_start +
-                                        (nr_mapped << PAGE_SHIFT);
-                        struct page *page = get_colored_page(color_no);
-#ifdef CONFIG_PLUGIN_COLOR_UNCACHABLE
-                        const pgprot_t ins_prot = pgprot_noncached(PAGE_SHARED);
-#else
-                        const pgprot_t ins_prot = PAGE_SHARED;
-#endif
-
-                        if (!page) {
-                                TRACE_CUR(ALLOC_NAME ": Could not get page with"
-                                                " color %lu.\n", color_no);
-                                /* TODO unmap mapped pages */
-                                err = -ENOMEM;
-                                goto out;
-                        }
-
-#ifdef CONFIG_SPARC
-                        clear_user_highpage(page, addr);
-#endif
-
-                        TRACE_CUR(ALLOC_NAME ": insert page (pa:0x%10llx, "
-                                        "pfn:%8lu, color:%3lu, prot:%lx) at 0x%lx "
-                                        "vma:(flags:%16lx prot:%16lx)\n",
-                                        page_to_phys(page),
-                                        page_to_pfn(page), color_no,
-                                        pgprot_val(ins_prot), addr,
-                                        vma->vm_flags,
-                                        pgprot_val(vma->vm_page_prot));
-
-                        err = remap_pfn_range(vma, addr, page_to_pfn(page),
-                                        PAGE_SIZE, ins_prot);
-                        if (err) {
-                                TRACE_CUR(ALLOC_NAME ": remap_pfn_range() fail "
-                                                "(%d)\n", err);
-                                /* TODO unmap mapped pages */
-                                err = -EINVAL;
-                                goto out;
-                        }
-                        add_page_to_alloced_list(page, vma);
+                if (!nr_pages) {
+                        TRACE_CUR("0 pages given for color %lu\n", color_no);
+                        err = -EINVAL;
+                        goto out;
                 }
 
-                if (!page_no) {
-                        TRACE_CUR(ALLOC_NAME ": 0 pages given for color %lu\n",
-                                        color_no);
+                err = do_map_colored_page_set(vma, addr,
+                                color_no, color_quantity);
+                if (err) {
+                        TRACE_CUR("Could not map colored page set.\n");
                         err = -EINVAL;
                         goto out;
                 }
+                nr_mapped += color_quantity;
         }
+        TRACE_CUR("Successfully mapped %lu pages.\n", nr_mapped);
 out:
         return err;
 }
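One thing worth flagging in the rewritten loop: `if (!nr_pages)` tests the vma's total page count, which is loop-invariant (and, if zero, the loop body never runs at all), whereas the pre-refactor `if (!page_no)` caught a zero-page request for an individual color. If that behavior was meant to survive the refactor, the guard would presumably test the per-color count instead; a hypothetical sketch, not part of the commit:

        /* Hypothetical: reject a color entry requesting zero pages, as
         * the old !page_no check did. */
        if (!color_quantity) {
                TRACE_CUR("0 pages given for color %lu\n", color_no);
                err = -EINVAL;
                goto out;
        }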
@@ -244,10 +266,10 @@ out:
 
 static void litmus_color_alloc_vm_close(struct vm_area_struct *vma)
 {
-        TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__,
+        TRACE_CUR("flags=0x%lx prot=0x%lx\n",
                 vma->vm_flags, pgprot_val(vma->vm_page_prot));
 
-        TRACE_CUR(ALLOC_NAME ": %p:%p vma:%p vma->vm_private_data:%p closed.\n",
+        TRACE_CUR("%p:%p vma:%p vma->vm_private_data:%p closed.\n",
                 (void*) vma->vm_start, (void*) vma->vm_end, vma,
                 vma->vm_private_data);
         reclaim_pages(vma);
@@ -259,11 +281,10 @@ static int litmus_color_alloc_vm_fault(struct vm_area_struct *vma,
         /* This function should never be called, since
          * all pages should have been mapped by mmap()
          * already. */
-        TRACE_CUR("%s flags=0x%lx\n", __FUNCTION__, vma->vm_flags);
-        printk(KERN_WARNING "fault: %s flags=0x%lx\n", __FUNCTION__,
-                vma->vm_flags);
+        TRACE_CUR("flags=0x%lx (off:%ld)\n", vma->vm_flags, vmf->pgoff);
+
+        WARN(1, "Page fault in color ctrl page! flags=0x%lx\n", vma->vm_flags);
 
-        /* nope, you only get one page */
         return VM_FAULT_SIGBUS;
 }
 
@@ -283,17 +304,17 @@ static int litmus_color_alloc_mmap(struct file *filp, struct vm_area_struct *vma
         }
 
         err = mmap_common_checks(vma);
-        if (err)
+        if (err) {
+                TRACE_CUR("failed mmap common checks\n");
                 goto out;
+        }
 
         vma->vm_ops = &litmus_color_alloc_vm_ops;
-        /* this mapping should not be kept across forks,
-         * and cannot be expanded */
-        vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+        mmap_common_vma_flags(vma);
 
         err = map_colored_pages(vma);
 
-        TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__, vma->vm_flags,
+        TRACE_CUR("flags=0x%lx prot=0x%lx\n", vma->vm_flags,
                 pgprot_val(vma->vm_page_prot));
 out:
         return err;
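Taken together, the checks above constrain userspace: the control device wants exactly one page at offset zero, and `mmap_common_checks()` now rejects `VM_SHARED`/`VM_MAYSHARE`, so the mapping must be private. A minimal userspace sketch under those constraints; the device path is illustrative, since this commit does not show where the node is registered:

        #include <fcntl.h>
        #include <stdio.h>
        #include <sys/mman.h>
        #include <unistd.h>

        int main(void)
        {
                long page_size = sysconf(_SC_PAGESIZE);
                /* Hypothetical node; the real path depends on how litmus
                 * registers the color control device. */
                int fd = open("/dev/litmus/color_ctrl", O_RDWR);
                void *ctrl;

                if (fd < 0) {
                        perror("open");
                        return 1;
                }

                /* One page, offset 0, MAP_PRIVATE: anything else is
                 * rejected by litmus_color_ctrl_mmap() or
                 * mmap_common_checks(). */
                ctrl = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE, fd, 0);
                if (ctrl == MAP_FAILED) {
                        perror("mmap");
                        close(fd);
                        return 1;
                }

                /* ... use the control page ... */

                munmap(ctrl, page_size);
                close(fd);
                return 0;
        }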