diff options
author | Christopher Kenna <cjk@cs.unc.edu> | 2012-10-07 03:06:22 -0400 |
---|---|---|
committer | Christopher Kenna <cjk@cs.unc.edu> | 2012-10-07 16:33:31 -0400 |
commit | fae703727534228a8fe508880b844af629781ea1 (patch) | |
tree | cba3fbd4e65fb8901ade99b8bb6ddeab2fc0945d | |
parent | b7d3fdb9450b667a57a34d67764a2523af921e40 (diff) |
Architecture dependent uncachable control page.
While working on an ODROID-X (Samsung Exynos4412 ARM Cortex-A9), I
experienced non-determinism when reading and writing values to the
LITMUS^RT control page. Writes to the control page from user- (kernel-)
space were not always reflected in kernel- (user-) space.
Neither restricting the task to run on a single CPU nor inserting
general memory barriers (with mb()) fixed the problem. Mapping the
control page as uncachable in both kernel and user space did fix the
problem, which is what this patch does.
Also, since vunmap() cannot be called from an interrupt context, I had
to add a workqueue that unmaps and frees the control page when it is no
longer needed. (On my system, exit_litmus() was called from interrupt
context while the kernel reaped the task_struct.)
Conflicts:
include/litmus/rt_param.h
litmus/litmus.c
Does not make the color control page uncachable yet!
Signed-off-by: Christopher Kenna <cjk@cs.unc.edu>
-rw-r--r-- | arch/arm/Kconfig | 3 | ||||
-rw-r--r-- | include/litmus/litmus.h | 2 | ||||
-rw-r--r-- | include/litmus/rt_param.h | 3 | ||||
-rw-r--r-- | litmus/ctrldev.c | 150 | ||||
-rw-r--r-- | litmus/litmus.c | 37 |
5 files changed, 178 insertions, 17 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index fb228eaaab2b..662cb7d9e093 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -2047,4 +2047,7 @@ config ARCH_HAS_SEND_PULL_TIMERS | |||
2047 | config ARCH_HAS_FEATHER_TRACE | 2047 | config ARCH_HAS_FEATHER_TRACE |
2048 | def_bool n | 2048 | def_bool n |
2049 | 2049 | ||
2050 | config ARCH_NEEDS_UNCACHED_CONTROL_PAGE | ||
2051 | def_bool y | ||
2052 | |||
2050 | source "litmus/Kconfig" | 2053 | source "litmus/Kconfig" |
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h index 2cd68ba3b752..93961d08efd5 100644 --- a/include/litmus/litmus.h +++ b/include/litmus/litmus.h | |||
@@ -12,6 +12,8 @@ | |||
12 | extern atomic_t release_master_cpu; | 12 | extern atomic_t release_master_cpu; |
13 | #endif | 13 | #endif |
14 | 14 | ||
15 | void litmus_schedule_deallocation(struct task_struct *t); | ||
16 | |||
15 | /* in_list - is a given list_head queued on some list? | 17 | /* in_list - is a given list_head queued on some list? |
16 | */ | 18 | */ |
17 | static inline int in_list(struct list_head* list) | 19 | static inline int in_list(struct list_head* list) |
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h index 2eaa0c532414..28a4d538a2ac 100644 --- a/include/litmus/rt_param.h +++ b/include/litmus/rt_param.h | |||
@@ -264,6 +264,9 @@ struct rt_param { | |||
264 | 264 | ||
265 | /* Pointer to the page shared between userspace and kernel. */ | 265 | /* Pointer to the page shared between userspace and kernel. */ |
266 | struct control_page * ctrl_page; | 266 | struct control_page * ctrl_page; |
267 | #ifdef CONFIG_ARCH_NEEDS_UNCACHED_CONTROL_PAGE | ||
268 | void *ctrl_page_orig; | ||
269 | #endif | ||
267 | 270 | ||
268 | lt_t total_tardy; | 271 | lt_t total_tardy; |
269 | lt_t max_tardy; | 272 | lt_t max_tardy; |
diff --git a/litmus/ctrldev.c b/litmus/ctrldev.c index 9969ab17c190..07c3a680ca6b 100644 --- a/litmus/ctrldev.c +++ b/litmus/ctrldev.c | |||
@@ -3,6 +3,10 @@ | |||
3 | #include <linux/fs.h> | 3 | #include <linux/fs.h> |
4 | #include <linux/miscdevice.h> | 4 | #include <linux/miscdevice.h> |
5 | #include <linux/module.h> | 5 | #include <linux/module.h> |
6 | #include <linux/slab.h> | ||
7 | #include <linux/vmalloc.h> | ||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/workqueue.h> | ||
6 | 10 | ||
7 | #include <litmus/litmus.h> | 11 | #include <litmus/litmus.h> |
8 | 12 | ||
@@ -10,32 +14,157 @@ | |||
10 | 14 | ||
11 | #define CTRL_NAME "litmus/ctrl" | 15 | #define CTRL_NAME "litmus/ctrl" |
12 | 16 | ||
17 | static struct workqueue_struct *wq_litmus_dealloc; | ||
18 | |||
19 | struct litmus_dealloc_work { | ||
20 | struct work_struct work_struct; | ||
21 | void *ctrl_page_mem; | ||
22 | #ifdef CONFIG_ARCH_NEEDS_UNCACHED_CONTROL_PAGE | ||
23 | void *ctrl_page_unmap; | ||
24 | #endif | ||
25 | }; | ||
26 | |||
27 | static void litmus_dealloc(struct work_struct *work_in) | ||
28 | { | ||
29 | struct litmus_dealloc_work *work = container_of(work_in, | ||
30 | struct litmus_dealloc_work, work_struct); | ||
31 | #ifdef CONFIG_ARCH_NEEDS_UNCACHED_CONTROL_PAGE | ||
32 | TRACE("vunmap() control page %p.\n", work->ctrl_page_unmap); | ||
33 | vunmap(work->ctrl_page_unmap); | ||
34 | #endif | ||
35 | TRACE("freeing ctrl_page %p\n", work->ctrl_page_mem); | ||
36 | free_page((unsigned long) work->ctrl_page_mem); | ||
37 | |||
38 | kfree((void*) work); | ||
39 | } | ||
40 | |||
41 | void litmus_schedule_deallocation(struct task_struct *t) | ||
42 | { | ||
43 | struct litmus_dealloc_work *work; | ||
44 | |||
45 | if (NULL == tsk_rt(t)->ctrl_page) | ||
46 | return; | ||
47 | |||
48 | work = kmalloc(sizeof(*work), GFP_ATOMIC); | ||
49 | if (!work) { | ||
50 | WARN(1, "Could not allocate LITMUS deallocation work.\n"); | ||
51 | return; | ||
52 | } | ||
53 | |||
54 | INIT_WORK(&work->work_struct, litmus_dealloc); | ||
55 | |||
56 | #ifdef CONFIG_ARCH_NEEDS_UNCACHED_CONTROL_PAGE | ||
57 | work->ctrl_page_mem = tsk_rt(t)->ctrl_page_orig; | ||
58 | work->ctrl_page_unmap = tsk_rt(t)->ctrl_page; | ||
59 | #else | ||
60 | work->ctrl_page_mem = tsk_rt(t)->ctrl_page; | ||
61 | #endif | ||
62 | queue_work(wq_litmus_dealloc, &work->work_struct); | ||
63 | } | ||
64 | |||
65 | #ifdef CONFIG_ARCH_NEEDS_UNCACHED_CONTROL_PAGE | ||
66 | /* | ||
67 | * remap_noncached - creates a non-cached memory "shadow mapping" | ||
68 | * @addr: memory base virtual address | ||
69 | * @len: length to remap | ||
70 | * | ||
71 | * The caller should vunmap(addr) when the mapping is no longer needed. | ||
72 | * The caller should also save the original @addr to free it later. | ||
73 | */ | ||
74 | static void * remap_noncached(void *addr, size_t len) | ||
75 | { | ||
76 | void *vaddr; | ||
77 | int nr_pages = DIV_ROUND_UP(offset_in_page(addr) + len, PAGE_SIZE); | ||
78 | struct page **pages = kmalloc(nr_pages * sizeof(*pages), GFP_KERNEL); | ||
79 | void *page_addr = (void *)((unsigned long)addr & PAGE_MASK); | ||
80 | int i; | ||
81 | |||
82 | if (NULL == pages) { | ||
83 | TRACE_CUR("No memory!\n"); | ||
84 | return ERR_PTR(-ENOMEM); | ||
85 | } | ||
86 | |||
87 | for (i = 0; i < nr_pages; i++) { | ||
88 | if (is_vmalloc_or_module_addr(page_addr)) { | ||
89 | kfree(pages); | ||
90 | TRACE_CUR("Remapping vmalloc or module memory?\n"); | ||
91 | return ERR_PTR(-EINVAL); | ||
92 | } | ||
93 | |||
94 | pages[i] = virt_to_page(page_addr); | ||
95 | if (NULL == pages[i]) { | ||
96 | kfree(pages); | ||
97 | TRACE_CUR("Bad virtual address.\n"); | ||
98 | return ERR_PTR(-EINVAL); | ||
99 | } | ||
100 | page_addr += PAGE_SIZE; | ||
101 | } | ||
102 | |||
103 | vaddr = vmap(pages, nr_pages, VM_MAP, pgprot_noncached(PAGE_KERNEL)); | ||
104 | kfree(pages); | ||
105 | if (NULL == vaddr) { | ||
106 | TRACE_CUR("vmap() failed.\n"); | ||
107 | return ERR_PTR(-ENOMEM); | ||
108 | } | ||
109 | return vaddr + offset_in_page(addr); | ||
110 | } | ||
111 | #endif | ||
112 | |||
13 | /* allocate t->rt_param.ctrl_page*/ | 113 | /* allocate t->rt_param.ctrl_page*/ |
14 | static int alloc_ctrl_page(struct task_struct *t) | 114 | static int alloc_ctrl_page(struct task_struct *t) |
15 | { | 115 | { |
116 | void *mem; | ||
117 | #ifdef CONFIG_ARCH_NEEDS_UNCACHED_CONTROL_PAGE | ||
118 | void *mem_remap; | ||
119 | #endif | ||
16 | int err = 0; | 120 | int err = 0; |
17 | 121 | ||
18 | /* only allocate if the task doesn't have one yet */ | 122 | /* only allocate if the task doesn't have one yet */ |
19 | if (!tsk_rt(t)->ctrl_page) { | 123 | if (!tsk_rt(t)->ctrl_page) { |
20 | tsk_rt(t)->ctrl_page = (void*) get_zeroed_page(GFP_KERNEL); | 124 | mem = (void*) get_zeroed_page(GFP_KERNEL); |
21 | if (!tsk_rt(t)->ctrl_page) | 125 | if (!mem) { |
22 | err = -ENOMEM; | 126 | err = -ENOMEM; |
127 | goto out; | ||
128 | } | ||
129 | |||
130 | #ifdef CONFIG_ARCH_NEEDS_UNCACHED_CONTROL_PAGE | ||
131 | mem_remap = remap_noncached(mem, PAGE_SIZE); | ||
132 | if (IS_ERR(mem_remap)) { | ||
133 | err = PTR_ERR(mem_remap); | ||
134 | free_page((unsigned long) mem); | ||
135 | goto out; | ||
136 | } | ||
137 | tsk_rt(t)->ctrl_page_orig = mem; | ||
138 | tsk_rt(t)->ctrl_page = mem_remap; | ||
139 | TRACE_TASK(t, "ctrl_page_orig = %p\n", | ||
140 | tsk_rt(t)->ctrl_page_orig); | ||
141 | #else | ||
142 | tsk_rt(t)->ctrl_page = mem; | ||
143 | #endif | ||
144 | |||
23 | /* will get de-allocated in task teardown */ | 145 | /* will get de-allocated in task teardown */ |
24 | TRACE_TASK(t, "%s ctrl_page = %p\n", __FUNCTION__, | 146 | TRACE_TASK(t, "%s ctrl_page = %p\n", __FUNCTION__, |
25 | tsk_rt(t)->ctrl_page); | 147 | tsk_rt(t)->ctrl_page); |
26 | } | 148 | } |
149 | out: | ||
27 | return err; | 150 | return err; |
28 | } | 151 | } |
29 | 152 | ||
30 | static int map_ctrl_page(struct task_struct *t, struct vm_area_struct* vma) | 153 | static int map_ctrl_page(struct task_struct *t, struct vm_area_struct* vma) |
31 | { | 154 | { |
155 | struct page *ctrl; | ||
32 | int err; | 156 | int err; |
33 | 157 | ||
34 | struct page* ctrl = virt_to_page(tsk_rt(t)->ctrl_page); | 158 | #ifdef CONFIG_ARCH_NEEDS_UNCACHED_CONTROL_PAGE |
159 | /* vm_insert_page() using the "real" vaddr, not the shadow mapping. */ | ||
160 | ctrl = virt_to_page(tsk_rt(t)->ctrl_page_orig); | ||
161 | #else | ||
162 | ctrl = virt_to_page(tsk_rt(t)->ctrl_page); | ||
163 | #endif | ||
35 | 164 | ||
36 | TRACE_CUR(CTRL_NAME | 165 | TRACE_CUR(CTRL_NAME |
37 | ": mapping %p (pfn:%lx) to 0x%lx (prot:%lx)\n", | 166 | ": mapping %p (pfn:%lx) to 0x%lx (prot:%lx)\n", |
38 | tsk_rt(t)->ctrl_page,page_to_pfn(ctrl), vma->vm_start, | 167 | tsk_rt(t)->ctrl_page, page_to_pfn(ctrl), vma->vm_start, |
39 | vma->vm_page_prot); | 168 | vma->vm_page_prot); |
40 | 169 | ||
41 | /* Map it into the vma. */ | 170 | /* Map it into the vma. */ |
@@ -104,7 +233,11 @@ static int litmus_ctrl_mmap(struct file* filp, struct vm_area_struct* vma) | |||
104 | * don't care if it was touched or not. __S011 means RW access, but not | 233 | * don't care if it was touched or not. __S011 means RW access, but not |
105 | * execute, and avoids copy-on-write behavior. | 234 | * execute, and avoids copy-on-write behavior. |
106 | * See protection_map in mmap.c. */ | 235 | * See protection_map in mmap.c. */ |
236 | #ifdef CONFIG_ARCH_NEEDS_UNCACHED_CONTROL_PAGE | ||
237 | vma->vm_page_prot = pgprot_noncached(__S011); | ||
238 | #else | ||
107 | vma->vm_page_prot = __S011; | 239 | vma->vm_page_prot = __S011; |
240 | #endif | ||
108 | 241 | ||
109 | err = alloc_ctrl_page(current); | 242 | err = alloc_ctrl_page(current); |
110 | if (!err) | 243 | if (!err) |
@@ -137,11 +270,20 @@ static int __init init_litmus_ctrl_dev(void) | |||
137 | err = misc_register(&litmus_ctrl_dev); | 270 | err = misc_register(&litmus_ctrl_dev); |
138 | if (err) | 271 | if (err) |
139 | printk("Could not allocate %s device (%d).\n", CTRL_NAME, err); | 272 | printk("Could not allocate %s device (%d).\n", CTRL_NAME, err); |
273 | |||
274 | wq_litmus_dealloc = alloc_workqueue("litmus_dealloc", | ||
275 | WQ_NON_REENTRANT|WQ_MEM_RECLAIM, 0); | ||
276 | if (NULL == wq_litmus_dealloc) { | ||
277 | printk("Could not allocate vunmap workqueue.\n"); | ||
278 | misc_deregister(&litmus_ctrl_dev); | ||
279 | } | ||
140 | return err; | 280 | return err; |
141 | } | 281 | } |
142 | 282 | ||
143 | static void __exit exit_litmus_ctrl_dev(void) | 283 | static void __exit exit_litmus_ctrl_dev(void) |
144 | { | 284 | { |
285 | flush_workqueue(wq_litmus_dealloc); | ||
286 | destroy_workqueue(wq_litmus_dealloc); | ||
145 | misc_deregister(&litmus_ctrl_dev); | 287 | misc_deregister(&litmus_ctrl_dev); |
146 | } | 288 | } |
147 | 289 | ||
diff --git a/litmus/litmus.c b/litmus/litmus.c index 07d9d70346a0..f8397898ce0a 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/sched.h> | 9 | #include <linux/sched.h> |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
12 | #include <linux/vmalloc.h> | ||
12 | 13 | ||
13 | #include <litmus/litmus.h> | 14 | #include <litmus/litmus.h> |
14 | #include <litmus/bheap.h> | 15 | #include <litmus/bheap.h> |
@@ -388,14 +389,21 @@ static void reinit_litmus_state(struct task_struct* p, int restore) | |||
388 | { | 389 | { |
389 | struct rt_task user_config = {}; | 390 | struct rt_task user_config = {}; |
390 | void* ctrl_page = NULL; | 391 | void* ctrl_page = NULL; |
392 | #ifdef CONFIG_ARCH_NEEDS_UNCACHED_CONTROL_PAGE | ||
393 | void * ctrl_page_orig = NULL; | ||
394 | #endif | ||
395 | |||
391 | void* color_ctrl_page = NULL; | 396 | void* color_ctrl_page = NULL; |
392 | 397 | ||
393 | if (restore) { | 398 | if (restore) { |
394 | /* Safe user-space provided configuration data. | 399 | /* Safe user-space provided configuration data. |
395 | * and allocated page. */ | 400 | * and allocated page. */ |
396 | user_config = p->rt_param.task_params; | 401 | user_config = p->rt_param.task_params; |
397 | ctrl_page = p->rt_param.ctrl_page; | 402 | ctrl_page = p->rt_param.ctrl_page; |
398 | color_ctrl_page = p->rt_param.color_ctrl_page; | 403 | color_ctrl_page = p->rt_param.color_ctrl_page; |
404 | #ifdef CONFIG_ARCH_NEEDS_UNCACHED_CONTROL_PAGE | ||
405 | ctrl_page_orig = p->rt_param.ctrl_page_orig; | ||
406 | #endif | ||
399 | } | 407 | } |
400 | 408 | ||
401 | /* We probably should not be inheriting any task's priority | 409 | /* We probably should not be inheriting any task's priority |
@@ -409,8 +417,11 @@ static void reinit_litmus_state(struct task_struct* p, int restore) | |||
409 | /* Restore preserved fields. */ | 417 | /* Restore preserved fields. */ |
410 | if (restore) { | 418 | if (restore) { |
411 | p->rt_param.task_params = user_config; | 419 | p->rt_param.task_params = user_config; |
412 | p->rt_param.ctrl_page = ctrl_page; | 420 | p->rt_param.ctrl_page = ctrl_page; |
413 | p->rt_param.color_ctrl_page = color_ctrl_page; | 421 | p->rt_param.color_ctrl_page = color_ctrl_page; |
422 | #ifdef CONFIG_ARCH_NEEDS_UNCACHED_CONTROL_PAGE | ||
423 | p->rt_param.ctrl_page_orig = ctrl_page_orig; | ||
424 | #endif | ||
414 | } | 425 | } |
415 | } | 426 | } |
416 | 427 | ||
@@ -560,6 +571,9 @@ void litmus_fork(struct task_struct* p) | |||
560 | /* non-rt tasks might have ctrl_page set */ | 571 | /* non-rt tasks might have ctrl_page set */ |
561 | tsk_rt(p)->ctrl_page = NULL; | 572 | tsk_rt(p)->ctrl_page = NULL; |
562 | tsk_rt(p)->color_ctrl_page = NULL; | 573 | tsk_rt(p)->color_ctrl_page = NULL; |
574 | #ifdef CONFIG_ARCH_NEEDS_UNCACHED_CONTROL_PAGE | ||
575 | tsk_rt(p)->ctrl_page_orig = NULL; | ||
576 | #endif | ||
563 | } | 577 | } |
564 | 578 | ||
565 | /* od tables are never inherited across a fork */ | 579 | /* od tables are never inherited across a fork */ |
@@ -576,31 +590,27 @@ void litmus_exec(void) | |||
576 | 590 | ||
577 | if (is_realtime(p)) { | 591 | if (is_realtime(p)) { |
578 | WARN_ON(p->rt_param.inh_task); | 592 | WARN_ON(p->rt_param.inh_task); |
579 | if (tsk_rt(p)->ctrl_page) { | ||
580 | free_page((unsigned long) tsk_rt(p)->ctrl_page); | ||
581 | tsk_rt(p)->ctrl_page = NULL; | ||
582 | } | ||
583 | if (tsk_rt(p)->color_ctrl_page) { | 593 | if (tsk_rt(p)->color_ctrl_page) { |
584 | free_page((unsigned long) tsk_rt(p)->color_ctrl_page); | 594 | free_page((unsigned long) tsk_rt(p)->color_ctrl_page); |
585 | tsk_rt(p)->color_ctrl_page = NULL; | 595 | tsk_rt(p)->color_ctrl_page = NULL; |
586 | } | 596 | } |
597 | litmus_schedule_deallocation(p); | ||
598 | tsk_rt(p)->ctrl_page = NULL; | ||
599 | #ifdef CONFIG_ARCH_NEEDS_UNCACHED_CONTROL_PAGE | ||
600 | tsk_rt(p)->ctrl_page_orig = NULL; | ||
601 | #endif | ||
587 | } | 602 | } |
588 | } | 603 | } |
589 | 604 | ||
590 | void exit_litmus(struct task_struct *dead_tsk) | 605 | void exit_litmus(struct task_struct *dead_tsk) |
591 | { | 606 | { |
607 | |||
592 | /* We also allow non-RT tasks to | 608 | /* We also allow non-RT tasks to |
593 | * allocate control pages to allow | 609 | * allocate control pages to allow |
594 | * measurements with non-RT tasks. | 610 | * measurements with non-RT tasks. |
595 | * So check if we need to free the page | 611 | * So check if we need to free the page |
596 | * in any case. | 612 | * in any case. |
597 | */ | 613 | */ |
598 | if (tsk_rt(dead_tsk)->ctrl_page) { | ||
599 | TRACE_TASK(dead_tsk, | ||
600 | "freeing ctrl_page %p\n", | ||
601 | tsk_rt(dead_tsk)->ctrl_page); | ||
602 | free_page((unsigned long) tsk_rt(dead_tsk)->ctrl_page); | ||
603 | } | ||
604 | if (tsk_rt(dead_tsk)->color_ctrl_page) { | 614 | if (tsk_rt(dead_tsk)->color_ctrl_page) { |
605 | TRACE_TASK(dead_tsk, | 615 | TRACE_TASK(dead_tsk, |
606 | "freeing color_ctrl_page %p\n", | 616 | "freeing color_ctrl_page %p\n", |
@@ -616,6 +626,7 @@ void exit_litmus(struct task_struct *dead_tsk) | |||
616 | if (tsk_rt(dead_tsk)->mc_data) | 626 | if (tsk_rt(dead_tsk)->mc_data) |
617 | kmem_cache_free(mc_data_cache, tsk_rt(dead_tsk)->mc_data); | 627 | kmem_cache_free(mc_data_cache, tsk_rt(dead_tsk)->mc_data); |
618 | #endif | 628 | #endif |
629 | litmus_schedule_deallocation(dead_tsk); | ||
619 | 630 | ||
620 | /* main cleanup only for RT tasks */ | 631 | /* main cleanup only for RT tasks */ |
621 | if (is_realtime(dead_tsk)) | 632 | if (is_realtime(dead_tsk)) |