author    | Christopher Kenna <cjk@cs.unc.edu> | 2012-04-01 19:24:46 -0400
committer | Christopher Kenna <cjk@cs.unc.edu> | 2012-04-08 14:32:31 -0400
commit    | ae95a4582d707de8a57a8159ea81b16ba7bddd54 (patch)
tree      | 2d4af6e521e655e52cb51476ccf7c20d1d8cfe9c /litmus
parent    | db4d9fcd3dfbda54b351ef42c13d93a00009784f (diff)
Page reclaiming, control devices, cleanup.
Track allocated pages, add a proc handler to reclaim free pages, and add a
control device for allocating colored memory with mmap.
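As a rough illustration (not part of this commit), a userspace task might drive the new interfaces as sketched below. The device names follow from ALLOC_NAME and CTRL_NAME in color_dev.c ("litmus/color_alloc" and "litmus/color_ctrl", registered as misc devices); the proc paths are a guess, since the sysctl directory table (litmus_dir_table) is not visible in this diff.

/* Hypothetical usage sketch -- not part of this commit; paths are assumptions. */
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const long page = sysconf(_SC_PAGESIZE);
	int fd, proc;
	void *mem;

	/* 1. Top up the per-color free lists via the add_pages sysctl. */
	proc = open("/proc/sys/litmus/color/add_pages", O_WRONLY); /* path assumed */
	if (proc >= 0) {
		write(proc, "1", 1);
		close(proc);
	}

	/* 2. Map four colored pages from the allocation device.  The driver
	 *    rejects shared mappings (mmap_common_checks), so MAP_PRIVATE is
	 *    required, and the length must be a multiple of the page size. */
	fd = open("/dev/litmus/color_alloc", O_RDWR);
	if (fd < 0)
		return 1;
	mem = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	if (mem == MAP_FAILED)
		return 1;
	memset(mem, 0, 4 * page);   /* use the colored memory */
	munmap(mem, 4 * page);
	close(fd);

	/* 3. Ask the kernel to move pages that are no longer mapped anywhere
	 *    back onto the color free lists. */
	proc = open("/proc/sys/litmus/color/reclaim_pages", O_WRONLY); /* path assumed */
	if (proc >= 0) {
		write(proc, "1", 1);
		close(proc);
	}
	return 0;
}

mmap() on the color_ctrl device works the same way, but is limited to exactly one page per task.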
Diffstat (limited to 'litmus')
-rw-r--r--  litmus/Makefile      |   3
-rw-r--r--  litmus/color.c       |  82
-rw-r--r--  litmus/color_dev.c   | 312
-rw-r--r--  litmus/color_proc.c  |  24
-rw-r--r--  litmus/litmus.c      |  25
5 files changed, 421 insertions, 25 deletions
diff --git a/litmus/Makefile b/litmus/Makefile
index e4c937bc2850..2d77d11e905e 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -19,7 +19,8 @@ obj-y = sched_plugin.o litmus.o \ | |||
19 | sched_gsn_edf.o \ | 19 | sched_gsn_edf.o \ |
20 | sched_psn_edf.o \ | 20 | sched_psn_edf.o \ |
21 | color.o \ | 21 | color.o \ |
22 | color_proc.o | 22 | color_proc.o \ |
23 | color_dev.o | ||
23 | 24 | ||
24 | obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o | 25 | obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o |
25 | obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o | 26 | obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o |
diff --git a/litmus/color.c b/litmus/color.c
index aefb76e36626..a3cc193418c0 100644
--- a/litmus/color.c
+++ b/litmus/color.c
@@ -1,15 +1,14 @@ | |||
1 | #include <linux/spinlock.h> | 1 | #include <linux/spinlock.h> |
2 | 2 | ||
3 | #include <linux/mmzone.h> | ||
4 | #include <linux/module.h> | 3 | #include <linux/module.h> |
5 | #include <linux/cpu.h> | 4 | #include <linux/mm.h> |
6 | #include <linux/mm_types.h> | ||
7 | #include <linux/slab.h> | 5 | #include <linux/slab.h> |
8 | #include <linux/page-flags.h> | ||
9 | #include <linux/sysctl.h> | 6 | #include <linux/sysctl.h> |
10 | #include <linux/lockdep.h> | 7 | #include <linux/lockdep.h> |
8 | #include <linux/sched.h> /* required by litmus.h */ | ||
11 | 9 | ||
12 | #include <litmus/color.h> | 10 | #include <litmus/color.h> |
11 | #include <litmus/litmus.h> /* for in_list(...) */ | ||
13 | 12 | ||
14 | #define MPRINT(fmt, args...) \ | 13 | #define MPRINT(fmt, args...) \ |
15 | printk(KERN_INFO "[%s@%s:%d]: " fmt, \ | 14 | printk(KERN_INFO "[%s@%s:%d]: " fmt, \ |
@@ -31,13 +30,20 @@ struct color_group { | |||
31 | atomic_t nr_pages; | 30 | atomic_t nr_pages; |
32 | }; | 31 | }; |
33 | 32 | ||
33 | static struct alloced_pages { | ||
34 | spinlock_t lock; | ||
35 | struct list_head list; | ||
36 | } alloced_pages; | ||
37 | |||
34 | static unsigned long color_mask; | 38 | static unsigned long color_mask; |
35 | static struct color_group *color_groups; | 39 | static struct color_group *color_groups; |
36 | 40 | ||
41 | |||
37 | /* non-static: extern'ed in various files */ | 42 | /* non-static: extern'ed in various files */ |
38 | unsigned long nr_colors; | 43 | unsigned long nr_colors; |
39 | struct color_cache_info color_cache_info; | 44 | struct color_cache_info color_cache_info; |
40 | int color_sysctl_add_pages_data; | 45 | int color_sysctl_add_pages_data; |
46 | int color_sysctl_reclaim_pages_data; | ||
41 | 47 | ||
42 | static inline unsigned long page_color(struct page *page) | 48 | static inline unsigned long page_color(struct page *page) |
43 | { | 49 | { |
@@ -48,12 +54,25 @@ void add_page_to_color_list(struct page *page) | |||
48 | { | 54 | { |
49 | const unsigned long color = page_color(page); | 55 | const unsigned long color = page_color(page); |
50 | struct color_group *cgroup = &color_groups[color]; | 56 | struct color_group *cgroup = &color_groups[color]; |
57 | BUG_ON(in_list(&page->lru) || PageLRU(page)); | ||
58 | BUG_ON(page_mapped(page) || page_count(page) > 1); | ||
51 | spin_lock(&cgroup->lock); | 59 | spin_lock(&cgroup->lock); |
52 | list_add_tail(&page->lru, &cgroup->list); | 60 | list_add_tail(&page->lru, &cgroup->list); |
53 | atomic_inc(&cgroup->nr_pages); | 61 | atomic_inc(&cgroup->nr_pages); |
62 | SetPageLRU(page); | ||
54 | spin_unlock(&cgroup->lock); | 63 | spin_unlock(&cgroup->lock); |
55 | } | 64 | } |
56 | 65 | ||
66 | void add_page_to_alloced_list(struct page *page) | ||
67 | { | ||
68 | BUG_ON(in_list(&page->lru) || PageLRU(page)); | ||
69 | BUG_ON(!page_mapped(page) || page_count(page) < 2); | ||
70 | spin_lock(&alloced_pages.lock); | ||
71 | list_add_tail(&page->lru, &alloced_pages.list); | ||
72 | SetPageLRU(page); | ||
73 | spin_unlock(&alloced_pages.lock); | ||
74 | } | ||
75 | |||
57 | struct page* get_colored_page(unsigned long color) | 76 | struct page* get_colored_page(unsigned long color) |
58 | { | 77 | { |
59 | struct color_group *cgroup; | 78 | struct color_group *cgroup; |
@@ -66,8 +85,10 @@ struct page* get_colored_page(unsigned long color) | |||
66 | goto out_unlock; | 85 | goto out_unlock; |
67 | } | 86 | } |
68 | page = list_first_entry(&cgroup->list, struct page, lru); | 87 | page = list_first_entry(&cgroup->list, struct page, lru); |
88 | BUG_ON(page_mapped(page) || page_count(page) > 1); | ||
69 | list_del(&page->lru); | 89 | list_del(&page->lru); |
70 | atomic_dec(&cgroup->nr_pages); | 90 | atomic_dec(&cgroup->nr_pages); |
91 | ClearPageLRU(page); | ||
71 | out_unlock: | 92 | out_unlock: |
72 | spin_unlock(&cgroup->lock); | 93 | spin_unlock(&cgroup->lock); |
73 | return page; | 94 | return page; |
@@ -88,12 +109,10 @@ static unsigned long smallest_nr_pages(void) | |||
88 | static int do_add_pages(void) | 109 | static int do_add_pages(void) |
89 | { | 110 | { |
90 | struct page *page, *page_tmp; | 111 | struct page *page, *page_tmp; |
91 | struct list_head free_later; | 112 | LIST_HEAD(free_later); |
92 | unsigned long color; | 113 | unsigned long color; |
93 | int ret = 0; | 114 | int ret = 0; |
94 | 115 | ||
95 | INIT_LIST_HEAD(&free_later); | ||
96 | |||
97 | while (smallest_nr_pages() < PAGES_PER_COLOR) { | 116 | while (smallest_nr_pages() < PAGES_PER_COLOR) { |
98 | page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | | 117 | page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | |
99 | __GFP_MOVABLE); | 118 | __GFP_MOVABLE); |
@@ -116,6 +135,25 @@ out: | |||
116 | return ret; | 135 | return ret; |
117 | } | 136 | } |
118 | 137 | ||
138 | static int do_reclaim_pages(void) | ||
139 | { | ||
140 | struct page *page, *page_tmp; | ||
141 | unsigned long nr_reclaimed = 0; | ||
142 | spin_lock(&alloced_pages.lock); | ||
143 | list_for_each_entry_safe(page, page_tmp, &alloced_pages.list, lru) { | ||
144 | if (1 == page_count(page) && !page_mapped(page)) { | ||
145 | list_del(&page->lru); | ||
146 | ClearPageLRU(page); | ||
147 | add_page_to_color_list(page); | ||
148 | nr_reclaimed++; | ||
149 | TRACE_CUR("Reclaimed page (pfn:%lu phys:0x%lx).\n", | ||
150 | page_to_pfn(page), page_to_phys(page)); | ||
151 | } | ||
152 | } | ||
153 | spin_unlock(&alloced_pages.lock); | ||
154 | TRACE_CUR("Reclaimed %lu pages.\n", nr_reclaimed); | ||
155 | return 0; | ||
156 | } | ||
119 | 157 | ||
120 | /*********************************************************** | 158 | /*********************************************************** |
121 | * Proc | 159 | * Proc |
@@ -158,6 +196,19 @@ out: | |||
158 | return ret; | 196 | return ret; |
159 | } | 197 | } |
160 | 198 | ||
199 | int color_reclaim_pages_handler(struct ctl_table *table, int write, | ||
200 | void __user *buffer, size_t *lenp, loff_t *ppos) | ||
201 | { | ||
202 | int ret = 0; | ||
203 | ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); | ||
204 | if (ret) | ||
205 | goto out; | ||
206 | if (write && color_sysctl_reclaim_pages_data) | ||
207 | ret = do_reclaim_pages(); | ||
208 | out: | ||
209 | return ret; | ||
210 | } | ||
211 | |||
161 | /*********************************************************** | 212 | /*********************************************************** |
162 | * Initialization | 213 | * Initialization |
163 | ***********************************************************/ | 214 | ***********************************************************/ |
@@ -181,7 +232,7 @@ static void __init init_mask(void) | |||
181 | color_mask = ((color_cache_info.sets << line_size_log) - 1) ^ | 232 | color_mask = ((color_cache_info.sets << line_size_log) - 1) ^ |
182 | (PAGE_SIZE - 1); | 233 | (PAGE_SIZE - 1); |
183 | nr_colors = (color_mask >> PAGE_SHIFT) + 1; | 234 | nr_colors = (color_mask >> PAGE_SHIFT) + 1; |
184 | MPRINT("color mask: 0x%lx total colors: %lu\n", color_mask, | 235 | printk("Color mask: 0x%lx Total colors: %lu\n", color_mask, |
185 | nr_colors); | 236 | nr_colors); |
186 | BUG_ON(LOCKDEP_MAX_NR_COLORS < nr_colors); | 237 | BUG_ON(LOCKDEP_MAX_NR_COLORS < nr_colors); |
187 | } | 238 | } |
@@ -214,18 +265,23 @@ out: | |||
214 | static int __init init_color(void) | 265 | static int __init init_color(void) |
215 | { | 266 | { |
216 | int ret = 0; | 267 | int ret = 0; |
268 | printk("Initializing LITMUS^RT cache coloring.\n"); | ||
217 | 269 | ||
218 | MPRINT("Cache size: %lu line-size: %lu ways: %lu sets: %lu\n", | 270 | BUG_ON(color_cache_info.size <= 1048576 || |
271 | color_cache_info.ways < 15 || | ||
272 | color_cache_info.line_size != 64); | ||
273 | INIT_LIST_HEAD(&alloced_pages.list); | ||
274 | spin_lock_init(&alloced_pages.lock); | ||
275 | |||
276 | printk("Cache size: %lu line-size: %lu ways: %lu sets: %lu\n", | ||
219 | color_cache_info.size, color_cache_info.line_size, | 277 | color_cache_info.size, color_cache_info.line_size, |
220 | color_cache_info.ways, color_cache_info.sets); | 278 | color_cache_info.ways, color_cache_info.sets); |
221 | if (!color_cache_info.size){ | 279 | if (!color_cache_info.size) { |
222 | printk(KERN_WARNING "No cache information found.\n"); | 280 | printk(KERN_WARNING "No cache information found.\n"); |
223 | ret = -EINVAL; | 281 | ret = -EINVAL; |
224 | goto out; | 282 | goto out; |
225 | } | 283 | } |
226 | BUG_ON(color_cache_info.size <= 1048576 || | 284 | |
227 | color_cache_info.ways < 15 || | ||
228 | color_cache_info.line_size != 64); | ||
229 | init_mask(); | 285 | init_mask(); |
230 | ret = init_color_groups(); | 286 | ret = init_color_groups(); |
231 | out: | 287 | out: |
diff --git a/litmus/color_dev.c b/litmus/color_dev.c
new file mode 100644
index 000000000000..b8218b6d1d9c
--- /dev/null
+++ b/litmus/color_dev.c
@@ -0,0 +1,312 @@ | |||
1 | #include <linux/sched.h> | ||
2 | #include <linux/mm.h> | ||
3 | #include <linux/fs.h> | ||
4 | #include <linux/miscdevice.h> | ||
5 | #include <linux/spinlock.h> | ||
6 | #include <linux/module.h> | ||
7 | |||
8 | #include <litmus/litmus.h> | ||
9 | #include <litmus/color.h> | ||
10 | |||
11 | #define ALLOC_NAME "litmus/color_alloc" | ||
12 | #define CTRL_NAME "litmus/color_ctrl" | ||
13 | |||
14 | static struct non_rt_colors { | ||
15 | spinlock_t lock; | ||
16 | unsigned long color; | ||
17 | } non_rt_colors; | ||
18 | |||
19 | extern unsigned long nr_colors; | ||
20 | |||
21 | /*********************************************************** | ||
22 | * Control device | ||
23 | ***********************************************************/ | ||
24 | |||
25 | static void litmus_color_ctrl_vm_close(struct vm_area_struct *vma) | ||
26 | { | ||
27 | TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__, | ||
28 | vma->vm_flags, pgprot_val(vma->vm_page_prot)); | ||
29 | |||
30 | TRACE_CUR(CTRL_NAME ": %p:%p vma:%p vma->vm_private_data:%p closed.\n", | ||
31 | (void*) vma->vm_start, (void*) vma->vm_end, vma, | ||
32 | vma->vm_private_data); | ||
33 | } | ||
34 | |||
35 | static int litmus_color_ctrl_vm_fault(struct vm_area_struct *vma, | ||
36 | struct vm_fault *vmf) | ||
37 | { | ||
38 | /* This function should never be called, since | ||
39 | * all pages should have been mapped by mmap() | ||
40 | * already. */ | ||
41 | TRACE_CUR("%s flags=0x%lx\n", __FUNCTION__, vma->vm_flags); | ||
42 | printk(KERN_WARNING "fault: %s flags=0x%lx\n", __FUNCTION__, | ||
43 | vma->vm_flags); | ||
44 | |||
45 | /* nope, you only get one page */ | ||
46 | return VM_FAULT_SIGBUS; | ||
47 | } | ||
48 | |||
49 | static struct vm_operations_struct litmus_color_ctrl_vm_ops = { | ||
50 | .close = litmus_color_ctrl_vm_close, | ||
51 | .fault = litmus_color_ctrl_vm_fault, | ||
52 | }; | ||
53 | |||
54 | static int mmap_common_checks(struct vm_area_struct *vma) | ||
55 | { | ||
56 | /* you can only map the "first" page */ | ||
57 | if (vma->vm_pgoff != 0) | ||
58 | return -EINVAL; | ||
59 | |||
60 | /* you can't share it with anyone */ | ||
61 | if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) | ||
62 | return -EINVAL; | ||
63 | |||
64 | return 0; | ||
65 | } | ||
66 | |||
67 | static int alloc_color_ctrl_page(void) | ||
68 | { | ||
69 | struct task_struct *t; | ||
70 | int err = 0; | ||
71 | |||
72 | t = current; | ||
73 | /* only allocate if the task doesn't have one yet */ | ||
74 | if (!tsk_rt(t)->color_ctrl_page) { | ||
75 | tsk_rt(t)->color_ctrl_page = (void*) get_zeroed_page(GFP_KERNEL); | ||
76 | if (!tsk_rt(t)->color_ctrl_page) | ||
77 | err = -ENOMEM; | ||
78 | /* will get de-allocated in task teardown */ | ||
79 | TRACE_TASK(t, "%s color_ctrl_page = %p\n", __FUNCTION__, | ||
80 | tsk_rt(t)->color_ctrl_page); | ||
81 | } | ||
82 | return err; | ||
83 | } | ||
84 | |||
85 | static int map_color_ctrl_page(struct vm_area_struct *vma) | ||
86 | { | ||
87 | int err; | ||
88 | unsigned long pfn; | ||
89 | struct task_struct *t = current; | ||
90 | struct page *color_ctrl = virt_to_page(tsk_rt(t)->color_ctrl_page); | ||
91 | |||
92 | t = current; | ||
93 | /* Increase ref count. Is decreased when vma is destroyed. */ | ||
94 | get_page(color_ctrl); | ||
95 | pfn = page_to_pfn(color_ctrl); | ||
96 | |||
97 | TRACE_CUR(CTRL_NAME | ||
98 | ": mapping %p (pfn:%lx, %lx) to 0x%lx (prot:%lx)\n", | ||
99 | tsk_rt(t)->color_ctrl_page, pfn, page_to_pfn(color_ctrl), | ||
100 | vma->vm_start, vma->vm_page_prot); | ||
101 | |||
102 | /* Map it into the vma. Make sure to use PAGE_SHARED, otherwise | ||
103 | * userspace actually gets a copy-on-write page. */ | ||
104 | err = remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE, PAGE_SHARED); | ||
105 | TRACE_CUR("page shared: guess:0x1(63)...1??111 actual:0x%lx\n", PAGE_SHARED); | ||
106 | /* present, RW, user, accessed, NX=63 */ | ||
107 | |||
108 | if (err) | ||
109 | TRACE_CUR(CTRL_NAME ": remap_pfn_range() failed (%d)\n", err); | ||
110 | |||
111 | return err; | ||
112 | } | ||
113 | |||
114 | static int litmus_color_ctrl_mmap(struct file *filp, struct vm_area_struct *vma) | ||
115 | { | ||
116 | int err = 0; | ||
117 | |||
118 | /* you can only get one page */ | ||
119 | if (vma->vm_end - vma->vm_start != PAGE_SIZE) { | ||
120 | err = -EINVAL; | ||
121 | goto out; | ||
122 | } | ||
123 | |||
124 | err = mmap_common_checks(vma); | ||
125 | if (err) | ||
126 | goto out; | ||
127 | |||
128 | vma->vm_ops = &litmus_color_ctrl_vm_ops; | ||
129 | /* this mapping should not be kept across forks, | ||
130 | * and cannot be expanded */ | ||
131 | vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; | ||
132 | |||
133 | err = alloc_color_ctrl_page(); | ||
134 | if (!err) | ||
135 | err = map_color_ctrl_page(vma); | ||
136 | |||
137 | TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__, vma->vm_flags, | ||
138 | pgprot_val(vma->vm_page_prot)); | ||
139 | out: | ||
140 | return err; | ||
141 | } | ||
142 | |||
143 | |||
144 | /*********************************************************** | ||
145 | * Allocation device | ||
146 | ***********************************************************/ | ||
147 | |||
148 | static int map_colored_pages_non_rt(struct vm_area_struct *vma) | ||
149 | { | ||
150 | unsigned long color, mapped; | ||
151 | int err; | ||
152 | const unsigned long nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | ||
153 | |||
154 | spin_lock(&non_rt_colors.lock); | ||
155 | color = non_rt_colors.color; | ||
156 | non_rt_colors.color = (non_rt_colors.color + nr_pages) % nr_colors; | ||
157 | spin_unlock(&non_rt_colors.lock); | ||
158 | |||
159 | TRACE_CUR(ALLOC_NAME ": allocating %lu pages from color %lu.\n", | ||
160 | nr_pages, color); | ||
161 | |||
162 | for (mapped = 0; mapped < nr_pages; | ||
163 | mapped++, color = (color + 1) % nr_colors) | ||
164 | { | ||
165 | struct page *page = get_colored_page(color); | ||
166 | const unsigned long addr = vma->vm_start + PAGE_SIZE * mapped; | ||
167 | |||
168 | if (!page) { | ||
169 | TRACE_CUR(ALLOC_NAME ": Could not get page with " | ||
170 | " color %lu.\n", color); | ||
171 | /* TODO unmap mapped pages */ | ||
172 | break; | ||
173 | } | ||
174 | TRACE_CUR(ALLOC_NAME ": insert page (pa:0x%llx, pfn:%lu, " | ||
175 | "color:%lu count:%d LRU:%d) at 0x%lx " | ||
176 | "(prot: 0x%lx)\n", | ||
177 | page_to_phys(page), page_to_pfn(page), color, | ||
178 | page_count(page), PageLRU(page), addr, | ||
179 | pgprot_val(vma->vm_page_prot)); | ||
180 | err = vm_insert_page(vma, addr, page); | ||
181 | if (err) { | ||
182 | TRACE_CUR(ALLOC_NAME ": vm_insert_page() failed " | ||
183 | "(%d)\n", err); | ||
184 | /* TODO unmap mapped pages */ | ||
185 | break; | ||
186 | } | ||
187 | add_page_to_alloced_list(page); | ||
188 | } | ||
189 | return err; | ||
190 | } | ||
191 | |||
192 | static int map_colored_pages_rt(struct vm_area_struct *vma) | ||
193 | { | ||
194 | /* TODO */ | ||
195 | return -EINVAL; | ||
196 | } | ||
197 | |||
198 | static int map_colored_pages(struct vm_area_struct *vma) | ||
199 | { | ||
200 | if (likely(is_realtime(current))) | ||
201 | return map_colored_pages_rt(vma); | ||
202 | return map_colored_pages_non_rt(vma); | ||
203 | } | ||
204 | |||
205 | static void litmus_color_alloc_vm_close(struct vm_area_struct *vma) | ||
206 | { | ||
207 | TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__, | ||
208 | vma->vm_flags, pgprot_val(vma->vm_page_prot)); | ||
209 | |||
210 | TRACE_CUR(ALLOC_NAME ": %p:%p vma:%p vma->vm_private_data:%p closed.\n", | ||
211 | (void*) vma->vm_start, (void*) vma->vm_end, vma, | ||
212 | vma->vm_private_data); | ||
213 | } | ||
214 | |||
215 | static int litmus_color_alloc_vm_fault(struct vm_area_struct *vma, | ||
216 | struct vm_fault *vmf) | ||
217 | { | ||
218 | /* This function should never be called, since | ||
219 | * all pages should have been mapped by mmap() | ||
220 | * already. */ | ||
221 | TRACE_CUR("%s flags=0x%lx\n", __FUNCTION__, vma->vm_flags); | ||
222 | printk(KERN_WARNING "fault: %s flags=0x%lx\n", __FUNCTION__, | ||
223 | vma->vm_flags); | ||
224 | |||
225 | /* nope, you only get one page */ | ||
226 | return VM_FAULT_SIGBUS; | ||
227 | } | ||
228 | |||
229 | static struct vm_operations_struct litmus_color_alloc_vm_ops = { | ||
230 | .close = litmus_color_alloc_vm_close, | ||
231 | .fault = litmus_color_alloc_vm_fault, | ||
232 | }; | ||
233 | |||
234 | static int litmus_color_alloc_mmap(struct file *filp, struct vm_area_struct *vma) | ||
235 | { | ||
236 | int err = 0; | ||
237 | |||
238 | /* you may only request integer multiple of PAGE_SIZE */ | ||
239 | if (offset_in_page(vma->vm_end - vma->vm_start)) { | ||
240 | err = -EINVAL; | ||
241 | goto out; | ||
242 | } | ||
243 | |||
244 | err = mmap_common_checks(vma); | ||
245 | if (err) | ||
246 | goto out; | ||
247 | |||
248 | vma->vm_ops = &litmus_color_alloc_vm_ops; | ||
249 | /* this mapping should not be kept across forks, | ||
250 | * and cannot be expanded */ | ||
251 | vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; | ||
252 | |||
253 | err = map_colored_pages(vma); | ||
254 | |||
255 | TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__, vma->vm_flags, | ||
256 | pgprot_val(vma->vm_page_prot)); | ||
257 | out: | ||
258 | return err; | ||
259 | } | ||
260 | |||
261 | /*********************************************************** | ||
262 | * Initialization | ||
263 | ***********************************************************/ | ||
264 | |||
265 | static struct file_operations litmus_color_ctrl_fops = { | ||
266 | .owner = THIS_MODULE, | ||
267 | .mmap = litmus_color_ctrl_mmap, | ||
268 | }; | ||
269 | |||
270 | static struct miscdevice litmus_color_ctrl_dev = { | ||
271 | .name = CTRL_NAME, | ||
272 | .minor = MISC_DYNAMIC_MINOR, | ||
273 | .fops = &litmus_color_ctrl_fops, | ||
274 | }; | ||
275 | |||
276 | static struct file_operations litmus_color_alloc_fops = { | ||
277 | .owner = THIS_MODULE, | ||
278 | .mmap = litmus_color_alloc_mmap, | ||
279 | }; | ||
280 | |||
281 | static struct miscdevice litmus_color_alloc_dev = { | ||
282 | .name = ALLOC_NAME, | ||
283 | .minor = MISC_DYNAMIC_MINOR, | ||
284 | .fops = &litmus_color_alloc_fops, | ||
285 | }; | ||
286 | |||
287 | static int __init init_dev(const char* name, struct miscdevice *dev) | ||
288 | { | ||
289 | int err; | ||
290 | err = misc_register(dev); | ||
291 | if (err) | ||
292 | printk(KERN_WARNING "Could not allocate %s device (%d).\n", | ||
293 | name, err); | ||
294 | return err; | ||
295 | } | ||
296 | |||
297 | static int __init init_color_devices(void) | ||
298 | { | ||
299 | int err; | ||
300 | spin_lock_init(&non_rt_colors.lock); | ||
301 | non_rt_colors.color = 0; | ||
302 | |||
303 | printk("Allocating LITMUS^RT color devices.\n"); | ||
304 | err = init_dev(ALLOC_NAME, &litmus_color_alloc_dev); | ||
305 | if (err) | ||
306 | goto out; | ||
307 | err = init_dev(CTRL_NAME, &litmus_color_ctrl_dev); | ||
308 | out: | ||
309 | return err; | ||
310 | } | ||
311 | |||
312 | module_init(init_color_devices); | ||
diff --git a/litmus/color_proc.c b/litmus/color_proc.c
index 31eec0d728a5..cac336ac1731 100644
--- a/litmus/color_proc.c
+++ b/litmus/color_proc.c
@@ -5,14 +5,23 @@ | |||
5 | #include <litmus/color.h> | 5 | #include <litmus/color.h> |
6 | 6 | ||
7 | extern int color_sysctl_add_pages_data; /* litmus/color.c */ | 7 | extern int color_sysctl_add_pages_data; /* litmus/color.c */ |
8 | extern int color_sysctl_reclaim_pages_data; /* litmus/color.c */ | ||
8 | 9 | ||
9 | static int zero = 0; | 10 | static int zero = 0; |
10 | static int one = 1; | 11 | static int one = 1; |
11 | 12 | ||
12 | #define NR_PAGES_INDEX 1 /* location of nr_pages in the table below */ | 13 | #define NR_PAGES_INDEX 0 /* location of nr_pages in the table below */ |
13 | static struct ctl_table color_table[] = | 14 | static struct ctl_table color_table[] = |
14 | { | 15 | { |
15 | { | 16 | { |
17 | /* you MUST update NR_PAGES_INDEX if you move this entry */ | ||
18 | .procname = "nr_pages", | ||
19 | .mode = 0444, | ||
20 | .proc_handler = color_nr_pages_handler, | ||
21 | .data = NULL, /* dynamically set later */ | ||
22 | .maxlen = 0, /* also set later */ | ||
23 | }, | ||
24 | { | ||
16 | .procname = "add_pages", | 25 | .procname = "add_pages", |
17 | .data = &color_sysctl_add_pages_data, | 26 | .data = &color_sysctl_add_pages_data, |
18 | .maxlen = sizeof(int), | 27 | .maxlen = sizeof(int), |
@@ -22,11 +31,13 @@ static struct ctl_table color_table[] = | |||
22 | .extra2 = &one, | 31 | .extra2 = &one, |
23 | }, | 32 | }, |
24 | { | 33 | { |
25 | .procname = "nr_pages", | 34 | .procname = "reclaim_pages", |
26 | .mode = 0444, | 35 | .data = &color_sysctl_reclaim_pages_data, |
27 | .proc_handler = color_nr_pages_handler, | 36 | .maxlen = sizeof(int), |
28 | .data = NULL, /* dynamically later */ | 37 | .mode = 0644, |
29 | .maxlen = 0, /* also set later */ | 38 | .proc_handler = color_reclaim_pages_handler, |
39 | .extra1 = &zero, | ||
40 | .extra2 = &one, | ||
30 | }, | 41 | }, |
31 | { } | 42 | { } |
32 | }; | 43 | }; |
@@ -71,6 +82,7 @@ static struct ctl_table_header *litmus_sysctls; | |||
71 | static int __init litmus_sysctl_init(void) | 82 | static int __init litmus_sysctl_init(void) |
72 | { | 83 | { |
73 | int ret = 0; | 84 | int ret = 0; |
85 | printk(KERN_INFO "Registering LITMUS^RT proc sysctl.\n"); | ||
74 | litmus_sysctls = register_sysctl_table(litmus_dir_table); | 86 | litmus_sysctls = register_sysctl_table(litmus_dir_table); |
75 | if (!litmus_sysctls) { | 87 | if (!litmus_sysctls) { |
76 | printk(KERN_WARNING "Could not register LITMUS^RT sysctl.\n"); | 88 | printk(KERN_WARNING "Could not register LITMUS^RT sysctl.\n"); |
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 301390148d02..eaa2070d28ce 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -291,12 +291,14 @@ static void reinit_litmus_state(struct task_struct* p, int restore) | |||
291 | { | 291 | { |
292 | struct rt_task user_config = {}; | 292 | struct rt_task user_config = {}; |
293 | void* ctrl_page = NULL; | 293 | void* ctrl_page = NULL; |
294 | void* color_ctrl_page = NULL; | ||
294 | 295 | ||
295 | if (restore) { | 296 | if (restore) { |
296 | /* Safe user-space provided configuration data. | 297 | /* Safe user-space provided configuration data. |
297 | * and allocated page. */ | 298 | * and allocated page. */ |
298 | user_config = p->rt_param.task_params; | 299 | user_config = p->rt_param.task_params; |
299 | ctrl_page = p->rt_param.ctrl_page; | 300 | ctrl_page = p->rt_param.ctrl_page; |
301 | color_ctrl_page = p->rt_param.color_ctrl_page; | ||
300 | } | 302 | } |
301 | 303 | ||
302 | /* We probably should not be inheriting any task's priority | 304 | /* We probably should not be inheriting any task's priority |
@@ -309,8 +311,9 @@ static void reinit_litmus_state(struct task_struct* p, int restore) | |||
309 | 311 | ||
310 | /* Restore preserved fields. */ | 312 | /* Restore preserved fields. */ |
311 | if (restore) { | 313 | if (restore) { |
312 | p->rt_param.task_params = user_config; | 314 | p->rt_param.task_params = user_config; |
313 | p->rt_param.ctrl_page = ctrl_page; | 315 | p->rt_param.ctrl_page = ctrl_page; |
316 | p->rt_param.color_ctrl_page = color_ctrl_page; | ||
314 | } | 317 | } |
315 | } | 318 | } |
316 | 319 | ||
@@ -451,9 +454,11 @@ void litmus_fork(struct task_struct* p) | |||
451 | reinit_litmus_state(p, 0); | 454 | reinit_litmus_state(p, 0); |
452 | /* Don't let the child be a real-time task. */ | 455 | /* Don't let the child be a real-time task. */ |
453 | p->sched_reset_on_fork = 1; | 456 | p->sched_reset_on_fork = 1; |
454 | } else | 457 | } else { |
455 | /* non-rt tasks might have ctrl_page set */ | 458 | /* non-rt tasks might have ctrl_page set */ |
456 | tsk_rt(p)->ctrl_page = NULL; | 459 | tsk_rt(p)->ctrl_page = NULL; |
460 | tsk_rt(p)->color_ctrl_page = NULL; | ||
461 | } | ||
457 | 462 | ||
458 | /* od tables are never inherited across a fork */ | 463 | /* od tables are never inherited across a fork */ |
459 | p->od_table = NULL; | 464 | p->od_table = NULL; |
@@ -473,6 +478,10 @@ void litmus_exec(void) | |||
473 | free_page((unsigned long) tsk_rt(p)->ctrl_page); | 478 | free_page((unsigned long) tsk_rt(p)->ctrl_page); |
474 | tsk_rt(p)->ctrl_page = NULL; | 479 | tsk_rt(p)->ctrl_page = NULL; |
475 | } | 480 | } |
481 | if (tsk_rt(p)->color_ctrl_page) { | ||
482 | free_page((unsigned long) tsk_rt(p)->color_ctrl_page); | ||
483 | tsk_rt(p)->color_ctrl_page = NULL; | ||
484 | } | ||
476 | } | 485 | } |
477 | } | 486 | } |
478 | 487 | ||
@@ -490,6 +499,12 @@ void exit_litmus(struct task_struct *dead_tsk) | |||
490 | tsk_rt(dead_tsk)->ctrl_page); | 499 | tsk_rt(dead_tsk)->ctrl_page); |
491 | free_page((unsigned long) tsk_rt(dead_tsk)->ctrl_page); | 500 | free_page((unsigned long) tsk_rt(dead_tsk)->ctrl_page); |
492 | } | 501 | } |
502 | if (tsk_rt(dead_tsk)->color_ctrl_page) { | ||
503 | TRACE_TASK(dead_tsk, | ||
504 | "freeing color_ctrl_page %p\n", | ||
505 | tsk_rt(dead_tsk)->color_ctrl_page); | ||
506 | free_page((unsigned long) tsk_rt(dead_tsk)->color_ctrl_page); | ||
507 | } | ||
493 | 508 | ||
494 | /* main cleanup only for RT tasks */ | 509 | /* main cleanup only for RT tasks */ |
495 | if (is_realtime(dead_tsk)) | 510 | if (is_realtime(dead_tsk)) |