author     Christopher Kenna <cjk@cs.unc.edu>  2012-05-02 18:48:03 -0400
committer  Christopher Kenna <cjk@cs.unc.edu>  2012-05-02 18:48:03 -0400
commit     bf554059414a34dd17cd08a9c6bc6cfafa9ac717 (patch)
tree       f18d8dd5dfb4d681a9fbe746f5aab0aba666941c /litmus
parent     ba84bc13f622bf1e8dd0b35dae311117cdab1dc6 (diff)
use remap_pfn_range and automatic reclaiming
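
In outline: every colored page handed to user space is now mapped with
remap_pfn_range() and recorded together with the vma that received it, and
litmus_color_alloc_vm_close() returns those pages to the colored free lists
automatically, replacing the reclaim_pages sysctl. A minimal sketch of the
pattern follows; the names tracked_page, tracker_lock, map_and_track() and
tracker_vm_close() are illustrative only, not the identifiers used in
litmus/color.c.

    #include <linux/mm.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    /* Illustrative tracker: one entry per page handed out to a vma. */
    struct tracked_page {
            struct page           *page;
            struct vm_area_struct *vma;
            struct list_head      list;
    };

    static LIST_HEAD(tracked_pages);
    static DEFINE_SPINLOCK(tracker_lock);

    /* Map one page into the vma and remember which vma owns it. */
    static int map_and_track(struct vm_area_struct *vma, unsigned long addr,
                             struct page *page)
    {
            struct tracked_page *tp;
            int err;

            err = remap_pfn_range(vma, addr, page_to_pfn(page),
                                  PAGE_SIZE, PAGE_SHARED);
            if (err)
                    return err;

            tp = kmalloc(sizeof(*tp), GFP_KERNEL);
            if (!tp)
                    return -ENOMEM;
            get_page(page);         /* hold a reference while it is mapped */
            tp->page = page;
            tp->vma  = vma;
            spin_lock(&tracker_lock);
            list_add_tail(&tp->list, &tracked_pages);
            spin_unlock(&tracker_lock);
            return 0;
    }

    /* vm_ops->close: give back every page that was mapped into this vma. */
    static void tracker_vm_close(struct vm_area_struct *vma)
    {
            struct tracked_page *tp, *tmp;

            spin_lock(&tracker_lock);
            list_for_each_entry_safe(tp, tmp, &tracked_pages, list) {
                    if (tp->vma != vma)
                            continue;
                    list_del(&tp->list);
                    put_page(tp->page);     /* drop the mapping reference */
                    /* the real code re-adds the page to its color list here */
                    kfree(tp);
            }
            spin_unlock(&tracker_lock);
    }

    static const struct vm_operations_struct tracker_vm_ops = {
            .close = tracker_vm_close,
    };

The get_page()/put_page() pair keeps each struct page pinned while it is
mapped, so nothing has to poll page counts the way the old sysctl-driven
do_reclaim_pages() did.
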
Diffstat (limited to 'litmus')
 -rw-r--r--  litmus/color.c       111
 -rw-r--r--  litmus/color_dev.c    23
 -rw-r--r--  litmus/color_proc.c   10
 3 files changed, 76 insertions(+), 68 deletions(-)
diff --git a/litmus/color.c b/litmus/color.c
index ae6c3f6b8206..93d12a718543 100644
--- a/litmus/color.c
+++ b/litmus/color.c
@@ -27,11 +27,6 @@ struct color_group {
 	atomic_t nr_pages;
 };
 
-static struct alloced_pages {
-	spinlock_t lock;
-	struct list_head list;
-} alloced_pages;
-
 static unsigned long color_mask;
 static struct color_group *color_groups;
 
@@ -42,40 +37,31 @@ unsigned long nr_colors;
 struct color_cache_info color_cache_info;
 #endif
 int color_sysctl_add_pages_data;
-int color_sysctl_reclaim_pages_data;
 
 static inline unsigned long page_color(struct page *page)
 {
 	return ((page_to_phys(page) & color_mask) >> PAGE_SHIFT);
 }
 
+/*
+ * Page's count should be one. It should not be on any LRU list.
+ */
 void add_page_to_color_list(struct page *page)
 {
 	const unsigned long color = page_color(page);
 	struct color_group *cgroup = &color_groups[color];
 	BUG_ON(in_list(&page->lru) || PageLRU(page));
-	BUG_ON(page_mapped(page) || page_count(page) > 1);
+	BUG_ON(page_count(page) > 1);
 	spin_lock(&cgroup->lock);
 	list_add_tail(&page->lru, &cgroup->list);
 	atomic_inc(&cgroup->nr_pages);
 	SetPageLRU(page);
-	SetPageReserved(page);
 	spin_unlock(&cgroup->lock);
 }
 
-void add_page_to_alloced_list(struct page *page)
-{
-	BUG_ON(in_list(&page->lru) || PageLRU(page));
-	//BUG_ON(!page_mapped(page) || page_count(page) < 2);
-	TRACE_CUR("pfn:%d page_mapped:%d page_count:%d\n",
-			page_to_pfn(page), page_mapped(page),
-			page_count(page));
-	spin_lock(&alloced_pages.lock);
-	list_add_tail(&page->lru, &alloced_pages.list);
-	SetPageLRU(page);
-	spin_unlock(&alloced_pages.lock);
-}
-
+/*
+ * Increases page's count to two.
+ */
 struct page* get_colored_page(unsigned long color)
 {
 	struct color_group *cgroup;
@@ -92,7 +78,8 @@ struct page* get_colored_page(unsigned long color)
 		goto out_unlock;
 	}
 	page = list_first_entry(&cgroup->list, struct page, lru);
-	BUG_ON(page_mapped(page) || page_count(page) > 1);
+	BUG_ON(page_count(page) > 1);
+	get_page(page);
 	list_del(&page->lru);
 	atomic_dec(&cgroup->nr_pages);
 	ClearPageLRU(page);
@@ -136,9 +123,10 @@ static int do_add_pages(void)
 			goto out;
 		}
 		color = page_color(page);
-		if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR)
+		if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR) {
+			SetPageReserved(page);
 			add_page_to_color_list(page);
-		else
+		} else
 			list_add_tail(&page->lru, &free_later);
 	}
 	list_for_each_entry_safe(page, page_tmp, &free_later, lru) {
@@ -149,29 +137,63 @@ out:
 	return ret;
 }
 
-static int do_reclaim_pages(void)
+static struct alloced_pages {
+	spinlock_t lock;
+	struct list_head list;
+} alloced_pages;
+
+struct alloced_page {
+	struct page *page;
+	struct vm_area_struct *vma;
+	struct list_head list;
+};
+
+static struct alloced_page * new_alloced_page(struct page *page,
+		struct vm_area_struct *vma)
 {
-	struct page *page, *page_tmp;
+	struct alloced_page *ap = kmalloc(sizeof(*ap), GFP_KERNEL);
+	INIT_LIST_HEAD(&ap->list);
+	ap->page = page;
+	ap->vma = vma;
+	return ap;
+}
+
+/*
+ * Page's count should be two or more. It should not be on any LRU list.
+ */
+void add_page_to_alloced_list(struct page *page, struct vm_area_struct *vma)
+{
+	struct alloced_page *ap;
+
+	BUG_ON(page_count(page) < 2);
+	ap = new_alloced_page(page, vma);
+	spin_lock(&alloced_pages.lock);
+	list_add_tail(&ap->list, &alloced_pages.list);
+	spin_unlock(&alloced_pages.lock);
+}
+
+/*
+ * Reclaim pages.
+ */
+void reclaim_pages(struct vm_area_struct *vma)
+{
+	struct alloced_page *ap, *ap_tmp;
 	unsigned long nr_reclaimed = 0;
 	spin_lock(&alloced_pages.lock);
-	list_for_each_entry_safe(page, page_tmp, &alloced_pages.list, lru) {
-		TRACE_CUR("pfn:%8lu page_mapped:%d page_count:%d\n",
-				page_to_pfn(page), page_mapped(page),
-				page_count(page));
-		if (1 == page_count(page) && !page_mapped(page)) {
-			list_del(&page->lru);
-			ClearPageLRU(page);
-			add_page_to_color_list(page);
+	list_for_each_entry_safe(ap, ap_tmp, &alloced_pages.list, list) {
+		if (vma == ap->vma) {
+			list_del(&ap->list);
+			put_page(ap->page);
+			add_page_to_color_list(ap->page);
 			nr_reclaimed++;
 			TRACE_CUR("reclaiming page (pa:0x%10llx, pfn:%8lu, "
-					"color:%3lu)\n", page_to_phys(page),
-					page_to_pfn(page), page_color(page));
+					"color:%3lu)\n", page_to_phys(ap->page),
+					page_to_pfn(ap->page), page_color(ap->page));
+			kfree(ap);
 		}
-
 	}
 	spin_unlock(&alloced_pages.lock);
 	TRACE_CUR("Reclaimed %lu pages.\n", nr_reclaimed);
-	return 0;
 }
 
 /***********************************************************
@@ -215,19 +237,6 @@ out:
 	return ret;
 }
 
-int color_reclaim_pages_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *lenp, loff_t *ppos)
-{
-	int ret = 0;
-	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-	if (ret)
-		goto out;
-	if (write && color_sysctl_reclaim_pages_data)
-		ret = do_reclaim_pages();
-out:
-	return ret;
-}
-
 /***********************************************************
  * Initialization
 ***********************************************************/
diff --git a/litmus/color_dev.c b/litmus/color_dev.c
index c6e500722819..d681f57be01f 100644
--- a/litmus/color_dev.c
+++ b/litmus/color_dev.c
@@ -54,9 +54,12 @@ static int mmap_common_checks(struct vm_area_struct *vma)
 	if (vma->vm_pgoff != 0)
 		return -EINVAL;
 
+#if 0
 	/* you can't share it with anyone */
+	/* well, maybe you can... */
 	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
 		return -EINVAL;
+#endif
 
 	return 0;
 }
@@ -92,9 +95,9 @@ static int map_color_ctrl_page(struct vm_area_struct *vma)
 	pfn = page_to_pfn(color_ctrl);
 
 	TRACE_CUR(CTRL_NAME
-		": mapping %p (pfn:%lx, %lx) to 0x%lx (prot:%lx)\n",
+		": mapping %p (pfn:%lx, %lx) to 0x%lx (flags:%lx prot:%lx)\n",
 		tsk_rt(t)->color_ctrl_page, pfn, page_to_pfn(color_ctrl),
-		vma->vm_start, vma->vm_page_prot);
+		vma->vm_start, vma->vm_flags, pgprot_val(vma->vm_page_prot));
 
 	/* Map it into the vma. Make sure to use PAGE_SHARED, otherwise
 	 * userspace actually gets a copy-on-write page. */
@@ -155,7 +158,8 @@ static int do_map_colored_pages(struct vm_area_struct *vma)
 	color_t *cur_color;
 	int err;
 
-	TRACE_CUR(ALLOC_NAME ": allocating %lu pages\n", nr_pages);
+	TRACE_CUR(ALLOC_NAME ": allocating %lu pages (flags:%lx prot:%lx)\n",
+			nr_pages, vma->vm_flags, pgprot_val(vma->vm_page_prot));
 
 	for (	nr_mapped = 0,
 		cur_color = tsk_rt(current)->color_ctrl_page->colors;
@@ -177,19 +181,23 @@ static int do_map_colored_pages(struct vm_area_struct *vma)
 		clear_user_highpage(page, addr);
 #endif
 		TRACE_CUR(ALLOC_NAME ": insert page (pa:0x%10llx, pfn:%8lu, "
-				"color:%3lu) at 0x%lx\n", page_to_phys(page),
-				page_to_pfn(page), this_color, addr);
+				"color:%3lu) at 0x%lx (flags:%lx prot:%lx)\n",
+				page_to_phys(page),
+				page_to_pfn(page), this_color, addr,
+				vma->vm_flags, pgprot_val(vma->vm_page_prot));
 		//err = vm_insert_page(vma, addr, page);
 		err = remap_pfn_range(vma, addr, page_to_pfn(page),
 				PAGE_SIZE, PAGE_SHARED);
 		if (err) {
 			TRACE_CUR(ALLOC_NAME ": remap_pfn_range() failed "
-					"(%d)\n", err);
+					"(%d) (flags:%lx prot:%lx)\n", err,
+					vma->vm_flags,
+					pgprot_val(vma->vm_page_prot));
 			/* TODO unmap mapped pages */
 			err = -EINVAL;
 			break;
 		}
-		add_page_to_alloced_list(page);
+		add_page_to_alloced_list(page, vma);
 	}
 	return err;
 }
@@ -223,6 +231,7 @@ static void litmus_color_alloc_vm_close(struct vm_area_struct *vma)
 	TRACE_CUR(ALLOC_NAME ": %p:%p vma:%p vma->vm_private_data:%p closed.\n",
 			(void*) vma->vm_start, (void*) vma->vm_end, vma,
 			vma->vm_private_data);
+	reclaim_pages(vma);
 }
 
 static int litmus_color_alloc_vm_fault(struct vm_area_struct *vma,
diff --git a/litmus/color_proc.c b/litmus/color_proc.c
index cac336ac1731..4cb6c9ac89bb 100644
--- a/litmus/color_proc.c
+++ b/litmus/color_proc.c
@@ -5,7 +5,6 @@
 #include <litmus/color.h>
 
 extern int color_sysctl_add_pages_data; /* litmus/color.c */
-extern int color_sysctl_reclaim_pages_data; /* litmus/color.c */
 
 static int zero = 0;
 static int one = 1;
@@ -30,15 +29,6 @@ static struct ctl_table color_table[] =
 		.extra1		= &zero,
 		.extra2		= &one,
 	},
-	{
-		.procname	= "reclaim_pages",
-		.data		= &color_sysctl_reclaim_pages_data,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= color_reclaim_pages_handler,
-		.extra1		= &zero,
-		.extra2		= &one,
-	},
 	{ }
 };
 