author    ChengYang Fu <chengyangfu@gmail.com>	2015-01-20 11:08:04 -0500
committer Namhoon Kim <namhoonk@cs.unc.edu>	2015-01-28 09:37:37 -0500
commit    f9b8ce9e2c06fe8ecd3141837da910675af238c3 (patch)
tree      33a1031c9a8b5317f13db171a426bd4f580d1ec8
parent    5ba38eb6290a0c1767932c03b15edb0627ffd6b2 (diff)

add bank_proc.c for cache and bank coloring

Conflicts:
	litmus/litmus.c

-rw-r--r--	litmus/Makefile	1
-rw-r--r--	litmus/bank_proc.c	254
-rw-r--r--	litmus/cache_proc.c	2
-rw-r--r--	litmus/litmus.c	8
4 files changed, 262 insertions(+), 3 deletions(-)
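This patch adds a page allocator that sorts free pages into per-color pools: 16 cache colors and 8 DRAM banks, decoded from physical-address bits 12-15 and 12-14 respectively on the i.MX6 SABRE SD board, with up to 1024 pages kept per color (PAGES_PER_COLOR). The snippet below is an illustrative user-space sketch of that address decoding only, using the same masks as bank_proc.c; the sample addresses are made up, and the kernel code applies the same shifts to page_to_phys(page).

/*
 * Illustrative sketch of the color/bank decoding used by bank_proc.c
 * (same masks as the patch; the addresses below are hypothetical).
 */
#include <stdio.h>

#define CACHE_MASK   0x0000f000UL  /* 16 cache colors: physical bits 12-15 */
#define BANK_MASK    0x00007000UL  /* 8 DRAM banks: physical bits 12-14 */
#define OFFSET_SHIFT 12            /* 4 KiB pages */

static unsigned long page_color(unsigned long phys)
{
	return (phys & CACHE_MASK) >> OFFSET_SHIFT;
}

static unsigned long page_bank(unsigned long phys)
{
	return (phys & BANK_MASK) >> OFFSET_SHIFT;
}

int main(void)
{
	/* hypothetical physical addresses of three 4 KiB pages */
	unsigned long addrs[] = { 0x10000000UL, 0x10003000UL, 0x1000f000UL };
	unsigned long i;

	for (i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++)
		printf("phys 0x%08lx -> color %lu, bank %lu\n",
		       addrs[i], page_color(addrs[i]), page_bank(addrs[i]));
	return 0;
}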
diff --git a/litmus/Makefile b/litmus/Makefile
index 997524f91363..713a14f7c4fc 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -20,6 +20,7 @@ obj-y = sched_plugin.o litmus.o \
 	ctrldev.o \
 	uncachedev.o \
 	cache_proc.o \
+	bank_proc.o \
 	sched_gsn_edf.o \
 	sched_psn_edf.o \
 	sched_pfp.o
diff --git a/litmus/bank_proc.c b/litmus/bank_proc.c
new file mode 100644
index 000000000000..2c69657b53bd
--- /dev/null
+++ b/litmus/bank_proc.c
@@ -0,0 +1,254 @@
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sysctl.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/mm.h>
+#include <linux/random.h>
+
+#include <litmus/litmus_proc.h>
+#include <litmus/sched_trace.h>
+
+#define LITMUS_LOCKDEP_NAME_MAX_LEN 50
+
+/* Address decoding for the i.MX6 SABRE SD board */
+#define CACHE_MASK  0x0000f000
+#define BANK_MASK   0x00007000
+#define OFFSET_SHIFT 12
+
+#define PAGES_PER_COLOR 1024
+
+unsigned long number_banks;
+unsigned long number_cachecolors;
+
+struct color_group {
+	spinlock_t lock;
+	char _lock_name[LITMUS_LOCKDEP_NAME_MAX_LEN];
+	struct list_head list;
+	atomic_t nr_pages;
+};
+
+static struct alloced_pages {
+	spinlock_t lock;
+	struct list_head list;
+} alloced_pages;
+
+struct alloced_page {
+	struct page *page;
+	struct vm_area_struct *vma;
+	struct list_head list;
+};
+
+static struct color_group *color_groups;
+static struct lock_class_key color_lock_keys[16];
+
+//static struct color_group *color_groups;
+
+/* Decode the page's cache color (0-15) */
+static inline unsigned long page_color(struct page *page)
+{
+	return ((page_to_phys(page) & CACHE_MASK) >> PAGE_SHIFT);
+}
+
+/* Decode the page's DRAM bank number (0-7) */
+static inline unsigned long page_bank(struct page *page)
+{
+	return ((page_to_phys(page) & BANK_MASK) >> PAGE_SHIFT);
+}
+
+static unsigned long smallest_nr_pages(void)
+{
+	unsigned long i, min_pages = -1;
+	struct color_group *cgroup;
+	for (i = 0; i < number_cachecolors; ++i) {
+		cgroup = &color_groups[i];
+		if (atomic_read(&cgroup->nr_pages) < min_pages)
+			min_pages = atomic_read(&cgroup->nr_pages);
+	}
+	return min_pages;
+}
+/*
+ * The page's reference count should be one; it should not be on any LRU list.
+ */
+void add_page_to_color_list(struct page *page)
+{
+	const unsigned long color = page_color(page);
+	struct color_group *cgroup = &color_groups[color];
+	BUG_ON(in_list(&page->lru) || PageLRU(page));
+	BUG_ON(page_count(page) > 1);
+	spin_lock(&cgroup->lock);
+	list_add_tail(&page->lru, &cgroup->list);
+	atomic_inc(&cgroup->nr_pages);
+//	SetPageLRU(page);
+	spin_unlock(&cgroup->lock);
+}
+
+static int do_add_pages(void)
+{
+	struct page *page, *page_tmp;
+	LIST_HEAD(free_later);
+	unsigned long color;
+	int ret = 0;
+
+	printk(KERN_INFO "LITMUS: do_add_pages()\n");
+
+	while (smallest_nr_pages() < PAGES_PER_COLOR) {
+
+		//page = alloc_page(GFP_HIGHUSER | __GFP_MOVABLE);
+		page = alloc_page(GFP_HIGHUSER_MOVABLE);
+
+		if (unlikely(!page)) {
+			printk(KERN_WARNING "Could not allocate pages.\n");
+			ret = -ENOMEM;
+			goto out;
+		}
+		color = page_color(page);
+		if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR) {
+			//SetPageReserved(page);
+			add_page_to_color_list(page);
+		} else
+			list_add_tail(&page->lru, &free_later);
+	}
+	list_for_each_entry_safe(page, page_tmp, &free_later, lru) {
+		list_del(&page->lru);
+		__free_page(page);
+	}
+	/* setup the color queue stuff */
+//	ret = setup_flusher_array();
+out:
+	return ret;
+}
+
+
+/*
+ * Provide pages for replacement:
+ * node = 0 for Level A/B tasks on CPU 0
+ * node = 1 for Level A/B tasks on CPU 1
+ * node = 2 for Level A/B tasks on CPU 2
+ * node = 3 for Level A/B tasks on CPU 3
+ * node = 4 for Level C tasks
+ */
+#if 1
+struct page *new_alloc_page(struct page *page, unsigned long node, int **x)
+{
+	struct color_group *cgroup;
+	struct page *rPage = NULL;
+	unsigned int color;
+	printk(KERN_INFO "allocate new page, node = %lu\n", node);
+//	return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
+	get_random_bytes(&color, sizeof(unsigned int));
+
+	/*
+	if(node == 0){
+		color = (color%2)*8+node;
+	}else if(node == 1){
+		color = (color%2)*8+node;
+	}else if(node == 2){
+		color = (color%2)*8+;
+	}else if(node == 3){
+		color = color%2 + 6;
+	}else if(node == 4){
+		color = color%8 + 8;
+	}else{
+		goto out;
+	}
+	*/
+
+	switch (node) {
+	case 0:
+	case 1:
+	case 2:
+	case 3:
+		/* Level A/B on CPU 'node': use color 'node' or 'node + 8' */
+		color = (color % 2) * 8 + node;
+		break;
+	case 4:
+		/* Level C: use colors 4-7 and 12-15 */
+		color = (color % 8) + 4;
+		if (color >= 8)
+			color += 4;
+		break;
+	default:
+		goto out;
+	}
+	printk(KERN_INFO "allocate new page, color = %u\n", color);
+
+	cgroup = &color_groups[color];
+	spin_lock(&cgroup->lock);
+	if (unlikely(!atomic_read(&cgroup->nr_pages))) {
+		TRACE_CUR("No free pages of color %u.\n", color);
+		printk(KERN_WARNING "No free pages of color %u.\n", color);
+		goto out_unlock;
+	}
+	rPage = list_first_entry(&cgroup->list, struct page, lru);
+	BUG_ON(page_count(rPage) > 1);
+	get_page(rPage);
+	list_del(&rPage->lru);
+	atomic_dec(&cgroup->nr_pages);
+//	ClearPageLRU(rPage);
+out_unlock:
+	spin_unlock(&cgroup->lock);
+out:
+	do_add_pages();
+	return rPage;
+	//return alloc_page(GFP_HIGHUSER_MOVABLE);
+}
+#endif
+
+static void __init init_variables(void)
+{
+	number_banks = 1 + (BANK_MASK >> PAGE_SHIFT);
+	number_cachecolors = 1 + (CACHE_MASK >> PAGE_SHIFT);
+}
+
+
+
+static int __init init_color_groups(void)
+{
+	struct color_group *cgroup;
+	unsigned long i;
+	int err = 0;
+
+	color_groups = kmalloc(number_cachecolors *
+			sizeof(struct color_group), GFP_KERNEL);
+	if (!color_groups) {
+		printk(KERN_WARNING "Could not allocate color groups.\n");
+		err = -ENOMEM;
+	} else {
+
+		for (i = 0; i < number_cachecolors; ++i) {
+			cgroup = &color_groups[i];
+			atomic_set(&cgroup->nr_pages, 0);
+			INIT_LIST_HEAD(&cgroup->list);
+			spin_lock_init(&cgroup->lock);
+//			LOCKDEP_DYNAMIC_ALLOC(&cgroup->lock, &color_lock_keys[i],
+//					cgroup->_lock_name, "color%lu", i);
+		}
+	}
+	return err;
+}
+
+/*
+ * Initialize the LITMUS^RT color and bank allocator.
+ */
+static int __init litmus_color_init(void)
+{
+	int err = 0;
+
+	INIT_LIST_HEAD(&alloced_pages.list);
+	spin_lock_init(&alloced_pages.lock);
+	init_variables();
+	printk(KERN_INFO "Cache colors = %lu, cache mask = 0x%08x\n", number_cachecolors, CACHE_MASK);
+	printk(KERN_INFO "Banks = %lu, bank mask = 0x%08x\n", number_banks, BANK_MASK);
+	init_color_groups();
+	do_add_pages();
+
+	printk(KERN_INFO "Registering LITMUS^RT color and bank proc.\n");
+	return err;
+}
+
+module_init(litmus_color_init);
+
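To make the allocation policy above easier to follow: new_alloc_page() picks a random color from a per-node set and returns the head of that color's free list. The stand-alone helper below merely restates the node-to-color mapping from the switch statement above, for illustration only; the name pick_color and the demo program are not part of the patch.

/*
 * Illustrative restatement of the node -> color partition in new_alloc_page():
 * nodes 0-3 (Level A/B on CPUs 0-3) draw from colors {node, node + 8};
 * node 4 (Level C) draws from colors 4-7 and 12-15.
 */
#include <stdio.h>

static int pick_color(unsigned long node, unsigned int rnd)
{
	unsigned int color;

	switch (node) {
	case 0: case 1: case 2: case 3:
		color = (rnd % 2) * 8 + node;   /* node or node + 8 */
		break;
	case 4:
		color = (rnd % 8) + 4;          /* 4..11 ... */
		if (color >= 8)
			color += 4;             /* ... remapped to 4-7 and 12-15 */
		break;
	default:
		return -1;                      /* unknown node */
	}
	return (int)color;
}

int main(void)
{
	unsigned long node;
	unsigned int rnd;

	for (node = 0; node <= 4; node++)
		for (rnd = 0; rnd < 2; rnd++)
			printf("node %lu, rnd %u -> color %d\n",
			       node, rnd, pick_color(node, rnd));
	return 0;
}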
diff --git a/litmus/cache_proc.c b/litmus/cache_proc.c
index 4425bfb8bbd4..cc818b9f1fc4 100644
--- a/litmus/cache_proc.c
+++ b/litmus/cache_proc.c
@@ -242,4 +242,4 @@ out:
 	return ret;
 }
 
-module_init(litmus_sysctl_init);
\ No newline at end of file
+module_init(litmus_sysctl_init);
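The litmus.c hunks below wire the colored allocator into the sys_set_page_color() system call: the old local new_alloc_page(), which simply fell back to alloc_pages_exact_node(), is compiled out, the colored version from bank_proc.c is declared extern, and migrate_pages() now receives 4 as its private argument, so every migrated page is drawn from the Level-C color pool. The following is a condensed sketch of that callback pattern only; the helper names are invented, and the page-table walk, per-VMA iteration, locking, and error handling of the real syscall are omitted.

/*
 * Condensed, illustrative sketch of how sys_set_page_color() uses the
 * colored allocator (not a drop-in replacement for the real syscall).
 */
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/migrate.h>

/* mirrors the extern declarations used in litmus.c */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_pages(struct list_head *l);
extern struct page *new_alloc_page(struct page *page, unsigned long node, int **x);

/* Queue one page for recoloring by taking it off the LRU. */
static int queue_for_recolor(struct page *old_page, struct list_head *pagelist)
{
	if (isolate_lru_page(old_page))
		return -EBUSY;
	list_add_tail(&old_page->lru, pagelist);
	return 0;
}

/* Migrate every queued page to a page from the Level-C color pool (node 4). */
static long recolor_queued_pages(struct list_head *pagelist)
{
	long ret = 0;

	if (!list_empty(pagelist)) {
		ret = migrate_pages(pagelist, new_alloc_page, 4,
				    MIGRATE_ASYNC, MR_SYSCALL);
		if (ret)
			putback_lru_pages(pagelist);
	}
	return ret;
}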
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 6034ff8731af..dcb9ed58962c 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -392,11 +392,14 @@ static struct page *walk_page_table(unsigned long addr)
 extern int isolate_lru_page(struct page *page);
 extern void putback_lru_page(struct page *page);
 
+extern struct page *new_alloc_page(struct page *page, unsigned long node, int **x);
+
+#if 0
 static struct page *new_alloc_page(struct page *page, unsigned long node, int **x)
 {
 	return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
 }
-
+#endif
 asmlinkage long sys_set_page_color(int cpu)
 {
 	long ret = 0;
@@ -442,6 +445,7 @@ asmlinkage long sys_set_page_color(int cpu)
 			continue;
 		if (!old_page)
 			continue;
+
 		if (PageReserved(old_page)) {
 			put_page(old_page);
 			continue;
@@ -482,7 +486,7 @@ asmlinkage long sys_set_page_color(int cpu)
 
 	ret = 0;
 	if (!list_empty(&pagelist)) {
-		ret = migrate_pages(&pagelist, new_alloc_page, 0, MIGRATE_ASYNC, MR_SYSCALL);
+		ret = migrate_pages(&pagelist, new_alloc_page, 4, MIGRATE_ASYNC, MR_SYSCALL);
 		if (ret) {
 			printk(KERN_INFO "%ld pages not migrated.\n", ret);
 			putback_lru_pages(&pagelist);