author | ChengYang Fu <chengyangfu@gmail.com> | 2015-03-02 16:11:18 -0500 |
---|---|---|
committer | ChengYang Fu <chengyangfu@gmail.com> | 2015-03-02 16:11:18 -0500 |
commit | 6b091698a8c1575d96e6c4e3dd36252cfa7aabd1 (patch) | |
tree | 798cc1819088544bfe317206dda2f9866cf42286 | |
parent | d9f5d5edbda26349cf6bf4e7d371d6e91660fe0f (diff) |
-rw-r--r-- | litmus/bank_proc.c | 210 |
1 file changed, 123 insertions(+), 87 deletions(-)
diff --git a/litmus/bank_proc.c b/litmus/bank_proc.c
index 07d572833b30..295c450bfbe2 100644
--- a/litmus/bank_proc.c
+++ b/litmus/bank_proc.c
@@ -1,3 +1,9 @@ | |||
1 | /* | ||
2 | * bank_proc.c -- Implementation of page coloring for cache and bank partitioning. | ||
3 | * The file keeps a pool of colored pages. Users can request pages with a | ||
4 | * specific color or bank number. | ||
5 | * Part of the code is adapted from Jonathan Herman's code. | ||
6 | */ | ||
1 | #include <linux/init.h> | 7 | #include <linux/init.h> |
2 | #include <linux/types.h> | 8 | #include <linux/types.h> |
3 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
@@ -14,16 +20,23 @@ | |||
14 | 20 | ||
15 | #define LITMUS_LOCKDEP_NAME_MAX_LEN 50 | 21 | #define LITMUS_LOCKDEP_NAME_MAX_LEN 50 |
16 | 22 | ||
17 | // This is Address Decoding for imx6-sabredsd board | 23 | // This address decoding is used on the imx6-sabredsd platform |
18 | #define CACHE_MASK 0x0000f000 | 24 | #define CACHE_MASK 0x0000f000 |
19 | #define BANK_MASK 0x00007000 | 25 | #define BANK_MASK 0x00007000 |
20 | #define OFFSET_SHIFT 12 | 26 | #define OFFSET_SHIFT 12 |
21 | 27 | ||
22 | #define PAGES_PER_COLOR 1024 | 28 | #define PAGES_PER_COLOR 1024 |
23 | 29 | ||
30 | unsigned long used_cachecolor; | ||
31 | unsigned long curr_cachecolor; | ||
32 | |||
33 | |||
24 | unsigned long number_banks; | 34 | unsigned long number_banks; |
25 | unsigned long number_cachecolors; | 35 | unsigned long number_cachecolors; |
26 | 36 | ||
37 | /* | ||
38 | * Every page list contains a lock, a list head, and a counter recording how many pages it stores | ||
39 | */ | ||
27 | struct color_group { | 40 | struct color_group { |
28 | spinlock_t lock; | 41 | spinlock_t lock; |
29 | char _lock_name[LITMUS_LOCKDEP_NAME_MAX_LEN]; | 42 | char _lock_name[LITMUS_LOCKDEP_NAME_MAX_LEN]; |
@@ -31,6 +44,10 @@ struct color_group { | |||
31 | atomic_t nr_pages; | 44 | atomic_t nr_pages; |
32 | }; | 45 | }; |
33 | 46 | ||
47 | /* | ||
48 | * This is old code that is not used in the current version | ||
49 | */ | ||
50 | /* | ||
34 | static struct alloced_pages { | 51 | static struct alloced_pages { |
35 | spinlock_t lock; | 52 | spinlock_t lock; |
36 | struct list_head list; | 53 | struct list_head list; |
@@ -41,6 +58,7 @@ struct alloced_page { | |||
41 | struct vm_area_struct *vma; | 58 | struct vm_area_struct *vma; |
42 | struct list_head list; | 59 | struct list_head list; |
43 | }; | 60 | }; |
61 | */ | ||
44 | 62 | ||
45 | static struct color_group *color_groups; | 63 | static struct color_group *color_groups; |
46 | static struct lock_class_key color_lock_keys[16]; | 64 | static struct lock_class_key color_lock_keys[16]; |
@@ -59,6 +77,9 @@ static inline unsigned long page_bank(struct page *page) | |||
59 | return ((page_to_phys(page)& BANK_MASK) >> PAGE_SHIFT); | 77 | return ((page_to_phys(page)& BANK_MASK) >> PAGE_SHIFT); |
60 | } | 78 | } |
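A page's cache color and bank index are just bit fields of its physical address under the masks defined above. A minimal userspace sketch of the decoding (illustration only; it assumes OFFSET_SHIFT matches PAGE_SHIFT = 12 on this board, and the example address is made up):

```c
#include <stdio.h>

#define CACHE_MASK   0x0000f000UL
#define BANK_MASK    0x00007000UL
#define OFFSET_SHIFT 12

/* Cache color: physical-address bits 12-15, i.e. 16 possible colors. */
static unsigned long phys_to_color(unsigned long phys)
{
	return (phys & CACHE_MASK) >> OFFSET_SHIFT;
}

/* DRAM bank: physical-address bits 12-14, i.e. 8 possible banks. */
static unsigned long phys_to_bank(unsigned long phys)
{
	return (phys & BANK_MASK) >> OFFSET_SHIFT;
}

int main(void)
{
	unsigned long phys = 0x1080d000UL;	/* made-up example address */

	printf("color = %lu, bank = %lu\n",
	       phys_to_color(phys), phys_to_bank(phys));
	return 0;
}
```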
61 | 79 | ||
80 | /* | ||
81 | * Return the smallest page count among all the color lists. | ||
82 | */ | ||
62 | static unsigned long smallest_nr_pages(void) | 83 | static unsigned long smallest_nr_pages(void) |
63 | { | 84 | { |
64 | unsigned long i, min_pages = -1; | 85 | unsigned long i, min_pages = -1; |
@@ -70,8 +91,9 @@ static unsigned long smallest_nr_pages(void) | |||
70 | } | 91 | } |
71 | return min_pages; | 92 | return min_pages; |
72 | } | 93 | } |
94 | |||
73 | /* | 95 | /* |
74 | * Page's count should be one, it sould not be on any LRU list. | 96 | * Add a page to the pool matching its color. |
75 | */ | 97 | */ |
76 | void add_page_to_color_list(struct page *page) | 98 | void add_page_to_color_list(struct page *page) |
77 | { | 99 | { |
@@ -82,22 +104,26 @@ void add_page_to_color_list(struct page *page) | |||
82 | spin_lock(&cgroup->lock); | 104 | spin_lock(&cgroup->lock); |
83 | list_add_tail(&page->lru, &cgroup->list); | 105 | list_add_tail(&page->lru, &cgroup->list); |
84 | atomic_inc(&cgroup->nr_pages); | 106 | atomic_inc(&cgroup->nr_pages); |
85 | // SetPageLRU(page); | ||
86 | spin_unlock(&cgroup->lock); | 107 | spin_unlock(&cgroup->lock); |
87 | } | 108 | } |
88 | 109 | ||
110 | /* | ||
111 | * Replenish the page pool. | ||
112 | * If a newly allocated page has a color that is still needed, it is pushed onto the | ||
113 | * matching page list; otherwise it is freed later. | ||
114 | */ | ||
89 | static int do_add_pages(void) | 115 | static int do_add_pages(void) |
90 | { | 116 | { |
91 | //printk("LITMUS do add pages\n"); | 117 | printk("LITMUS do add pages\n"); |
92 | 118 | ||
93 | struct page *page, *page_tmp; | 119 | struct page *page, *page_tmp; |
94 | LIST_HEAD(free_later); | 120 | LIST_HEAD(free_later); |
95 | unsigned long color; | 121 | unsigned long color; |
96 | int ret = 0; | 122 | int ret = 0; |
97 | 123 | ||
124 | // until all the page lists contain enough pages | ||
98 | while (smallest_nr_pages() < PAGES_PER_COLOR) { | 125 | while (smallest_nr_pages() < PAGES_PER_COLOR) { |
99 | 126 | ||
100 | //page = alloc_page(GFP_HIGHUSER | __GFP_MOVABLE); | ||
101 | page = alloc_page(GFP_HIGHUSER_MOVABLE); | 127 | page = alloc_page(GFP_HIGHUSER_MOVABLE); |
102 | 128 | ||
103 | if (unlikely(!page)) { | 129 | if (unlikely(!page)) { |
@@ -107,70 +133,79 @@ static int do_add_pages(void) | |||
107 | } | 133 | } |
108 | color = page_color(page); | 134 | color = page_color(page); |
109 | if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR) { | 135 | if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR) { |
110 | // SetPageReserved(page); | ||
111 | add_page_to_color_list(page); | 136 | add_page_to_color_list(page); |
112 | } else | 137 | } else{ |
138 | // Pages here will be freed later | ||
113 | list_add_tail(&page->lru, &free_later); | 139 | list_add_tail(&page->lru, &free_later); |
140 | } | ||
114 | } | 141 | } |
142 | // Free the unwanted pages | ||
115 | list_for_each_entry_safe(page, page_tmp, &free_later, lru) { | 143 | list_for_each_entry_safe(page, page_tmp, &free_later, lru) { |
116 | list_del(&page->lru); | 144 | list_del(&page->lru); |
117 | __free_page(page); | 145 | __free_page(page); |
118 | } | 146 | } |
119 | /* setup the color queue stuff */ | ||
120 | // ret = setup_flusher_array(); | ||
121 | out: | 147 | out: |
122 | return ret; | 148 | return ret; |
123 | } | 149 | } |
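The replenishment policy is simple: keep drawing pages until every per-color pool holds at least PAGES_PER_COLOR entries, and batch-free whatever lands in pools that are already full. A compilable userspace model of that loop (illustration only; rand() stands in for the color of whatever page alloc_page() happens to return):

```c
#include <stdio.h>
#include <stdlib.h>

#define NR_COLORS       16
#define PAGES_PER_COLOR 1024

static unsigned int pool[NR_COLORS];	/* models color_groups[i].nr_pages */

static unsigned int smallest_pool(void)
{
	unsigned int i, min = pool[0];

	for (i = 1; i < NR_COLORS; i++)
		if (pool[i] < min)
			min = pool[i];
	return min;
}

/* Model of do_add_pages(): fill every pool, count the surplus pages. */
static unsigned int do_add_pages_model(void)
{
	unsigned int freed = 0;

	while (smallest_pool() < PAGES_PER_COLOR) {
		unsigned int color = rand() % NR_COLORS;	/* color of a fresh page */

		if (pool[color] < PAGES_PER_COLOR)
			pool[color]++;		/* add_page_to_color_list() */
		else
			freed++;		/* would go on the free_later list */
	}
	return freed;
}

int main(void)
{
	printf("pages freed while filling the pools: %u\n", do_add_pages_model());
	return 0;
}
```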
124 | 150 | ||
125 | extern int l2_usable_sets; | 151 | /* |
152 | * Provide pages for replacement according to cache color. | ||
153 | * This should be the only implementation here. | ||
154 | * This function should not be called by outside code directly. | ||
155 | * | ||
156 | */ | ||
157 | static struct page *new_alloc_page_color(unsigned long color) | ||
158 | { | ||
159 | struct color_group *cgroup; | ||
160 | struct page *rPage = NULL; | ||
161 | printk("allocate new page color = %lu\n", color); | ||
162 | |||
163 | if (color > 15) { | ||
164 | TRACE_CUR("Wrong color %lu\n", color); | ||
165 | printk(KERN_WARNING "Wrong color %lu\n", color); | ||
166 | goto out; | ||
167 | } | ||
168 | |||
169 | |||
170 | cgroup = &color_groups[color]; | ||
171 | spin_lock(&cgroup->lock); | ||
172 | if (unlikely(!atomic_read(&cgroup->nr_pages))) { | ||
173 | TRACE_CUR("No free %lu colored pages.\n", color); | ||
174 | printk(KERN_WARNING "no free %lu colored pages.\n", color); | ||
175 | goto out_unlock; | ||
176 | } | ||
177 | rPage = list_first_entry(&cgroup->list, struct page, lru); | ||
178 | BUG_ON(page_count(rPage) > 1); | ||
179 | get_page(rPage); | ||
180 | list_del(&rPage->lru); | ||
181 | atomic_dec(&cgroup->nr_pages); | ||
182 | // ClearPageLRU(rPage); | ||
183 | out_unlock: | ||
184 | spin_unlock(&cgroup->lock); | ||
185 | out: | ||
186 | do_add_pages(); | ||
187 | return rPage; | ||
188 | } | ||
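A hypothetical caller (not part of this patch) would treat a NULL return as "that color pool is empty" and could fall back to an ordinary allocation, for example:

```c
#include <linux/gfp.h>
#include <linux/mm_types.h>

/* Hypothetical helper, for illustration only: prefer a page of @color,
 * fall back to an uncolored allocation when that pool is exhausted. */
static struct page *grab_colored_page(unsigned long color)
{
	struct page *p = new_alloc_page_color(color);

	if (!p)
		p = alloc_page(GFP_HIGHUSER_MOVABLE);
	return p;
}
```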
189 | |||
126 | 190 | ||
127 | /* | 191 | /* |
128 | * provide pages for replacement | 192 | * provide pages for replacement according to |
129 | * node = 0 for Level A, B tasks in Cpu 0 | 193 | * node = 0 for Level A, B tasks in Cpu 0 |
130 | * node = 1 for Level A, B tasks in Cpu 1 | 194 | * node = 1 for Level A, B tasks in Cpu 1 |
131 | * node = 2 for Level A, B tasks in Cpu 2 | 195 | * node = 2 for Level A, B tasks in Cpu 2 |
132 | * node = 3 for Level A, B tasks in Cpu 3 | 196 | * node = 3 for Level A, B tasks in Cpu 3 |
133 | * node = 4 for Level C tasks | 197 | * node = 4 for Level C tasks |
134 | */ | 198 | */ |
135 | #if 1 | ||
136 | struct page *new_alloc_page(struct page *page, unsigned long node, int **x) | 199 | struct page *new_alloc_page(struct page *page, unsigned long node, int **x) |
137 | { | 200 | { |
138 | //printk("allocate new page node = %d\n", node); | 201 | printk("allocate new page node = %lu\n", node); |
139 | // return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0); | 202 | // return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0); |
140 | struct color_group *cgroup; | 203 | struct color_group *cgroup; |
141 | struct page *rPage = NULL; | 204 | struct page *rPage = NULL; |
142 | unsigned int color; | 205 | unsigned int color; |
143 | get_random_bytes(&color, sizeof(unsigned int)); | 206 | get_random_bytes(&color, sizeof(unsigned int)); |
144 | 207 | ||
145 | /* | 208 | // Decode the node to decide what color pages we should provide |
146 | if(node ==0){ | ||
147 | color = (color%2)*8+node; | ||
148 | }else if(node == 1){ | ||
149 | color = (color%2)*8+node; | ||
150 | }else if(node == 2){ | ||
151 | color = (color%2)*8+; | ||
152 | }else if(node == 3){ | ||
153 | color = color%2 + 6; | ||
154 | }else if(node == 4){ | ||
155 | color = color%8 + 8; | ||
156 | }else{ | ||
157 | goto out; | ||
158 | } | ||
159 | */ | ||
160 | switch(node ){ | ||
161 | case 0: | ||
162 | color = (color % l2_usable_sets); | ||
163 | break; | ||
164 | case 1: | ||
165 | case 2: | ||
166 | case 3: | ||
167 | case 4: | ||
168 | color = (color% (16-l2_usable_sets)) + l2_usable_sets; | ||
169 | break; | ||
170 | default: | ||
171 | goto out; | ||
172 | } | ||
173 | /* | ||
174 | switch(node ){ | 209 | switch(node ){ |
175 | case 0: | 210 | case 0: |
176 | case 1: | 211 | case 1: |
@@ -184,34 +219,22 @@ struct page *new_alloc_page(struct page *page, unsigned long node, int **x) | |||
184 | color+=4; | 219 | color+=4; |
185 | break; | 220 | break; |
186 | default: | 221 | default: |
187 | goto out; | 222 | TRACE_CUR("Wrong node %lu\n", node); |
223 | printk(KERN_WARNING "Wrong node %lu\n", node); | ||
224 | return rPage; | ||
188 | } | 225 | } |
189 | */ | ||
190 | 226 | ||
191 | //printk("allocate new page color = %d\n", color); | 227 | |
192 | //TRACE("allocate new page color = %d\n", color); | 228 | printk("allocate new page color = %d\n", color); |
193 | 229 | ||
194 | cgroup = &color_groups[color]; | 230 | rPage = new_alloc_page_color(color); |
195 | spin_lock(&cgroup->lock); | 231 | return rPage; |
196 | if (unlikely(!atomic_read(&cgroup->nr_pages))) { | ||
197 | //TRACE_CUR("No free %lu colored pages.\n", color); | ||
198 | printk(KERN_WARNING "no free %lu colored pages.\n", color); | ||
199 | goto out_unlock; | ||
200 | } | ||
201 | rPage = list_first_entry(&cgroup->list, struct page, lru); | ||
202 | BUG_ON(page_count(rPage) > 1); | ||
203 | get_page(rPage); | ||
204 | list_del(&rPage->lru); | ||
205 | atomic_dec(&cgroup->nr_pages); | ||
206 | // ClearPageLRU(rPage); | ||
207 | out_unlock: | ||
208 | spin_unlock(&cgroup->lock); | ||
209 | out: | ||
210 | do_add_pages(); | ||
211 | return rPage; | ||
212 | } | 232 | } |
213 | #endif | ||
214 | 233 | ||
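The signature of new_alloc_page() matches the new_page_t callback type of the 3.x kernels this code targets, so the natural use (not shown in this patch) is as the target-page allocator when migrating, i.e. recoloring, a task's pages. A hedged sketch, assuming the (from, get_new_page, private, mode, reason) form of migrate_pages() and a hypothetical recolor_task_pages() helper:

```c
#include <linux/list.h>
#include <linux/migrate.h>

/* Hypothetical helper, for illustration only: migrate every page collected
 * on @pagelist into the color pool selected by @node (CPU 0-3, or 4 for
 * level-C tasks), using new_alloc_page() to supply the destination pages. */
static int recolor_task_pages(struct list_head *pagelist, unsigned long node)
{
	return migrate_pages(pagelist, new_alloc_page, node,
			     MIGRATE_SYNC, MR_SYSCALL);
}
```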
234 | /* | ||
235 | * Provide pages for replacement according to bank number. | ||
236 | * This is used with cache way partitioning. | ||
237 | */ | ||
215 | struct page *new_alloc_page_banknr(struct page *page, unsigned long banknr, int **x) | 238 | struct page *new_alloc_page_banknr(struct page *page, unsigned long banknr, int **x) |
216 | { | 239 | { |
217 | printk("allocate new page bank = %d\n", banknr); | 240 | printk("allocate new page bank = %d\n", banknr); |
@@ -225,30 +248,43 @@ struct page *new_alloc_page_banknr(struct page *page, unsigned long banknr, int | |||
225 | }else{ | 248 | }else{ |
226 | goto out; | 249 | goto out; |
227 | } | 250 | } |
251 | |||
252 | rPage = new_alloc_page_color(color); | ||
228 | 253 | ||
229 | cgroup = &color_groups[color]; | ||
230 | spin_lock(&cgroup->lock); | ||
231 | if (unlikely(!atomic_read(&cgroup->nr_pages))) { | ||
232 | TRACE_CUR("No free %lu colored pages.\n", color); | ||
233 | printk(KERN_WARNING "no free %lu colored pages.\n", color); | ||
234 | goto out_unlock; | ||
235 | } | ||
236 | rPage = list_first_entry(&cgroup->list, struct page, lru); | ||
237 | BUG_ON(page_count(rPage) > 1); | ||
238 | get_page(rPage); | ||
239 | list_del(&rPage->lru); | ||
240 | atomic_dec(&cgroup->nr_pages); | ||
241 | // ClearPageLRU(rPage); | ||
242 | out_unlock: | ||
243 | spin_unlock(&cgroup->lock); | ||
244 | out: | 254 | out: |
245 | do_add_pages(); | ||
246 | return rPage; | 255 | return rPage; |
256 | } | ||
247 | 257 | ||
248 | 258 | ||
259 | void set_number_of_colors(unsigned long colornr) | ||
260 | { | ||
261 | used_cachecolor = colornr; | ||
262 | curr_cachecolor = 0; | ||
263 | } | ||
264 | |||
249 | 265 | ||
266 | /* | ||
267 | * Provide pages for replacement in a predefined round-robin color order. | ||
268 | * This is used when running experiments. | ||
269 | */ | ||
270 | struct page *new_alloc_page_predefined(struct page *page, int **x) | ||
271 | { | ||
272 | unsigned int color = curr_cachecolor; | ||
273 | struct page *rPage = NULL; | ||
274 | |||
275 | printk("allocate new page color = %u\n", color); | ||
276 | |||
278 | rPage = new_alloc_page_color(color); | ||
279 | curr_cachecolor = (color + 1) % used_cachecolor; | ||
280 | out: | ||
281 | return rPage; | ||
250 | } | 282 | } |
251 | 283 | ||
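Together, set_number_of_colors() and new_alloc_page_predefined() are meant to walk the first colornr colors in a round-robin cycle. A userspace model of just that color sequence (illustration only):

```c
#include <stdio.h>

static unsigned long used_cachecolor, curr_cachecolor;

static void set_number_of_colors(unsigned long colornr)
{
	used_cachecolor = colornr;
	curr_cachecolor = 0;
}

/* Return the next color in the predefined cycle 0, 1, ..., colornr-1, 0, ... */
static unsigned long next_predefined_color(void)
{
	unsigned long color = curr_cachecolor;

	curr_cachecolor = (color + 1) % used_cachecolor;
	return color;
}

int main(void)
{
	int i;

	set_number_of_colors(4);
	for (i = 0; i < 6; i++)
		printf("%lu ", next_predefined_color());	/* prints: 0 1 2 3 0 1 */
	printf("\n");
	return 0;
}
```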
284 | |||
285 | /* | ||
286 | * Initialize the numbers of banks and cache colors | ||
287 | */ | ||
252 | static int __init init_variables(void) | 288 | static int __init init_variables(void) |
253 | { | 289 | { |
254 | number_banks = 1+(BANK_MASK >> PAGE_SHIFT); | 290 | number_banks = 1+(BANK_MASK >> PAGE_SHIFT); |
@@ -256,7 +292,9 @@ static int __init init_variables(void) | |||
256 | } | 292 | } |
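With the masks defined at the top of the file (BANK_MASK = 0x7000, CACHE_MASK = 0xf000) and PAGE_SHIFT = 12, this works out to number_banks = 1 + 0x7 = 8, and number_cachecolors (presumably computed the same way from CACHE_MASK) = 1 + 0xf = 16, matching the 0..15 range checked in new_alloc_page_color().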
257 | 293 | ||
258 | 294 | ||
259 | 295 | /* | |
296 | * Initialize the page pool | ||
297 | */ | ||
260 | static int __init init_color_groups(void) | 298 | static int __init init_color_groups(void) |
261 | { | 299 | { |
262 | struct color_group *cgroup; | 300 | struct color_group *cgroup; |
@@ -275,22 +313,20 @@ static int __init init_color_groups(void) | |||
275 | atomic_set(&cgroup->nr_pages, 0); | 313 | atomic_set(&cgroup->nr_pages, 0); |
276 | INIT_LIST_HEAD(&cgroup->list); | 314 | INIT_LIST_HEAD(&cgroup->list); |
277 | spin_lock_init(&cgroup->lock); | 315 | spin_lock_init(&cgroup->lock); |
278 | // LOCKDEP_DYNAMIC_ALLOC(&cgroup->lock, &color_lock_keys[i], | ||
279 | // cgroup->_lock_name, "color%lu", i); | ||
280 | } | 316 | } |
281 | } | 317 | } |
282 | return err; | 318 | return err; |
283 | } | 319 | } |
284 | 320 | ||
285 | /* | 321 | /* |
286 | * Initialzie the this proc | 322 | * Initialize this proc interface |
287 | */ | 323 | */ |
288 | static int __init litmus_color_init(void) | 324 | static int __init litmus_color_init(void) |
289 | { | 325 | { |
290 | int err=0; | 326 | int err=0; |
291 | 327 | ||
292 | INIT_LIST_HEAD(&alloced_pages.list); | 328 | //INIT_LIST_HEAD(&alloced_pages.list); |
293 | spin_lock_init(&alloced_pages.lock); | 329 | //spin_lock_init(&alloced_pages.lock); |
294 | init_variables(); | 330 | init_variables(); |
295 | printk("Cache number = %d , Cache mask = 0x%lx\n", number_cachecolors, CACHE_MASK); | 331 | printk("Cache number = %d , Cache mask = 0x%lx\n", number_cachecolors, CACHE_MASK); |
296 | printk("Bank number = %d , Bank mask = 0x%lx\n", number_banks, BANK_MASK); | 332 | printk("Bank number = %d , Bank mask = 0x%lx\n", number_banks, BANK_MASK); |