author    Namhoon Kim <namhoonk@cs.unc.edu>  2016-05-01 16:47:16 -0400
committer Namhoon Kim <namhoonk@cs.unc.edu>  2016-05-01 16:47:16 -0400
commit    8c20bdbf935c54784e634af2039a5897581e65e8 (patch)
tree      296487b40226983df01028d48d10ac8e3fbf399a
parent    643dae844fc39fa3dc9f45db98070cb29e99e3d5 (diff)
Add system calls for RTSS16
-rw-r--r--  arch/arm/include/asm/unistd.h    |   2
-rw-r--r--  arch/arm/kernel/calls.S          |   3
-rw-r--r--  arch/x86/syscalls/syscall_32.tbl |   2
-rw-r--r--  arch/x86/syscalls/syscall_64.tbl |   3
-rw-r--r--  include/litmus/cache_proc.h      |   1
-rw-r--r--  include/litmus/unistd_32.h       |   4
-rw-r--r--  include/litmus/unistd_64.h       |  19
-rw-r--r--  litmus/cache_proc.c              | 140
-rw-r--r--  litmus/litmus.c                  |  95
9 files changed, 227 insertions(+), 42 deletions(-)
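The patch wires two new LITMUS^RT system calls, run_test and lock_buffer, into the ARM and x86 syscall tables and implements them in litmus/cache_proc.c. As a rough userspace sketch (not part of the commit) of how they could be invoked with syscall(2), using the x86-64 numbers 369 (run_test) and 370 (lock_buffer) added below; the buffer size, test type, and way masks here are illustrative assumptions:

/* Illustrative sketch only (not part of this commit): invoke the new
 * syscalls by the x86-64 numbers added in syscall_64.tbl below.
 * Buffer size, test type, and way masks are assumptions. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>

#define NR_run_test	369	/* "common run_test" entry added below */
#define NR_lock_buffer	370	/* "common lock_buffer" entry added below */

int main(void)
{
	size_t kb = 64;			/* assumed working-set size in KB */
	void *src = malloc(kb * 1024);
	void *dst = malloc(kb * 1024);
	uint64_t delta = 0;		/* lt_t result written by put_user() */

	/* type 1: both buffers are pre-loaded into locked ways, then copied */
	if (syscall(NR_run_test, 1, (int)kb, src, dst, &delta) == 0)
		printf("run_test: %llu ns\n", (unsigned long long)delta);

	/* load src into way 0 of the L2 and leave it locked there
	 * (the way masks are assumptions; the kernel complements them) */
	syscall(NR_lock_buffer, src, kb * 1024, 0x1u, 0x0u);

	free(src);
	free(dst);
	return 0;
}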
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 3b7d36b921d3..5291b703ed3f 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -19,7 +19,7 @@
  * This may need to be greater than __NR_last_syscall+1 in order to
  * account for the padding in the syscall table
  */
-#define __NR_syscalls (388 + NR_litmus_syscalls + 2)
+#define __NR_syscalls (388 + NR_litmus_syscalls + 0)
 
 
 /*
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 6c2ad82fd2c8..55dc86323c86 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -415,7 +415,8 @@
 		CALL(sys_set_mc2_task_param)
 		CALL(sys_set_page_color)
 /* 405 */	CALL(sys_test_call)
-
+		CALL(sys_run_test)
+		CALL(sys_lock_buffer)
 
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
index 075709a93d34..b303a9b2183a 100644
--- a/arch/x86/syscalls/syscall_32.tbl
+++ b/arch/x86/syscalls/syscall_32.tbl
@@ -383,3 +383,5 @@
 374	i386	set_mc2_task_param	sys_set_mc2_task_param
 375	i386	set_page_color		sys_set_page_color
 376	i386	test_call		sys_test_call
+377	i386	run_test		sys_run_test
+378	i386	lock_buffer		sys_lock_buffer
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
index e8fdeecd3d21..5f24a80930cc 100644
--- a/arch/x86/syscalls/syscall_64.tbl
+++ b/arch/x86/syscalls/syscall_64.tbl
@@ -348,7 +348,8 @@
 366	common	set_mc2_task_param	sys_set_mc2_task_param
 367	common	set_page_color		sys_set_page_color
 368	common	test_call		sys_test_call
-
+369	common	run_test		sys_run_test
+370	common	lock_buffer		sys_lock_buffer
 
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
diff --git a/include/litmus/cache_proc.h b/include/litmus/cache_proc.h
index 962851da34cc..e9440de504fc 100644
--- a/include/litmus/cache_proc.h
+++ b/include/litmus/cache_proc.h
@@ -7,6 +7,7 @@ void litmus_setup_lockdown(void __iomem*, u32);
 void enter_irq_mode(void);
 void exit_irq_mode(void);
 void flush_cache(int all);
+void lock_cache(int cpu, u32 val);
 
 extern struct page *new_alloc_page_color(unsigned long color);
 
diff --git a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h
index 0000010f7d95..86bbbb8d33ea 100644
--- a/include/litmus/unistd_32.h
+++ b/include/litmus/unistd_32.h
@@ -23,5 +23,7 @@
 #define __NR_set_mc2_task_param __LSC(15)
 #define __NR_set_page_color __LSC(16)
 #define __NR_test_call __LSC(17)
+#define __NR_run_test __LSC(18)
+#define __NR_lock_buffer __LSC(19)
 
-#define NR_litmus_syscalls 18
+#define NR_litmus_syscalls 20
diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h
index 1a741bd3e5da..4b96e7c259d1 100644
--- a/include/litmus/unistd_64.h
+++ b/include/litmus/unistd_64.h
@@ -30,8 +30,21 @@ __SYSCALL(__NR_release_ts, sys_release_ts)
 #define __NR_null_call __LSC(11)
 __SYSCALL(__NR_null_call, sys_null_call)
 #define __NR_get_current_budget __LSC(12)
-__SYSCALL(____NR_get_current_budget, sys_get_current_budget)
-#define __NR_test_call __LSC(13)
+__SYSCALL(__NR_get_current_budget, sys_get_current_budget)
+#define __NR_reservation_create __LSC(13)
+__SYSCALL(__NR_reservation_create, sys_reservation_create)
+#define __NR_reservation_destroy __LSC(14)
+__SYSCALL(__NR_reservation_destroy, sys_reservation_destroy)
+#define __NR_set_mc2_task_param __LSC(15)
+__SYSCALL(__NR_set_mc2_task_param, sys_set_mc2_task_param)
+#define __NR_set_page_color __LSC(16)
+__SYSCALL(__NR_set_page_color, sys_set_page_color)
+#define __NR_test_call __LSC(17)
 __SYSCALL(__NR_test_call, sys_test_call)
+#define __NR_run_test __LSC(18)
+__SYSCALL(__NR_run_test, sys_run_test)
+#define __NR_lock_buffer __LSC(19)
+__SYACALL(__NR_lock_buffer, sys_lock_buffer)
 
-#define NR_litmus_syscalls 14
+
+#define NR_litmus_syscalls 20
diff --git a/litmus/cache_proc.c b/litmus/cache_proc.c
index 01350294afaf..397214f2b8cc 100644
--- a/litmus/cache_proc.c
+++ b/litmus/cache_proc.c
@@ -1,3 +1,5 @@
+#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
@@ -7,6 +9,7 @@
 #include <linux/io.h>
 #include <linux/mutex.h>
 #include <linux/time.h>
+#include <linux/random.h>
 
 #include <litmus/litmus_proc.h>
 #include <litmus/sched_trace.h>
@@ -21,6 +24,14 @@
 #define LOCK_ALL (~UNLOCK_ALL)
 #define MAX_NR_WAYS 16
 #define MAX_NR_COLORS 16
+#define CACHELINE_SIZE 32
+#define INTS_IN_CACHELINE (CACHELINE_SIZE/sizeof(int))
+#define CACHELINES_IN_1KB (1024 / sizeof(cacheline_t))
+
+typedef struct cacheline
+{
+	int line[INTS_IN_CACHELINE];
+} __attribute__((aligned(CACHELINE_SIZE))) cacheline_t;
 
 void mem_lock(u32 lock_val, int cpu);
 
@@ -1121,6 +1132,135 @@ void flush_cache(int all)
 	raw_spin_unlock_irqrestore(&cache_lock, flags);
 }
 
+/* src = shared, dst = local */
+#if 0 // random
+asmlinkage long sys_run_test(int type, int size, cacheline_t *src, cacheline_t *dst, lt_t __user *ts)
+{
+	/* size is in KB */
+	long ret = 0;
+	lt_t t1, t2;
+	int numlines = size * CACHELINES_IN_1KB;
+	int next, sum = 0, ran;
+	unsigned long flags;
+
+	get_random_bytes(&ran, sizeof(int));
+	next = ran % ((size*1024)/sizeof(cacheline_t));
+
+	//preempt_disable();
+	if (type == 1) {
+		int i, j;
+		color_read_in_mem_lock(0x0000FFF0, 0x0000000f, (void*)src, (void*)src + size*1024);
+		color_read_in_mem_lock(0x0000FF0F, 0x0000000f, (void*)dst, (void*)dst + size*1024);
+
+		local_irq_save(flags);
+		t1 = litmus_clock();
+		for (i = 0; i < numlines; i++) {
+			next = src[next].line[0];
+			for (j = 1; j < INTS_IN_CACHELINE; j++) {
+				dst[next].line[j] = src[next].line[j]; // read
+				//src[next].line[j] = dst[next].line[j]; // write
+			}
+		}
+		t2 = litmus_clock();
+		local_irq_restore(flags);
+		sum = next + (int)t2;
+		t2 -= t1;
+		ret = put_user(t2, ts);
+	}
+	else {
+		int i, j;
+		color_read_in_mem_lock(0x0000FF0F, 0x0000000f, (void*)dst, (void*)dst + size*1024);
+		local_irq_save(flags);
+		t1 = litmus_clock();
+		for (i = 0; i < numlines; i++) {
+			next = src[next].line[0];
+			for (j = 1; j < INTS_IN_CACHELINE; j++) {
+				dst[next].line[j] = src[next].line[j]; //read
+				//src[next].line[j] = dst[next].line[j]; //write
+			}
+		}
+		t2 = litmus_clock();
+		local_irq_restore(flags);
+		sum = next + (int)t2;
+		t2 -= t1;
+		ret = put_user(t2, ts);
+		v7_flush_kern_dcache_area(src, size*1024);
+	}
+	//preempt_enable();
+	flush_cache(1);
+
+	return ret;
+}
+#else
+// sequential
+asmlinkage long sys_run_test(int type, int size, cacheline_t *src, cacheline_t *dst, lt_t __user *ts)
+{
+	/* size is in KB */
+	long ret = 0;
+	lt_t t1, t2;
+	int numlines = size * CACHELINES_IN_1KB;
+	int sum = 0;
+	unsigned long flags;
+
+	//preempt_disable();
+	if (type == 1) {
+		int i, j;
+		color_read_in_mem_lock(0x0000FFF0, 0x0000000f, (void*)src, (void*)src + size*1024);
+		color_read_in_mem_lock(0x0000FF0F, 0x0000000f, (void*)dst, (void*)dst + size*1024);
+
+		local_irq_save(flags);
+		t1 = litmus_clock();
+		for (i = 0; i < numlines; i++) {
+			for (j = 0; j < INTS_IN_CACHELINE; j++) {
+				//dst[i].line[j] = src[i].line[j]; // read
+				src[i].line[j] = dst[i].line[j]; // write
+			}
+		}
+		t2 = litmus_clock();
+		local_irq_restore(flags);
+		sum = (int)(t1 + t2);
+		t2 -= t1;
+		ret = put_user(t2, ts);
+	}
+	else {
+		int i, j;
+		color_read_in_mem_lock(0x0000FF0F, 0x0000000f, (void*)dst, (void*)dst + size*1024);
+		local_irq_save(flags);
+		t1 = litmus_clock();
+		for (i = 0; i < numlines; i++) {
+			for (j = 0; j < INTS_IN_CACHELINE; j++) {
+				//dst[i].line[j] = src[i].line[j]; //read
+				src[i].line[j] = dst[i].line[j]; //write
+			}
+		}
+		t2 = litmus_clock();
+		local_irq_restore(flags);
+		sum = (int)(t1 + t2);
+		t2 -= t1;
+		ret = put_user(t2, ts);
+		v7_flush_kern_dcache_area(src, size*1024);
+	}
+	//preempt_enable();
+	flush_cache(1);
+
+	return ret;
+}
+#endif
+
+asmlinkage long sys_lock_buffer(void* vaddr, size_t size, u32 lock_way, u32 unlock_way)
+{
+	/* size is in bytes */
+	long ret = 0;
+	int i;
+	u32 lock_val, unlock_val;
+
+	lock_val = ~lock_way & 0x0000ffff;
+	unlock_val = ~unlock_way & 0x0000ffff;
+	color_read_in_mem_lock(lock_val, unlock_val, (void*)vaddr, (void*)vaddr + size);
+
+	return ret;
+}
+
 #define TRIALS 1000
 
 static int perf_test(void) {
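In the random-access variant of sys_run_test above (the #if 0 branch), each iteration reads the next index from src[next].line[0], so the caller is expected to pre-link line[0] of every cache line into a traversal order before issuing the syscall. A hedged userspace sketch of building such a pointer-chasing buffer (the shuffling scheme, helper name, and buffer size are assumptions, not part of the commit):

/* Sketch (assumption): build a src buffer whose line[0] entries form a
 * shuffled cycle, so next = src[next].line[0] visits every line once. */
#include <stdlib.h>
#include <time.h>

#define CACHELINE_SIZE 32
#define INTS_IN_CACHELINE (CACHELINE_SIZE / sizeof(int))

typedef struct cacheline {
	int line[INTS_IN_CACHELINE];
} __attribute__((aligned(CACHELINE_SIZE))) cacheline_t;

static cacheline_t *make_chase_buffer(size_t kb)
{
	size_t n = kb * 1024 / sizeof(cacheline_t), i;
	cacheline_t *buf = aligned_alloc(CACHELINE_SIZE, n * sizeof(cacheline_t));
	int *order = malloc(n * sizeof(int));

	for (i = 0; i < n; i++)
		order[i] = i;
	srand(time(NULL));
	for (i = n - 1; i > 0; i--) {	/* Fisher-Yates shuffle */
		size_t j = rand() % (i + 1);
		int tmp = order[i]; order[i] = order[j]; order[j] = tmp;
	}
	for (i = 0; i < n; i++)		/* link each line to its successor */
		buf[order[i]].line[0] = order[(i + 1) % n];

	free(order);
	return buf;	/* pass as src to run_test; dst only needs matching size */
}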
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 0f122e1e7ec1..ddb80e1aae12 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -356,17 +356,26 @@ asmlinkage long sys_set_page_color(int cpu)
 	long ret = 0;
 	//struct page *page_itr = NULL;
 	struct vm_area_struct *vma_itr = NULL;
-	int nr_pages = 0, nr_shared_pages = 0, nr_failed = 0;
+	int nr_pages = 0, nr_shared_pages = 0, nr_failed = 0, nr_not_migrated = 0;
 	unsigned long node;
 	enum crit_level lv;
+	struct mm_struct *mm;
 
 	LIST_HEAD(pagelist);
 	LIST_HEAD(shared_pagelist);
 
+	migrate_prep();
 
-	down_read(&current->mm->mmap_sem);
+	rcu_read_lock();
+	get_task_struct(current);
+	rcu_read_unlock();
+	mm = get_task_mm(current);
+	put_task_struct(current);
+
+	//down_read(&current->mm->mmap_sem);
+	down_read(&mm->mmap_sem);
 	TRACE_TASK(current, "SYSCALL set_page_color\n");
-	vma_itr = current->mm->mmap;
+	vma_itr = mm->mmap;
 	while (vma_itr != NULL) {
 		unsigned int num_pages = 0, i;
 		struct page *old_page = NULL;
@@ -376,7 +385,6 @@ asmlinkage long sys_set_page_color(int cpu)
 		//printk(KERN_INFO "flags: 0x%lx\n", vma_itr->vm_flags);
 		//printk(KERN_INFO "start - end: 0x%lx - 0x%lx (%lu)\n", vma_itr->vm_start, vma_itr->vm_end, (vma_itr->vm_end - vma_itr->vm_start)/PAGE_SIZE);
 		//printk(KERN_INFO "vm_page_prot: 0x%lx\n", vma_itr->vm_page_prot);
-
 		for (i = 0; i < num_pages; i++) {
 			old_page = follow_page(vma_itr, vma_itr->vm_start + PAGE_SIZE*i, FOLL_GET|FOLL_SPLIT);
 
@@ -393,7 +401,7 @@ asmlinkage long sys_set_page_color(int cpu)
 
 			TRACE_TASK(current, "addr: %08x, pfn: %x, _mapcount: %d, _count: %d\n", vma_itr->vm_start + PAGE_SIZE*i, __page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page));
 
-			if (page_mapcount(old_page) == 1) {
+			//if (page_mapcount(old_page) == 1) {
 				ret = isolate_lru_page(old_page);
 				if (!ret) {
 					list_add_tail(&old_page->lru, &pagelist);
@@ -406,12 +414,14 @@ asmlinkage long sys_set_page_color(int cpu)
 				}
 				//printk(KERN_INFO "PRIVATE _mapcount = %d, _count = %d\n", page_mapcount(old_page), page_count(old_page));
 				put_page(old_page);
-			}
+			//}
+			/*
 			else {
 				nr_shared_pages++;
 				//printk(KERN_INFO "SHARED _mapcount = %d, _count = %d\n", page_mapcount(old_page), page_count(old_page));
 				put_page(old_page);
 			}
+			*/
 		}
 
 		vma_itr = vma_itr->vm_next;
@@ -432,11 +442,12 @@ asmlinkage long sys_set_page_color(int cpu)
 		node = 8;
 	else
 		node = cpu*2 + lv;
-	//node= 0;
 
 	if (!list_empty(&pagelist)) {
-		ret = migrate_pages(&pagelist, new_alloc_page, NULL, node, MIGRATE_ASYNC, MR_SYSCALL);
+		ret = migrate_pages(&pagelist, new_alloc_page, NULL, node, MIGRATE_SYNC, MR_SYSCALL);
 		TRACE_TASK(current, "%ld pages not migrated.\n", ret);
+		printk(KERN_INFO "%ld pages not migrated.\n", ret);
+		nr_not_migrated = ret;
 		if (ret) {
 			putback_movable_pages(&pagelist);
 		}
@@ -453,7 +464,7 @@ asmlinkage long sys_set_page_color(int cpu)
 		vma_itr = vma_itr->vm_next;
 	}
 */
-	up_read(&current->mm->mmap_sem);
+	up_read(&mm->mmap_sem);
 
 /*
 	list_for_each_entry(page_itr, &shared_pagelist, lru) {
@@ -461,7 +472,7 @@ asmlinkage long sys_set_page_color(int cpu)
 	}
 */
 	TRACE_TASK(current, "nr_pages = %d nr_failed = %d\n", nr_pages, nr_failed);
-	printk(KERN_INFO "node = %ld, nr_pages = %d, nr_shared_pages = %d, nr_failed = %d\n", node, nr_pages, nr_shared_pages, nr_failed);
+	printk(KERN_INFO "node = %ld, nr_migrated_pages = %d, nr_shared_pages = %d, nr_failed = %d\n", node, nr_pages-nr_not_migrated, nr_failed-2, nr_failed);
 	//printk(KERN_INFO "node = %d\n", cpu_to_node(smp_processor_id()));
 	flush_cache(1);
 
@@ -477,34 +488,48 @@ asmlinkage long sys_test_call(unsigned int param)
 
 	TRACE_CUR("test_call param = %d\n", param);
 
-	down_read(&current->mm->mmap_sem);
-	vma_itr = current->mm->mmap;
-	while (vma_itr != NULL) {
-		printk(KERN_INFO "--------------------------------------------\n");
-		printk(KERN_INFO "vm_start : %lx\n", vma_itr->vm_start);
-		printk(KERN_INFO "vm_end : %lx\n", vma_itr->vm_end);
-		printk(KERN_INFO "vm_flags : %lx\n", vma_itr->vm_flags);
-		printk(KERN_INFO "vm_prot : %x\n", pgprot_val(vma_itr->vm_page_prot));
-		printk(KERN_INFO "VM_SHARED? %ld\n", vma_itr->vm_flags & VM_SHARED);
-/*		if (vma_itr->vm_file) {
-			struct file *fp = vma_itr->vm_file;
-			unsigned long fcount = atomic_long_read(&(fp->f_count));
-			printk(KERN_INFO "f_count : %ld\n", fcount);
-			if (fcount > 1) {
-				vma_itr->vm_page_prot = pgprot_noncached(vma_itr->vm_page_prot);
-			}
-		}
-		printk(KERN_INFO "vm_prot2 : %x\n", pgprot_val(vma_itr->vm_page_prot));
-*/
-		vma_itr = vma_itr->vm_next;
-	}
-	printk(KERN_INFO "--------------------------------------------\n");
-	up_read(&current->mm->mmap_sem);
-
-	local_irq_save(flags);
-	l2c310_flush_all();
-	local_irq_restore(flags);
-
+	if (param == 0) {
+		down_read(&current->mm->mmap_sem);
+		vma_itr = current->mm->mmap;
+		while (vma_itr != NULL) {
+			printk(KERN_INFO "--------------------------------------------\n");
+			printk(KERN_INFO "vm_start : %lx\n", vma_itr->vm_start);
+			printk(KERN_INFO "vm_end : %lx\n", vma_itr->vm_end);
+			printk(KERN_INFO "vm_flags : %lx\n", vma_itr->vm_flags);
+			printk(KERN_INFO "vm_prot : %x\n", pgprot_val(vma_itr->vm_page_prot));
+			printk(KERN_INFO "VM_SHARED? %ld\n", vma_itr->vm_flags & VM_SHARED);
+/*			if (vma_itr->vm_file) {
+				struct file *fp = vma_itr->vm_file;
+				unsigned long fcount = atomic_long_read(&(fp->f_count));
+				printk(KERN_INFO "f_count : %ld\n", fcount);
+				if (fcount > 1) {
+					vma_itr->vm_page_prot = pgprot_noncached(vma_itr->vm_page_prot);
+				}
+			}
+			printk(KERN_INFO "vm_prot2 : %x\n", pgprot_val(vma_itr->vm_page_prot));
+*/
+			vma_itr = vma_itr->vm_next;
+		}
+		printk(KERN_INFO "--------------------------------------------\n");
+		up_read(&current->mm->mmap_sem);
+
+		local_irq_save(flags);
+		l2c310_flush_all();
+		local_irq_restore(flags);
+	}
+	else if (param == 1) {
+		int i;
+		flush_cache(1);
+		for (i = 0; i < 4; i++) {
+			lock_cache(i, 0x00003fff);
+		}
+	}
+	else if (param == 2) {
+		int i;
+		for (i = 0; i < 4; i++) {
+			lock_cache(i, 0xffffffff);
+		}
+	}
 
 	return ret;
 }
 