author     Jeff Dike <jdike@addtoit.com>  2007-10-16 04:27:00 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-16 12:43:05 -0400
commit     ba180fd437156f7fd8cfb2fdd021d949eeef08d6 (patch)
tree       b9f38b9cdd7a5b1aacf00341d1948314663c5871 /arch/um/kernel/tlb.c
parent     77bf4400319db9d2a8af6b00c2be6faa0f3d07cb (diff)
uml: style fixes pass 3
Formatting changes in the files which have been changed in the course of
folding foo_skas functions into their callers. These include:
        copyright updates
        header file trimming
        style fixes
        adding severity to printks
These changes should be entirely non-functional.

Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
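The bulk of the diff below applies two mechanical rules from the kernel's CodingStyle: a space between control keywords (if/for/while) and the opening parenthesis, and an explicit KERN_* severity prefix on every printk (it also drops the parentheses from return statements). A minimal before/after sketch of the two main rules — the error message here is invented for illustration; only the rules themselves come from this patch:

        /* before: old UML style -- no space after the keyword, printk without severity */
        if(err < 0)
                printk("something failed, errno = %d\n", -err);

        /* after: standard kernel style -- space after the keyword, explicit KERN_ERR level */
        if (err < 0)
                printk(KERN_ERR "something failed, errno = %d\n", -err);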
Diffstat (limited to 'arch/um/kernel/tlb.c')
-rw-r--r--  arch/um/kernel/tlb.c  158
1 file changed, 79 insertions, 79 deletions
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 12b8c637527d..849922fcfb60 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -1,19 +1,16 @@
 /*
- * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  * Licensed under the GPL
  */
 
 #include "linux/mm.h"
-#include "asm/page.h"
-#include "asm/pgalloc.h"
 #include "asm/pgtable.h"
 #include "asm/tlbflush.h"
 #include "as-layout.h"
-#include "tlb.h"
-#include "mem.h"
 #include "mem_user.h"
 #include "os.h"
 #include "skas.h"
+#include "tlb.h"
 
 static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
                     unsigned int prot, struct host_vm_op *ops, int *index,
@@ -26,18 +23,18 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
         int fd, ret = 0;
 
         fd = phys_mapping(phys, &offset);
-        if(*index != -1){
+        if (*index != -1) {
                 last = &ops[*index];
-                if((last->type == MMAP) &&
+                if ((last->type == MMAP) &&
                    (last->u.mmap.addr + last->u.mmap.len == virt) &&
                    (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
-                   (last->u.mmap.offset + last->u.mmap.len == offset)){
+                   (last->u.mmap.offset + last->u.mmap.len == offset)) {
                         last->u.mmap.len += len;
                         return 0;
                 }
         }
 
-        if(*index == last_filled){
+        if (*index == last_filled) {
                 ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
                 *index = -1;
         }
@@ -62,16 +59,16 @@ static int add_munmap(unsigned long addr, unsigned long len,
         struct host_vm_op *last;
         int ret = 0;
 
-        if(*index != -1){
+        if (*index != -1) {
                 last = &ops[*index];
-                if((last->type == MUNMAP) &&
-                   (last->u.munmap.addr + last->u.mmap.len == addr)){
+                if ((last->type == MUNMAP) &&
+                    (last->u.munmap.addr + last->u.mmap.len == addr)) {
                         last->u.munmap.len += len;
                         return 0;
                 }
         }
 
-        if(*index == last_filled){
+        if (*index == last_filled) {
                 ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
                 *index = -1;
         }
@@ -92,17 +89,17 @@ static int add_mprotect(unsigned long addr, unsigned long len,
         struct host_vm_op *last;
         int ret = 0;
 
-        if(*index != -1){
+        if (*index != -1) {
                 last = &ops[*index];
-                if((last->type == MPROTECT) &&
+                if ((last->type == MPROTECT) &&
                    (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
-                   (last->u.mprotect.prot == prot)){
+                   (last->u.mprotect.prot == prot)) {
                         last->u.mprotect.len += len;
                         return 0;
                 }
         }
 
-        if(*index == last_filled){
+        if (*index == last_filled) {
                 ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
                 *index = -1;
         }
@@ -141,15 +138,15 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
         }
         prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
                 (x ? UM_PROT_EXEC : 0));
-        if(force || pte_newpage(*pte)){
-                if(pte_present(*pte))
+        if (force || pte_newpage(*pte)) {
+                if (pte_present(*pte))
                         ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
                                        PAGE_SIZE, prot, ops, op_index,
                                        last_op, mmu, flush, do_ops);
                 else ret = add_munmap(addr, PAGE_SIZE, ops, op_index,
                                       last_op, mmu, flush, do_ops);
         }
-        else if(pte_newprot(*pte))
+        else if (pte_newprot(*pte))
                 ret = add_mprotect(addr, PAGE_SIZE, prot, ops, op_index,
                                    last_op, mmu, flush, do_ops);
         *pte = pte_mkuptodate(*pte);
@@ -172,8 +169,8 @@ static inline int update_pmd_range(pud_t *pud, unsigned long addr,
         pmd = pmd_offset(pud, addr);
         do {
                 next = pmd_addr_end(addr, end);
-                if(!pmd_present(*pmd)){
-                        if(force || pmd_newpage(*pmd)){
+                if (!pmd_present(*pmd)) {
+                        if (force || pmd_newpage(*pmd)) {
                                 ret = add_munmap(addr, next - addr, ops,
                                                  op_index, last_op, mmu,
                                                  flush, do_ops);
@@ -202,8 +199,8 @@ static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
         pud = pud_offset(pgd, addr);
         do {
                 next = pud_addr_end(addr, end);
-                if(!pud_present(*pud)){
-                        if(force || pud_newpage(*pud)){
+                if (!pud_present(*pud)) {
+                        if (force || pud_newpage(*pud)) {
                                 ret = add_munmap(addr, next - addr, ops,
                                                  op_index, last_op, mmu,
                                                  flush, do_ops);
@@ -233,8 +230,8 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
         pgd = pgd_offset(mm, addr);
         do {
                 next = pgd_addr_end(addr, end_addr);
-                if(!pgd_present(*pgd)){
-                        if (force || pgd_newpage(*pgd)){
+                if (!pgd_present(*pgd)) {
+                        if (force || pgd_newpage(*pgd)) {
                                 ret = add_munmap(addr, next - addr, ops,
                                                  &op_index, last_op, mmu,
                                                  &flush, do_ops);
@@ -246,12 +243,13 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                                   do_ops);
         } while (pgd++, addr = next, ((addr != end_addr) && !ret));
 
-        if(!ret)
+        if (!ret)
                 ret = (*do_ops)(mmu, ops, op_index, 1, &flush);
 
         /* This is not an else because ret is modified above */
-        if(ret) {
-                printk("fix_range_common: failed, killing current process\n");
+        if (ret) {
+                printk(KERN_ERR "fix_range_common: failed, killing current "
+                       "process\n");
                 force_sig(SIGKILL, current);
         }
 }
@@ -267,17 +265,17 @@ int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
         int updated = 0, err;
 
         mm = &init_mm;
-        for(addr = start; addr < end;){
+        for (addr = start; addr < end;) {
                 pgd = pgd_offset(mm, addr);
-                if(!pgd_present(*pgd)){
+                if (!pgd_present(*pgd)) {
                         last = ADD_ROUND(addr, PGDIR_SIZE);
-                        if(last > end)
+                        if (last > end)
                                 last = end;
-                        if(pgd_newpage(*pgd)){
+                        if (pgd_newpage(*pgd)) {
                                 updated = 1;
                                 err = os_unmap_memory((void *) addr,
                                                       last - addr);
-                                if(err < 0)
+                                if (err < 0)
                                         panic("munmap failed, errno = %d\n",
                                               -err);
                         }
@@ -286,15 +284,15 @@ int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
                 }
 
                 pud = pud_offset(pgd, addr);
-                if(!pud_present(*pud)){
+                if (!pud_present(*pud)) {
                         last = ADD_ROUND(addr, PUD_SIZE);
-                        if(last > end)
+                        if (last > end)
                                 last = end;
-                        if(pud_newpage(*pud)){
+                        if (pud_newpage(*pud)) {
                                 updated = 1;
                                 err = os_unmap_memory((void *) addr,
                                                       last - addr);
-                                if(err < 0)
+                                if (err < 0)
                                         panic("munmap failed, errno = %d\n",
                                               -err);
                         }
@@ -303,15 +301,15 @@ int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
                 }
 
                 pmd = pmd_offset(pud, addr);
-                if(!pmd_present(*pmd)){
+                if (!pmd_present(*pmd)) {
                         last = ADD_ROUND(addr, PMD_SIZE);
-                        if(last > end)
+                        if (last > end)
                                 last = end;
-                        if(pmd_newpage(*pmd)){
+                        if (pmd_newpage(*pmd)) {
                                 updated = 1;
                                 err = os_unmap_memory((void *) addr,
                                                       last - addr);
-                                if(err < 0)
+                                if (err < 0)
                                         panic("munmap failed, errno = %d\n",
                                               -err);
                         }
@@ -320,25 +318,25 @@ int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
                 }
 
                 pte = pte_offset_kernel(pmd, addr);
-                if(!pte_present(*pte) || pte_newpage(*pte)){
+                if (!pte_present(*pte) || pte_newpage(*pte)) {
                         updated = 1;
                         err = os_unmap_memory((void *) addr,
                                               PAGE_SIZE);
-                        if(err < 0)
+                        if (err < 0)
                                 panic("munmap failed, errno = %d\n",
                                       -err);
-                        if(pte_present(*pte))
+                        if (pte_present(*pte))
                                 map_memory(addr,
                                            pte_val(*pte) & PAGE_MASK,
                                            PAGE_SIZE, 1, 1, 1);
                 }
-                else if(pte_newprot(*pte)){
+                else if (pte_newprot(*pte)) {
                         updated = 1;
                         os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
                 }
                 addr += PAGE_SIZE;
         }
-        return(updated);
+        return updated;
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
@@ -354,15 +352,15 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
 
         address &= PAGE_MASK;
         pgd = pgd_offset(mm, address);
-        if(!pgd_present(*pgd))
+        if (!pgd_present(*pgd))
                 goto kill;
 
         pud = pud_offset(pgd, address);
-        if(!pud_present(*pud))
+        if (!pud_present(*pud))
                 goto kill;
 
         pmd = pmd_offset(pud, address);
-        if(!pmd_present(*pmd))
+        if (!pmd_present(*pmd))
                 goto kill;
 
         pte = pte_offset_kernel(pmd, address);
@@ -380,8 +378,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
         mm_id = &mm->context.skas.id;
         prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
                 (x ? UM_PROT_EXEC : 0));
-        if(pte_newpage(*pte)){
-                if(pte_present(*pte)){
+        if (pte_newpage(*pte)) {
+                if (pte_present(*pte)) {
                         unsigned long long offset;
                         int fd;
 
@@ -391,10 +389,10 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
                 }
                 else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
         }
-        else if(pte_newprot(*pte))
+        else if (pte_newprot(*pte))
                 err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);
 
-        if(err)
+        if (err)
                 goto kill;
 
         *pte = pte_mkuptodate(*pte);
@@ -402,28 +400,28 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
         return;
 
 kill:
-        printk("Failed to flush page for address 0x%lx\n", address);
+        printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
         force_sig(SIGKILL, current);
 }
 
 pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
 {
-        return(pgd_offset(mm, address));
+        return pgd_offset(mm, address);
 }
 
 pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
 {
-        return(pud_offset(pgd, address));
+        return pud_offset(pgd, address);
 }
 
 pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
 {
-        return(pmd_offset(pud, address));
+        return pmd_offset(pud, address);
 }
 
 pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
 {
-        return(pte_offset_kernel(pmd, address));
+        return pte_offset_kernel(pmd, address);
 }
 
 pte_t *addr_pte(struct task_struct *task, unsigned long addr)
@@ -432,7 +430,7 @@ pte_t *addr_pte(struct task_struct *task, unsigned long addr)
         pud_t *pud = pud_offset(pgd, addr);
         pmd_t *pmd = pmd_offset(pud, addr);
 
-        return(pte_offset_map(pmd, addr));
+        return pte_offset_map(pmd, addr);
 }
 
 void flush_tlb_all(void)
@@ -452,18 +450,18 @@ void flush_tlb_kernel_vm(void)
 
 void __flush_tlb_one(unsigned long addr)
 {
         flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
 }
 
 static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
                   int finished, void **flush)
 {
         struct host_vm_op *op;
         int i, ret = 0;
 
-        for(i = 0; i <= last && !ret; i++){
+        for (i = 0; i <= last && !ret; i++) {
                 op = &ops[i];
-                switch(op->type){
+                switch(op->type) {
                 case MMAP:
                         ret = map(&mmu->skas.id, op->u.mmap.addr,
                                   op->u.mmap.len, op->u.mmap.prot,
@@ -480,7 +478,8 @@ static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
                                   finished, flush);
                         break;
                 default:
-                        printk("Unknown op type %d in do_ops\n", op->type);
+                        printk(KERN_ERR "Unknown op type %d in do_ops\n",
+                               op->type);
                         break;
                 }
         }
@@ -491,32 +490,33 @@ static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
 static void fix_range(struct mm_struct *mm, unsigned long start_addr,
                       unsigned long end_addr, int force)
 {
-        if(!proc_mm && (end_addr > CONFIG_STUB_START))
+        if (!proc_mm && (end_addr > CONFIG_STUB_START))
                 end_addr = CONFIG_STUB_START;
 
         fix_range_common(mm, start_addr, end_addr, force, do_ops);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                      unsigned long end)
 {
-        if(vma->vm_mm == NULL)
+        if (vma->vm_mm == NULL)
                 flush_tlb_kernel_range_common(start, end);
         else fix_range(vma->vm_mm, start, end, 0);
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
         unsigned long end;
 
-        /* Don't bother flushing if this address space is about to be
-         * destroyed.
-         */
-        if(atomic_read(&mm->mm_users) == 0)
-                return;
+        /*
+         * Don't bother flushing if this address space is about to be
+         * destroyed.
+         */
+        if (atomic_read(&mm->mm_users) == 0)
+                return;
 
         end = proc_mm ? task_size : CONFIG_STUB_START;
         fix_range(mm, 0, end, 0);
 }
 
 void force_flush_all(void)
@@ -524,7 +524,7 @@ void force_flush_all(void)
         struct mm_struct *mm = current->mm;
         struct vm_area_struct *vma = mm->mmap;
 
-        while(vma != NULL) {
+        while (vma != NULL) {
                 fix_range(mm, vma->vm_start, vma->vm_end, 1);
                 vma = vma->vm_next;
         }