author      Thomas Gleixner <tglx@linutronix.de>      2008-01-30 07:34:07 -0500
committer   Ingo Molnar <mingo@elte.hu>               2008-01-30 07:34:07 -0500
commit      d7c8f21a8cad0228c7c5ce2bb6dbd95d1ee49d13 (patch)
tree        d1e305bec62022a0bec82a3499a372c2c7c40583 /arch/x86/mm/pageattr.c
parent      d1028a154c65d7fadd1b2d0276c077014d401ec7 (diff)
x86: cpa: move flush to cpa
The set_memory_* and set_pages_* family of APIs currently requires the
callers to do a global TLB flush after the function call; forgetting this
is a very nasty deathtrap. This patch moves the global TLB flush into
each of the callers.
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
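[Editor's note] To make the calling-convention change concrete, here is a small caller-side sketch. It is not taken from the patch: the function name example_write_protect is hypothetical, and the declaration of set_memory_ro() in <asm/cacheflush.h> is an assumption about the x86 headers of this era. The point is only that the explicit global_flush_tlb() pairing disappears, since the flush now happens inside the set_memory_*()/set_pages_*() helpers and global_flush_tlb() becomes a static helper that is no longer exported.

/*
 * Illustrative sketch only -- not part of this patch.
 * "addr" is assumed to be a page-aligned kernel virtual address
 * owned by the caller.
 */
#include <asm/cacheflush.h>	/* set_memory_ro() and friends (x86) */

static int example_write_protect(unsigned long addr, int numpages)
{
	/*
	 * Old calling convention: the attribute change did not flush,
	 * so every caller had to remember to do it by hand:
	 *
	 *	err = set_memory_ro(addr, numpages);
	 *	global_flush_tlb();		<-- easy to forget
	 */

	/*
	 * New calling convention after this patch: set_memory_ro()
	 * performs the global TLB flush itself, so the explicit
	 * global_flush_tlb() call simply goes away.
	 */
	return set_memory_ro(addr, numpages);
}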
Diffstat (limited to 'arch/x86/mm/pageattr.c')
-rw-r--r--   arch/x86/mm/pageattr.c   137
1 file changed, 71 insertions, 66 deletions
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index e4d2b6930e61..a2d747c06147 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -23,6 +23,36 @@ within(unsigned long addr, unsigned long start, unsigned long end)
 }

 /*
+ * Flushing functions
+ */
+void clflush_cache_range(void *addr, int size)
+{
+        int i;
+
+        for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
+                clflush(addr+i);
+}
+
+static void flush_kernel_map(void *arg)
+{
+        /*
+         * Flush all to work around Errata in early athlons regarding
+         * large page flushing.
+         */
+        __flush_tlb_all();
+
+        if (boot_cpu_data.x86_model >= 4)
+                wbinvd();
+}
+
+static void global_flush_tlb(void)
+{
+        BUG_ON(irqs_disabled());
+
+        on_each_cpu(flush_kernel_map, NULL, 1, 1);
+}
+
+/*
  * Certain areas of memory on x86 require very specific protection flags,
  * for example the BIOS area or kernel text. Callers don't always get this
  * right (again, ioremap() on BIOS memory is not uncommon) so this function
@@ -328,149 +358,124 @@ static int change_page_attr_clear(unsigned long addr, int numpages,

 int set_memory_uc(unsigned long addr, int numpages)
 {
-        pgprot_t uncached;
+        int err;

-        pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
-        return change_page_attr_set(addr, numpages, uncached);
+        err = change_page_attr_set(addr, numpages,
+                                   __pgprot(_PAGE_PCD | _PAGE_PWT));
+        global_flush_tlb();
+        return err;
 }
 EXPORT_SYMBOL(set_memory_uc);

 int set_memory_wb(unsigned long addr, int numpages)
 {
-        pgprot_t uncached;
+        int err;

-        pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
-        return change_page_attr_clear(addr, numpages, uncached);
+        err = change_page_attr_clear(addr, numpages,
+                                     __pgprot(_PAGE_PCD | _PAGE_PWT));
+        global_flush_tlb();
+        return err;
 }
 EXPORT_SYMBOL(set_memory_wb);

 int set_memory_x(unsigned long addr, int numpages)
 {
-        pgprot_t nx;
+        int err;

-        pgprot_val(nx) = _PAGE_NX;
-        return change_page_attr_clear(addr, numpages, nx);
+        err = change_page_attr_clear(addr, numpages,
+                                     __pgprot(_PAGE_NX));
+        global_flush_tlb();
+        return err;
 }
 EXPORT_SYMBOL(set_memory_x);

 int set_memory_nx(unsigned long addr, int numpages)
 {
-        pgprot_t nx;
+        int err;

-        pgprot_val(nx) = _PAGE_NX;
-        return change_page_attr_set(addr, numpages, nx);
+        err = change_page_attr_set(addr, numpages,
+                                   __pgprot(_PAGE_NX));
+        global_flush_tlb();
+        return err;
 }
 EXPORT_SYMBOL(set_memory_nx);

 int set_memory_ro(unsigned long addr, int numpages)
 {
-        pgprot_t rw;
+        int err;

-        pgprot_val(rw) = _PAGE_RW;
-        return change_page_attr_clear(addr, numpages, rw);
+        err = change_page_attr_clear(addr, numpages,
+                                     __pgprot(_PAGE_RW));
+        global_flush_tlb();
+        return err;
 }

 int set_memory_rw(unsigned long addr, int numpages)
 {
-        pgprot_t rw;
+        int err;

-        pgprot_val(rw) = _PAGE_RW;
-        return change_page_attr_set(addr, numpages, rw);
+        err = change_page_attr_set(addr, numpages,
+                                   __pgprot(_PAGE_RW));
+        global_flush_tlb();
+        return err;
 }

 int set_memory_np(unsigned long addr, int numpages)
 {
-        pgprot_t present;
+        int err;

-        pgprot_val(present) = _PAGE_PRESENT;
-        return change_page_attr_clear(addr, numpages, present);
+        err = change_page_attr_clear(addr, numpages,
+                                     __pgprot(_PAGE_PRESENT));
+        global_flush_tlb();
+        return err;
 }

 int set_pages_uc(struct page *page, int numpages)
 {
         unsigned long addr = (unsigned long)page_address(page);
-        pgprot_t uncached;

-        pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
-        return change_page_attr_set(addr, numpages, uncached);
+        return set_memory_uc(addr, numpages);
 }
 EXPORT_SYMBOL(set_pages_uc);

 int set_pages_wb(struct page *page, int numpages)
 {
         unsigned long addr = (unsigned long)page_address(page);
-        pgprot_t uncached;

-        pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
-        return change_page_attr_clear(addr, numpages, uncached);
+        return set_memory_wb(addr, numpages);
 }
 EXPORT_SYMBOL(set_pages_wb);

 int set_pages_x(struct page *page, int numpages)
 {
         unsigned long addr = (unsigned long)page_address(page);
-        pgprot_t nx;

-        pgprot_val(nx) = _PAGE_NX;
-        return change_page_attr_clear(addr, numpages, nx);
+        return set_memory_x(addr, numpages);
 }
 EXPORT_SYMBOL(set_pages_x);

 int set_pages_nx(struct page *page, int numpages)
 {
         unsigned long addr = (unsigned long)page_address(page);
-        pgprot_t nx;

-        pgprot_val(nx) = _PAGE_NX;
-        return change_page_attr_set(addr, numpages, nx);
+        return set_memory_nx(addr, numpages);
 }
 EXPORT_SYMBOL(set_pages_nx);

 int set_pages_ro(struct page *page, int numpages)
 {
         unsigned long addr = (unsigned long)page_address(page);
-        pgprot_t rw;

-        pgprot_val(rw) = _PAGE_RW;
-        return change_page_attr_clear(addr, numpages, rw);
+        return set_memory_ro(addr, numpages);
 }

 int set_pages_rw(struct page *page, int numpages)
 {
         unsigned long addr = (unsigned long)page_address(page);
-        pgprot_t rw;
-
-        pgprot_val(rw) = _PAGE_RW;
-        return change_page_attr_set(addr, numpages, rw);
-}
-
-void clflush_cache_range(void *addr, int size)
-{
-        int i;
-
-        for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
-                clflush(addr+i);
-}

-static void flush_kernel_map(void *arg)
-{
-        /*
-         * Flush all to work around Errata in early athlons regarding
-         * large page flushing.
-         */
-        __flush_tlb_all();
-
-        if (boot_cpu_data.x86_model >= 4)
-                wbinvd();
+        return set_memory_rw(addr, numpages);
 }

-void global_flush_tlb(void)
-{
-        BUG_ON(irqs_disabled());
-
-        on_each_cpu(flush_kernel_map, NULL, 1, 1);
-}
-EXPORT_SYMBOL(global_flush_tlb);

 #ifdef CONFIG_DEBUG_PAGEALLOC
