diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2008-01-30 07:34:07 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-01-30 07:34:07 -0500 |
commit | d7c8f21a8cad0228c7c5ce2bb6dbd95d1ee49d13 (patch) | |
tree | d1e305bec62022a0bec82a3499a372c2c7c40583 /arch | |
parent | d1028a154c65d7fadd1b2d0276c077014d401ec7 (diff) |
x86: cpa: move flush to cpa
The set_memory_* and set_pages_* family of API's currently requires the
callers to do a global tlb flush after the function call; forgetting this is
a very nasty deathtrap. This patch moves the global tlb flush into
each of the callers.
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/kernel/pci-gart_64.c | 1 | ||||
-rw-r--r-- | arch/x86/mm/init_32.c | 14 | ||||
-rw-r--r-- | arch/x86/mm/init_64.c | 10 | ||||
-rw-r--r-- | arch/x86/mm/ioremap.c | 2 | ||||
-rw-r--r-- | arch/x86/mm/pageattr.c | 137 |
5 files changed, 71 insertions, 93 deletions
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index 8860c6eba8ab..4d5cc7181982 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c | |||
@@ -572,7 +572,6 @@ static __init int init_k8_gatt(struct agp_kern_info *info) | |||
572 | panic("Cannot allocate GATT table"); | 572 | panic("Cannot allocate GATT table"); |
573 | if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT)) | 573 | if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT)) |
574 | panic("Could not set GART PTEs to uncacheable pages"); | 574 | panic("Could not set GART PTEs to uncacheable pages"); |
575 | global_flush_tlb(); | ||
576 | 575 | ||
577 | memset(gatt, 0, gatt_size); | 576 | memset(gatt, 0, gatt_size); |
578 | agp_gatt_table = gatt; | 577 | agp_gatt_table = gatt; |
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index f7b941c3b2c3..0d3369b900e9 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -752,15 +752,11 @@ void mark_rodata_ro(void) | |||
752 | printk("Write protecting the kernel text: %luk\n", size >> 10); | 752 | printk("Write protecting the kernel text: %luk\n", size >> 10); |
753 | 753 | ||
754 | #ifdef CONFIG_CPA_DEBUG | 754 | #ifdef CONFIG_CPA_DEBUG |
755 | global_flush_tlb(); | ||
756 | |||
757 | printk("Testing CPA: Reverting %lx-%lx\n", start, start+size); | 755 | printk("Testing CPA: Reverting %lx-%lx\n", start, start+size); |
758 | set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT); | 756 | set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT); |
759 | global_flush_tlb(); | ||
760 | 757 | ||
761 | printk("Testing CPA: write protecting again\n"); | 758 | printk("Testing CPA: write protecting again\n"); |
762 | set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT); | 759 | set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT); |
763 | global_flush_tlb(); | ||
764 | #endif | 760 | #endif |
765 | } | 761 | } |
766 | #endif | 762 | #endif |
@@ -770,22 +766,12 @@ void mark_rodata_ro(void) | |||
770 | printk("Write protecting the kernel read-only data: %luk\n", | 766 | printk("Write protecting the kernel read-only data: %luk\n", |
771 | size >> 10); | 767 | size >> 10); |
772 | 768 | ||
773 | /* | ||
774 | * set_pages_*() requires a global_flush_tlb() call after it. | ||
775 | * We do this after the printk so that if something went wrong in the | ||
776 | * change, the printk gets out at least to give a better debug hint | ||
777 | * of who is the culprit. | ||
778 | */ | ||
779 | global_flush_tlb(); | ||
780 | |||
781 | #ifdef CONFIG_CPA_DEBUG | 769 | #ifdef CONFIG_CPA_DEBUG |
782 | printk("Testing CPA: undo %lx-%lx\n", start, start + size); | 770 | printk("Testing CPA: undo %lx-%lx\n", start, start + size); |
783 | set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT); | 771 | set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT); |
784 | global_flush_tlb(); | ||
785 | 772 | ||
786 | printk("Testing CPA: write protecting again\n"); | 773 | printk("Testing CPA: write protecting again\n"); |
787 | set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); | 774 | set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); |
788 | global_flush_tlb(); | ||
789 | #endif | 775 | #endif |
790 | } | 776 | } |
791 | #endif | 777 | #endif |
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 4757be7b5e55..9b69fa54a831 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -610,22 +610,12 @@ void mark_rodata_ro(void) | |||
610 | printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", | 610 | printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", |
611 | (end - start) >> 10); | 611 | (end - start) >> 10); |
612 | 612 | ||
613 | /* | ||
614 | * set_memory_*() requires a global_flush_tlb() call after it. | ||
615 | * We do this after the printk so that if something went wrong in the | ||
616 | * change, the printk gets out at least to give a better debug hint | ||
617 | * of who is the culprit. | ||
618 | */ | ||
619 | global_flush_tlb(); | ||
620 | |||
621 | #ifdef CONFIG_CPA_DEBUG | 613 | #ifdef CONFIG_CPA_DEBUG |
622 | printk("Testing CPA: undo %lx-%lx\n", start, end); | 614 | printk("Testing CPA: undo %lx-%lx\n", start, end); |
623 | set_memory_rw(start, (end-start) >> PAGE_SHIFT); | 615 | set_memory_rw(start, (end-start) >> PAGE_SHIFT); |
624 | global_flush_tlb(); | ||
625 | 616 | ||
626 | printk("Testing CPA: again\n"); | 617 | printk("Testing CPA: again\n"); |
627 | set_memory_ro(start, (end-start) >> PAGE_SHIFT); | 618 | set_memory_ro(start, (end-start) >> PAGE_SHIFT); |
628 | global_flush_tlb(); | ||
629 | #endif | 619 | #endif |
630 | } | 620 | } |
631 | #endif | 621 | #endif |
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index b86f66fa5185..6a9a1418bc98 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
@@ -96,8 +96,6 @@ static int ioremap_change_attr(unsigned long paddr, unsigned long size, | |||
96 | err = set_memory_wb(vaddr, nrpages); | 96 | err = set_memory_wb(vaddr, nrpages); |
97 | break; | 97 | break; |
98 | } | 98 | } |
99 | if (!err) | ||
100 | global_flush_tlb(); | ||
101 | 99 | ||
102 | return err; | 100 | return err; |
103 | } | 101 | } |
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index e4d2b6930e61..a2d747c06147 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -23,6 +23,36 @@ within(unsigned long addr, unsigned long start, unsigned long end) | |||
23 | } | 23 | } |
24 | 24 | ||
25 | /* | 25 | /* |
26 | * Flushing functions | ||
27 | */ | ||
28 | void clflush_cache_range(void *addr, int size) | ||
29 | { | ||
30 | int i; | ||
31 | |||
32 | for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size) | ||
33 | clflush(addr+i); | ||
34 | } | ||
35 | |||
36 | static void flush_kernel_map(void *arg) | ||
37 | { | ||
38 | /* | ||
39 | * Flush all to work around Errata in early athlons regarding | ||
40 | * large page flushing. | ||
41 | */ | ||
42 | __flush_tlb_all(); | ||
43 | |||
44 | if (boot_cpu_data.x86_model >= 4) | ||
45 | wbinvd(); | ||
46 | } | ||
47 | |||
48 | static void global_flush_tlb(void) | ||
49 | { | ||
50 | BUG_ON(irqs_disabled()); | ||
51 | |||
52 | on_each_cpu(flush_kernel_map, NULL, 1, 1); | ||
53 | } | ||
54 | |||
55 | /* | ||
26 | * Certain areas of memory on x86 require very specific protection flags, | 56 | * Certain areas of memory on x86 require very specific protection flags, |
27 | * for example the BIOS area or kernel text. Callers don't always get this | 57 | * for example the BIOS area or kernel text. Callers don't always get this |
28 | * right (again, ioremap() on BIOS memory is not uncommon) so this function | 58 | * right (again, ioremap() on BIOS memory is not uncommon) so this function |
@@ -328,149 +358,124 @@ static int change_page_attr_clear(unsigned long addr, int numpages, | |||
328 | 358 | ||
329 | int set_memory_uc(unsigned long addr, int numpages) | 359 | int set_memory_uc(unsigned long addr, int numpages) |
330 | { | 360 | { |
331 | pgprot_t uncached; | 361 | int err; |
332 | 362 | ||
333 | pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT; | 363 | err = change_page_attr_set(addr, numpages, |
334 | return change_page_attr_set(addr, numpages, uncached); | 364 | __pgprot(_PAGE_PCD | _PAGE_PWT)); |
365 | global_flush_tlb(); | ||
366 | return err; | ||
335 | } | 367 | } |
336 | EXPORT_SYMBOL(set_memory_uc); | 368 | EXPORT_SYMBOL(set_memory_uc); |
337 | 369 | ||
338 | int set_memory_wb(unsigned long addr, int numpages) | 370 | int set_memory_wb(unsigned long addr, int numpages) |
339 | { | 371 | { |
340 | pgprot_t uncached; | 372 | int err; |
341 | 373 | ||
342 | pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT; | 374 | err = change_page_attr_clear(addr, numpages, |
343 | return change_page_attr_clear(addr, numpages, uncached); | 375 | __pgprot(_PAGE_PCD | _PAGE_PWT)); |
376 | global_flush_tlb(); | ||
377 | return err; | ||
344 | } | 378 | } |
345 | EXPORT_SYMBOL(set_memory_wb); | 379 | EXPORT_SYMBOL(set_memory_wb); |
346 | 380 | ||
347 | int set_memory_x(unsigned long addr, int numpages) | 381 | int set_memory_x(unsigned long addr, int numpages) |
348 | { | 382 | { |
349 | pgprot_t nx; | 383 | int err; |
350 | 384 | ||
351 | pgprot_val(nx) = _PAGE_NX; | 385 | err = change_page_attr_clear(addr, numpages, |
352 | return change_page_attr_clear(addr, numpages, nx); | 386 | __pgprot(_PAGE_NX)); |
387 | global_flush_tlb(); | ||
388 | return err; | ||
353 | } | 389 | } |
354 | EXPORT_SYMBOL(set_memory_x); | 390 | EXPORT_SYMBOL(set_memory_x); |
355 | 391 | ||
356 | int set_memory_nx(unsigned long addr, int numpages) | 392 | int set_memory_nx(unsigned long addr, int numpages) |
357 | { | 393 | { |
358 | pgprot_t nx; | 394 | int err; |
359 | 395 | ||
360 | pgprot_val(nx) = _PAGE_NX; | 396 | err = change_page_attr_set(addr, numpages, |
361 | return change_page_attr_set(addr, numpages, nx); | 397 | __pgprot(_PAGE_NX)); |
398 | global_flush_tlb(); | ||
399 | return err; | ||
362 | } | 400 | } |
363 | EXPORT_SYMBOL(set_memory_nx); | 401 | EXPORT_SYMBOL(set_memory_nx); |
364 | 402 | ||
365 | int set_memory_ro(unsigned long addr, int numpages) | 403 | int set_memory_ro(unsigned long addr, int numpages) |
366 | { | 404 | { |
367 | pgprot_t rw; | 405 | int err; |
368 | 406 | ||
369 | pgprot_val(rw) = _PAGE_RW; | 407 | err = change_page_attr_clear(addr, numpages, |
370 | return change_page_attr_clear(addr, numpages, rw); | 408 | __pgprot(_PAGE_RW)); |
409 | global_flush_tlb(); | ||
410 | return err; | ||
371 | } | 411 | } |
372 | 412 | ||
373 | int set_memory_rw(unsigned long addr, int numpages) | 413 | int set_memory_rw(unsigned long addr, int numpages) |
374 | { | 414 | { |
375 | pgprot_t rw; | 415 | int err; |
376 | 416 | ||
377 | pgprot_val(rw) = _PAGE_RW; | 417 | err = change_page_attr_set(addr, numpages, |
378 | return change_page_attr_set(addr, numpages, rw); | 418 | __pgprot(_PAGE_RW)); |
419 | global_flush_tlb(); | ||
420 | return err; | ||
379 | } | 421 | } |
380 | 422 | ||
381 | int set_memory_np(unsigned long addr, int numpages) | 423 | int set_memory_np(unsigned long addr, int numpages) |
382 | { | 424 | { |
383 | pgprot_t present; | 425 | int err; |
384 | 426 | ||
385 | pgprot_val(present) = _PAGE_PRESENT; | 427 | err = change_page_attr_clear(addr, numpages, |
386 | return change_page_attr_clear(addr, numpages, present); | 428 | __pgprot(_PAGE_PRESENT)); |
429 | global_flush_tlb(); | ||
430 | return err; | ||
387 | } | 431 | } |
388 | 432 | ||
389 | int set_pages_uc(struct page *page, int numpages) | 433 | int set_pages_uc(struct page *page, int numpages) |
390 | { | 434 | { |
391 | unsigned long addr = (unsigned long)page_address(page); | 435 | unsigned long addr = (unsigned long)page_address(page); |
392 | pgprot_t uncached; | ||
393 | 436 | ||
394 | pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT; | 437 | return set_memory_uc(addr, numpages); |
395 | return change_page_attr_set(addr, numpages, uncached); | ||
396 | } | 438 | } |
397 | EXPORT_SYMBOL(set_pages_uc); | 439 | EXPORT_SYMBOL(set_pages_uc); |
398 | 440 | ||
399 | int set_pages_wb(struct page *page, int numpages) | 441 | int set_pages_wb(struct page *page, int numpages) |
400 | { | 442 | { |
401 | unsigned long addr = (unsigned long)page_address(page); | 443 | unsigned long addr = (unsigned long)page_address(page); |
402 | pgprot_t uncached; | ||
403 | 444 | ||
404 | pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT; | 445 | return set_memory_wb(addr, numpages); |
405 | return change_page_attr_clear(addr, numpages, uncached); | ||
406 | } | 446 | } |
407 | EXPORT_SYMBOL(set_pages_wb); | 447 | EXPORT_SYMBOL(set_pages_wb); |
408 | 448 | ||
409 | int set_pages_x(struct page *page, int numpages) | 449 | int set_pages_x(struct page *page, int numpages) |
410 | { | 450 | { |
411 | unsigned long addr = (unsigned long)page_address(page); | 451 | unsigned long addr = (unsigned long)page_address(page); |
412 | pgprot_t nx; | ||
413 | 452 | ||
414 | pgprot_val(nx) = _PAGE_NX; | 453 | return set_memory_x(addr, numpages); |
415 | return change_page_attr_clear(addr, numpages, nx); | ||
416 | } | 454 | } |
417 | EXPORT_SYMBOL(set_pages_x); | 455 | EXPORT_SYMBOL(set_pages_x); |
418 | 456 | ||
419 | int set_pages_nx(struct page *page, int numpages) | 457 | int set_pages_nx(struct page *page, int numpages) |
420 | { | 458 | { |
421 | unsigned long addr = (unsigned long)page_address(page); | 459 | unsigned long addr = (unsigned long)page_address(page); |
422 | pgprot_t nx; | ||
423 | 460 | ||
424 | pgprot_val(nx) = _PAGE_NX; | 461 | return set_memory_nx(addr, numpages); |
425 | return change_page_attr_set(addr, numpages, nx); | ||
426 | } | 462 | } |
427 | EXPORT_SYMBOL(set_pages_nx); | 463 | EXPORT_SYMBOL(set_pages_nx); |
428 | 464 | ||
429 | int set_pages_ro(struct page *page, int numpages) | 465 | int set_pages_ro(struct page *page, int numpages) |
430 | { | 466 | { |
431 | unsigned long addr = (unsigned long)page_address(page); | 467 | unsigned long addr = (unsigned long)page_address(page); |
432 | pgprot_t rw; | ||
433 | 468 | ||
434 | pgprot_val(rw) = _PAGE_RW; | 469 | return set_memory_ro(addr, numpages); |
435 | return change_page_attr_clear(addr, numpages, rw); | ||
436 | } | 470 | } |
437 | 471 | ||
438 | int set_pages_rw(struct page *page, int numpages) | 472 | int set_pages_rw(struct page *page, int numpages) |
439 | { | 473 | { |
440 | unsigned long addr = (unsigned long)page_address(page); | 474 | unsigned long addr = (unsigned long)page_address(page); |
441 | pgprot_t rw; | ||
442 | |||
443 | pgprot_val(rw) = _PAGE_RW; | ||
444 | return change_page_attr_set(addr, numpages, rw); | ||
445 | } | ||
446 | |||
447 | void clflush_cache_range(void *addr, int size) | ||
448 | { | ||
449 | int i; | ||
450 | |||
451 | for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size) | ||
452 | clflush(addr+i); | ||
453 | } | ||
454 | 475 | ||
455 | static void flush_kernel_map(void *arg) | 476 | return set_memory_rw(addr, numpages); |
456 | { | ||
457 | /* | ||
458 | * Flush all to work around Errata in early athlons regarding | ||
459 | * large page flushing. | ||
460 | */ | ||
461 | __flush_tlb_all(); | ||
462 | |||
463 | if (boot_cpu_data.x86_model >= 4) | ||
464 | wbinvd(); | ||
465 | } | 477 | } |
466 | 478 | ||
467 | void global_flush_tlb(void) | ||
468 | { | ||
469 | BUG_ON(irqs_disabled()); | ||
470 | |||
471 | on_each_cpu(flush_kernel_map, NULL, 1, 1); | ||
472 | } | ||
473 | EXPORT_SYMBOL(global_flush_tlb); | ||
474 | 479 | ||
475 | #ifdef CONFIG_DEBUG_PAGEALLOC | 480 | #ifdef CONFIG_DEBUG_PAGEALLOC |
476 | 481 | ||