author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2007-06-16 13:16:12 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-06-16 16:16:16 -0400
commit		8dab5241d06bfc9ee141ea78c56cde5070d7460d (patch)
tree		dd9dc3c64c17862b169f4cbe5fd4a108d960c920 /include
parent		679ce0ace6b1a07043bc3b405a34ddccad808886 (diff)
Rework ptep_set_access_flags and fix sun4c
Some changes done a while ago to avoid pounding on ptep_set_access_flags and
update_mmu_cache in some race situations break sun4c, which requires
update_mmu_cache() to always be called on minor faults.

This patch reworks ptep_set_access_flags() semantics, implementations and
callers so that it is now responsible for returning whether an update is
necessary or not (basically whether the PTE actually changed).  This allows
fixing the sparc implementation to always return 1 on sun4c.

[akpm@linux-foundation.org: fixes, cleanups]
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: David Miller <davem@davemloft.net>
Cc: Mark Fortescue <mark@mtfhpc.demon.co.uk>
Acked-by: William Lee Irwin III <wli@holomorphy.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
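The rework depends on the macro evaluating to a value, which is why every do { ... } while (0) body in the diff below becomes a GNU C statement expression, ({ ... }).  The following is a minimal, self-contained user-space sketch of that idiom only, not kernel code: the names set_if_changed and pte are invented for illustration, and gcc or clang is assumed since statement expressions are a GNU extension.

#include <stdio.h>

/*
 * Toy analogue of the reworked ptep_set_access_flags(): only write the
 * new value when it differs from the current one, and evaluate to
 * whether anything changed, using a GNU C statement expression just
 * like the kernel macros in this patch.
 */
#define set_if_changed(slot, new_val)			\
({							\
	int __changed = (*(slot) != (new_val));		\
	if (__changed)					\
		*(slot) = (new_val);			\
	__changed;					\
})

int main(void)
{
	int pte = 42;

	if (set_if_changed(&pte, 42))
		printf("unexpected: value was already 42\n");
	if (set_if_changed(&pte, 99))
		printf("changed, so follow-up work would be needed\n");
	return 0;
}

As with the reworked kernel macro, the update and the "did anything change" answer come from a single expression, so the caller can decide whether further work is needed without re-reading the PTE.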
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/pgtable.h         17
-rw-r--r--  include/asm-i386/pgtable.h             8
-rw-r--r--  include/asm-ia64/pgtable.h            25
-rw-r--r--  include/asm-powerpc/pgtable-ppc32.h   12
-rw-r--r--  include/asm-powerpc/pgtable-ppc64.h   12
-rw-r--r--  include/asm-ppc/pgtable.h             12
-rw-r--r--  include/asm-s390/pgtable.h             7
-rw-r--r--  include/asm-sparc/pgtable.h           11
-rw-r--r--  include/asm-x86_64/pgtable.h          14
9 files changed, 82 insertions, 36 deletions
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index dc8f99ee305f..7d7bcf990e99 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -27,13 +27,20 @@ do { \
  * Largely same as above, but only sets the access flags (dirty,
  * accessed, and writable). Furthermore, we know it always gets set
  * to a "more permissive" setting, which allows most architectures
- * to optimize this.
+ * to optimize this. We return whether the PTE actually changed, which
+ * in turn instructs the caller to do things like update_mmu_cache.
+ * This used to be done in the caller, but sparc needs minor faults to
+ * force that call on sun4c so we changed this macro slightly
  */
 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-do { \
-	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
-	flush_tlb_page(__vma, __address); \
-} while (0)
+({ \
+	int __changed = !pte_same(*(__ptep), __entry); \
+	if (__changed) { \
+		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
+		flush_tlb_page(__vma, __address); \
+	} \
+	__changed; \
+})
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
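The new comment above notes that the return value "instructs the caller to do things like update_mmu_cache".  As a caller-side illustration only (this helper is not part of the patch; the name fault_fixup_access is hypothetical, while update_mmu_cache() is the existing per-architecture hook), the contract is meant to be used roughly like this:

static inline void fault_fixup_access(struct vm_area_struct *vma,
				      unsigned long address,
				      pte_t *ptep, pte_t entry, int dirty)
{
	/* Only refresh MMU/cache state when the PTE actually changed. */
	if (ptep_set_access_flags(vma, address, ptep, entry, dirty))
		update_mmu_cache(vma, address, entry);
}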
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index d62bdb029efa..628fa7747d0c 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -285,13 +285,15 @@ static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
  */
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 #define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
-do { \
-	if (dirty) { \
+({ \
+	int __changed = !pte_same(*(ptep), entry); \
+	if (__changed && dirty) { \
 		(ptep)->pte_low = (entry).pte_low; \
 		pte_update_defer((vma)->vm_mm, (address), (ptep)); \
 		flush_tlb_page(vma, address); \
 	} \
-} while (0)
+	__changed; \
+})
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
 #define ptep_test_and_clear_dirty(vma, addr, ptep) ({ \
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index 670b706411b8..6580f31b3135 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -533,16 +533,23 @@ extern void lazy_mmu_prot_update (pte_t pte);
  * daccess_bit in ivt.S).
  */
 #ifdef CONFIG_SMP
 # define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
-do { \
-	if (__safely_writable) { \
-		set_pte(__ptep, __entry); \
-		flush_tlb_page(__vma, __addr); \
-	} \
-} while (0)
+({ \
+	int __changed = !pte_same(*(__ptep), __entry); \
+	if (__changed && __safely_writable) { \
+		set_pte(__ptep, __entry); \
+		flush_tlb_page(__vma, __addr); \
+	} \
+	__changed; \
+})
 #else
 # define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
-	ptep_establish(__vma, __addr, __ptep, __entry)
+({ \
+	int __changed = !pte_same(*(__ptep), __entry); \
+	if (__changed) \
+		ptep_establish(__vma, __addr, __ptep, __entry); \
+	__changed; \
+})
 #endif
 
 # ifdef CONFIG_VIRTUAL_MEM_MAP
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h
index c863bdb2889c..7fb730c62f83 100644
--- a/include/asm-powerpc/pgtable-ppc32.h
+++ b/include/asm-powerpc/pgtable-ppc32.h
@@ -673,10 +673,14 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
 }
 
 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-	do { \
-		__ptep_set_access_flags(__ptep, __entry, __dirty); \
-		flush_tlb_page_nohash(__vma, __address); \
-	} while(0)
+({ \
+	int __changed = !pte_same(*(__ptep), __entry); \
+	if (__changed) { \
+		__ptep_set_access_flags(__ptep, __entry, __dirty); \
+		flush_tlb_page_nohash(__vma, __address); \
+	} \
+	__changed; \
+})
 
 /*
  * Macro to mark a page protection value as "uncacheable".
diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h
index 704c4e669fe0..3cfd98f44bfe 100644
--- a/include/asm-powerpc/pgtable-ppc64.h
+++ b/include/asm-powerpc/pgtable-ppc64.h
@@ -413,10 +413,14 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
413 :"cc"); 413 :"cc");
414} 414}
415#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ 415#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
416 do { \ 416({ \
417 __ptep_set_access_flags(__ptep, __entry, __dirty); \ 417 int __changed = !pte_same(*(__ptep), __entry); \
418 flush_tlb_page_nohash(__vma, __address); \ 418 if (__changed) { \
419 } while(0) 419 __ptep_set_access_flags(__ptep, __entry, __dirty); \
420 flush_tlb_page_nohash(__vma, __address); \
421 } \
422 __changed; \
423})
420 424
421/* 425/*
422 * Macro to mark a page protection value as "uncacheable". 426 * Macro to mark a page protection value as "uncacheable".
diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
index bed452d4a5f0..9d0ce9ff5840 100644
--- a/include/asm-ppc/pgtable.h
+++ b/include/asm-ppc/pgtable.h
@@ -694,10 +694,14 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
 }
 
 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-	do { \
-		__ptep_set_access_flags(__ptep, __entry, __dirty); \
-		flush_tlb_page_nohash(__vma, __address); \
-	} while(0)
+({ \
+	int __changed = !pte_same(*(__ptep), __entry); \
+	if (__changed) { \
+		__ptep_set_access_flags(__ptep, __entry, __dirty); \
+		flush_tlb_page_nohash(__vma, __address); \
+	} \
+	__changed; \
+})
 
 /*
  * Macro to mark a page protection value as "uncacheable".
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 8fe8d42e64c3..0a307bb2f353 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -744,7 +744,12 @@ ptep_establish(struct vm_area_struct *vma,
 }
 
 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-	ptep_establish(__vma, __address, __ptep, __entry)
+({ \
+	int __changed = !pte_same(*(__ptep), __entry); \
+	if (__changed) \
+		ptep_establish(__vma, __address, __ptep, __entry); \
+	__changed; \
+})
 
 /*
  * Test and clear dirty bit in storage key.
diff --git a/include/asm-sparc/pgtable.h b/include/asm-sparc/pgtable.h
index 4f0a5ba0d6a0..59229aeba27b 100644
--- a/include/asm-sparc/pgtable.h
+++ b/include/asm-sparc/pgtable.h
@@ -446,6 +446,17 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma,
 #define GET_IOSPACE(pfn)	(pfn >> (BITS_PER_LONG - 4))
 #define GET_PFN(pfn)		(pfn & 0x0fffffffUL)
 
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+({ \
+	int __changed = !pte_same(*(__ptep), __entry); \
+	if (__changed) { \
+		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
+		flush_tlb_page(__vma, __address); \
+	} \
+	(sparc_cpu_model == sun4c) || __changed; \
+})
+
 #include <asm-generic/pgtable.h>
 
 #endif /* !(__ASSEMBLY__) */
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 08b9831f2e14..0a71e0b9a619 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -395,12 +395,14 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
  * bit at the same time. */
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-	do { \
-		if (__dirty) { \
-			set_pte(__ptep, __entry); \
-			flush_tlb_page(__vma, __address); \
-		} \
-	} while (0)
+({ \
+	int __changed = !pte_same(*(__ptep), __entry); \
+	if (__changed && __dirty) { \
+		set_pte(__ptep, __entry); \
+		flush_tlb_page(__vma, __address); \
+	} \
+	__changed; \
+})
 
 /* Encode and de-code a swap entry */
 #define __swp_type(x)		(((x).val >> 1) & 0x3f)