-rw-r--r--   include/asm-generic/pgtable.h        | 17
-rw-r--r--   include/asm-i386/pgtable.h           |  8
-rw-r--r--   include/asm-ia64/pgtable.h           | 25
-rw-r--r--   include/asm-powerpc/pgtable-ppc32.h  | 12
-rw-r--r--   include/asm-powerpc/pgtable-ppc64.h  | 12
-rw-r--r--   include/asm-ppc/pgtable.h            | 12
-rw-r--r--   include/asm-s390/pgtable.h           |  7
-rw-r--r--   include/asm-sparc/pgtable.h          | 11
-rw-r--r--   include/asm-x86_64/pgtable.h         | 14
-rw-r--r--   mm/hugetlb.c                         |  7
-rw-r--r--   mm/memory.c                          | 13
11 files changed, 92 insertions(+), 46 deletions(-)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index dc8f99ee305f..7d7bcf990e99 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -27,13 +27,20 @@ do { \
  * Largely same as above, but only sets the access flags (dirty,
  * accessed, and writable). Furthermore, we know it always gets set
  * to a "more permissive" setting, which allows most architectures
- * to optimize this.
+ * to optimize this. We return whether the PTE actually changed, which
+ * in turn instructs the caller to do things like update__mmu_cache.
+ * This used to be done in the caller, but sparc needs minor faults to
+ * force that call on sun4c so we changed this macro slightly
  */
 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-do { \
-	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
-	flush_tlb_page(__vma, __address); \
-} while (0)
+({ \
+	int __changed = !pte_same(*(__ptep), __entry); \
+	if (__changed) { \
+		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
+		flush_tlb_page(__vma, __address); \
+	} \
+	__changed; \
+})
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
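
Note: the reworked macro above relies on the GNU C statement-expression extension; a "({ ... })" block is an expression whose value is its final statement, so the macro can both update the PTE and report whether anything changed. The following is a minimal, self-contained sketch of that mechanism only; update_if_changed() and the variable names are illustrative and not part of the kernel.

#include <stdio.h>

/* Write a new value only if it differs from the current one, and
 * evaluate to 1 if it did (mirrors the pattern used in the patch). */
#define update_if_changed(slot, val)            \
({                                              \
        int __changed = ((slot) != (val));      \
        if (__changed)                          \
                (slot) = (val);                 \
        __changed;                              \
})

int main(void)
{
        int pte = 3;

        if (update_if_changed(pte, 3))
                puts("changed");        /* not reached: value already 3 */
        if (update_if_changed(pte, 7))
                puts("changed");        /* reached: slot updated to 7 */
        return 0;
}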
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index d62bdb029efa..628fa7747d0c 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -285,13 +285,15 @@ static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
  */
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 #define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
-do { \
-	if (dirty) { \
+({ \
+	int __changed = !pte_same(*(ptep), entry); \
+	if (__changed && dirty) { \
 		(ptep)->pte_low = (entry).pte_low; \
 		pte_update_defer((vma)->vm_mm, (address), (ptep)); \
 		flush_tlb_page(vma, address); \
 	} \
-} while (0)
+	__changed; \
+})
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
 #define ptep_test_and_clear_dirty(vma, addr, ptep) ({ \
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index 670b706411b8..6580f31b3135 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -533,16 +533,23 @@ extern void lazy_mmu_prot_update (pte_t pte);
  * daccess_bit in ivt.S).
  */
 #ifdef CONFIG_SMP
 # define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
-do { \
-	if (__safely_writable) { \
-		set_pte(__ptep, __entry); \
-		flush_tlb_page(__vma, __addr); \
-	} \
-} while (0)
+({ \
+	int __changed = !pte_same(*(__ptep), __entry); \
+	if (__changed && __safely_writable) { \
+		set_pte(__ptep, __entry); \
+		flush_tlb_page(__vma, __addr); \
+	} \
+	__changed; \
+})
 #else
 # define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
-	ptep_establish(__vma, __addr, __ptep, __entry)
+({ \
+	int __changed = !pte_same(*(__ptep), __entry); \
+	if (__changed) \
+		ptep_establish(__vma, __addr, __ptep, __entry); \
+	__changed; \
+})
 #endif
 
 # ifdef CONFIG_VIRTUAL_MEM_MAP
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h
index c863bdb2889c..7fb730c62f83 100644
--- a/include/asm-powerpc/pgtable-ppc32.h
+++ b/include/asm-powerpc/pgtable-ppc32.h
@@ -673,10 +673,14 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
 }
 
 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-	do { \
-		__ptep_set_access_flags(__ptep, __entry, __dirty); \
-		flush_tlb_page_nohash(__vma, __address); \
-	} while(0)
+({ \
+	int __changed = !pte_same(*(__ptep), __entry); \
+	if (__changed) { \
+		__ptep_set_access_flags(__ptep, __entry, __dirty); \
+		flush_tlb_page_nohash(__vma, __address); \
+	} \
+	__changed; \
+})
 
 /*
  * Macro to mark a page protection value as "uncacheable".
diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h
index 704c4e669fe0..3cfd98f44bfe 100644
--- a/include/asm-powerpc/pgtable-ppc64.h
+++ b/include/asm-powerpc/pgtable-ppc64.h
@@ -413,10 +413,14 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
 	:"cc");
 }
 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-	do { \
-		__ptep_set_access_flags(__ptep, __entry, __dirty); \
-		flush_tlb_page_nohash(__vma, __address); \
-	} while(0)
+({ \
+	int __changed = !pte_same(*(__ptep), __entry); \
+	if (__changed) { \
+		__ptep_set_access_flags(__ptep, __entry, __dirty); \
+		flush_tlb_page_nohash(__vma, __address); \
+	} \
+	__changed; \
+})
 
 /*
  * Macro to mark a page protection value as "uncacheable".
diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
index bed452d4a5f0..9d0ce9ff5840 100644
--- a/include/asm-ppc/pgtable.h
+++ b/include/asm-ppc/pgtable.h
@@ -694,10 +694,14 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
 }
 
 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-	do { \
-		__ptep_set_access_flags(__ptep, __entry, __dirty); \
-		flush_tlb_page_nohash(__vma, __address); \
-	} while(0)
+({ \
+	int __changed = !pte_same(*(__ptep), __entry); \
+	if (__changed) { \
+		__ptep_set_access_flags(__ptep, __entry, __dirty); \
+		flush_tlb_page_nohash(__vma, __address); \
+	} \
+	__changed; \
+})
 
 /*
  * Macro to mark a page protection value as "uncacheable".
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 8fe8d42e64c3..0a307bb2f353 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -744,7 +744,12 @@ ptep_establish(struct vm_area_struct *vma,
 }
 
 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-	ptep_establish(__vma, __address, __ptep, __entry)
+({ \
+	int __changed = !pte_same(*(__ptep), __entry); \
+	if (__changed) \
+		ptep_establish(__vma, __address, __ptep, __entry); \
+	__changed; \
+})
 
 /*
  * Test and clear dirty bit in storage key.
diff --git a/include/asm-sparc/pgtable.h b/include/asm-sparc/pgtable.h
index 4f0a5ba0d6a0..59229aeba27b 100644
--- a/include/asm-sparc/pgtable.h
+++ b/include/asm-sparc/pgtable.h
@@ -446,6 +446,17 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma,
 #define GET_IOSPACE(pfn)	(pfn >> (BITS_PER_LONG - 4))
 #define GET_PFN(pfn)		(pfn & 0x0fffffffUL)
 
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+({ \
+	int __changed = !pte_same(*(__ptep), __entry); \
+	if (__changed) { \
+		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
+		flush_tlb_page(__vma, __address); \
+	} \
+	(sparc_cpu_model == sun4c) || __changed; \
+})
+
 #include <asm-generic/pgtable.h>
 
 #endif /* !(__ASSEMBLY__) */
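
The sparc version added above differs from the generic one only in its result expression: on sun4c it reports a change even when the PTE is unchanged, so that the caller still performs the update_mmu_cache() call that sun4c's minor-fault handling depends on. Written out as a function for readability, it behaves roughly like the sketch below; the helper name is illustrative only, and the kernel keeps this as a macro.

/* Illustrative sketch of the sparc macro above, written as a function. */
static inline int sparc_ptep_set_access_flags(struct vm_area_struct *vma,
		unsigned long address, pte_t *ptep, pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_page(vma, address);
	}
	/* On sun4c, always report a change so the caller still calls
	 * update_mmu_cache(); other models report the real result.
	 * The dirty argument is unused, as in the macro. */
	return (sparc_cpu_model == sun4c) || changed;
}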
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 08b9831f2e14..0a71e0b9a619 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -395,12 +395,14 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
  * bit at the same time. */
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-	do { \
-		if (__dirty) { \
-			set_pte(__ptep, __entry); \
-			flush_tlb_page(__vma, __address); \
-		} \
-	} while (0)
+({ \
+	int __changed = !pte_same(*(__ptep), __entry); \
+	if (__changed && __dirty) { \
+		set_pte(__ptep, __entry); \
+		flush_tlb_page(__vma, __address); \
+	} \
+	__changed; \
+})
 
 /* Encode and de-code a swap entry */
 #define __swp_type(x)			(((x).val >> 1) & 0x3f)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index eb7180db3033..a45d1f0691ce 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -326,9 +326,10 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
 	pte_t entry;
 
 	entry = pte_mkwrite(pte_mkdirty(*ptep));
-	ptep_set_access_flags(vma, address, ptep, entry, 1);
-	update_mmu_cache(vma, address, entry);
-	lazy_mmu_prot_update(entry);
+	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
+		update_mmu_cache(vma, address, entry);
+		lazy_mmu_prot_update(entry);
+	}
 }
 
 
diff --git a/mm/memory.c b/mm/memory.c
index cb94488ab96d..f64cbf9baa36 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1691,9 +1691,10 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		flush_cache_page(vma, address, pte_pfn(orig_pte));
 		entry = pte_mkyoung(orig_pte);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-		ptep_set_access_flags(vma, address, page_table, entry, 1);
-		update_mmu_cache(vma, address, entry);
-		lazy_mmu_prot_update(entry);
+		if (ptep_set_access_flags(vma, address, page_table, entry,1)) {
+			update_mmu_cache(vma, address, entry);
+			lazy_mmu_prot_update(entry);
+		}
 		ret |= VM_FAULT_WRITE;
 		goto unlock;
 	}
@@ -2525,10 +2526,9 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 		pte_t *pte, pmd_t *pmd, int write_access)
 {
 	pte_t entry;
-	pte_t old_entry;
 	spinlock_t *ptl;
 
-	old_entry = entry = *pte;
+	entry = *pte;
 	if (!pte_present(entry)) {
 		if (pte_none(entry)) {
 			if (vma->vm_ops) {
@@ -2561,8 +2561,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 			entry = pte_mkdirty(entry);
 	}
 	entry = pte_mkyoung(entry);
-	if (!pte_same(old_entry, entry)) {
-		ptep_set_access_flags(vma, address, pte, entry, write_access);
+	if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
 		update_mmu_cache(vma, address, entry);
 		lazy_mmu_prot_update(entry);
 	} else {
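
With ptep_set_access_flags() now returning whether the PTE changed, handle_pte_fault() no longer needs its own old_entry copy and pte_same() comparison; that check has moved into the per-architecture macro. On architectures that use the asm-generic definition, the new condition expands to roughly the sketch below (line continuations elided, write_access ignored by the generic version), which is why dropping old_entry preserves the old behaviour.

	/* Rough expansion of the new handle_pte_fault() condition when the
	 * asm-generic ptep_set_access_flags() is in use. */
	if (({
		int __changed = !pte_same(*pte, entry);
		if (__changed) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			flush_tlb_page(vma, address);
		}
		__changed;
	})) {
		update_mmu_cache(vma, address, entry);
		lazy_mmu_prot_update(entry);
	}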