 Documentation/cachetlb.txt             |  6
 arch/alpha/include/asm/pgtable.h       |  2
 arch/arm/include/asm/tlbflush.h        |  3
 arch/arm/mm/fault-armv.c               |  5
 arch/avr32/include/asm/pgtable.h       |  2
 arch/avr32/mm/tlb.c                    |  4
 arch/cris/include/asm/pgtable.h        |  2
 arch/frv/include/asm/pgtable.h         |  2
 arch/ia64/include/asm/pgtable.h        |  2
 arch/m32r/include/asm/tlbflush.h       |  2
 arch/m32r/mm/fault-nommu.c             |  2
 arch/m32r/mm/fault.c                   |  6
 arch/m68k/include/asm/pgtable_mm.h     |  2
 arch/microblaze/include/asm/tlbflush.h |  2
 arch/mips/include/asm/pgtable.h        |  3
 arch/mn10300/include/asm/pgtable.h     |  2
 arch/mn10300/mm/mmu-context.c          |  3
 arch/parisc/include/asm/pgtable.h      |  2
 arch/parisc/kernel/cache.c             |  4
 arch/powerpc/include/asm/pgtable.h     |  2
 arch/powerpc/mm/mem.c                  |  4
 arch/s390/include/asm/pgtable.h        |  2
 arch/score/include/asm/pgtable.h       |  3
 arch/sh/include/asm/pgtable.h          |  3
 arch/sh/mm/fault_32.c                  |  2
 arch/sparc/include/asm/pgtable_32.h    |  4
 arch/sparc/include/asm/pgtable_64.h    |  2
 arch/sparc/mm/fault_32.c               |  4
 arch/sparc/mm/init_64.c                |  3
 arch/sparc/mm/nosun4c.c                |  2
 arch/sparc/mm/srmmu.c                  |  6
 arch/sparc/mm/sun4c.c                  |  6
 arch/um/include/asm/pgtable.h          |  2
 arch/x86/include/asm/pgtable_32.h      |  2
 arch/x86/include/asm/pgtable_64.h      |  2
 arch/xtensa/include/asm/pgtable.h      |  2
 arch/xtensa/mm/cache.c                 |  4
 mm/hugetlb.c                           |  4
 mm/memory.c                            | 14
 mm/migrate.c                           |  2
 40 files changed, 69 insertions(+), 62 deletions(-)
diff --git a/Documentation/cachetlb.txt b/Documentation/cachetlb.txt
index da42ab414c48..74a8b6fefa29 100644
--- a/Documentation/cachetlb.txt
+++ b/Documentation/cachetlb.txt
@@ -88,12 +88,12 @@ changes occur:
 	This is used primarily during fault processing.
 
 5) void update_mmu_cache(struct vm_area_struct *vma,
-			 unsigned long address, pte_t pte)
+			 unsigned long address, pte_t *ptep)
 
 	At the end of every page fault, this routine is invoked to
 	tell the architecture specific code that a translation
-	described by "pte" now exists at virtual address "address"
-	for address space "vma->vm_mm", in the software page tables.
+	now exists at virtual address "address" for address space
+	"vma->vm_mm", in the software page tables.
 
 	A port may use this information in any way it so chooses.
 	For example, it could use this event to pre-load TLB
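As a hedged illustration of the interface described above (not part of this patch): with the pointer-based prototype an architecture dereferences the PTE itself, under the pte lock the caller already holds. The helper example_preload_tlb() below is hypothetical.

	/* Sketch only: the arch hook reads the entry through the pointer. */
	void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
			      pte_t *ptep)
	{
		pte_t pte = *ptep;	/* entry the generic code just set */

		if (!pte_present(pte))
			return;

		/* example_preload_tlb() is a hypothetical arch-specific helper */
		example_preload_tlb(vma->vm_mm, address, pte);
	}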
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 3f0c59f6d8aa..71a243294142 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -329,7 +329,7 @@ extern pgd_t swapper_pg_dir[1024];
  * tables contain all the necessary information.
  */
 extern inline void update_mmu_cache(struct vm_area_struct * vma,
-	unsigned long address, pte_t pte)
+	unsigned long address, pte_t *ptep)
 {
 }
 
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index c2f1605de359..e085e2c545eb 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -529,7 +529,8 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
  * cache entries for the kernels virtual memory range are written
  * back to the page.
  */
-extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
+extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
+	pte_t *ptep);
 
 #endif
 
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index ae88f2c3a6df..c45f9bb318ad 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -149,9 +149,10 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigne
  *
  * Note that the pte lock will be held.
  */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
+	pte_t *ptep)
 {
-	unsigned long pfn = pte_pfn(pte);
+	unsigned long pfn = pte_pfn(*ptep);
 	struct address_space *mapping;
 	struct page *page;
 
diff --git a/arch/avr32/include/asm/pgtable.h b/arch/avr32/include/asm/pgtable.h
index fecdda16f444..a9ae30c41e74 100644
--- a/arch/avr32/include/asm/pgtable.h
+++ b/arch/avr32/include/asm/pgtable.h
@@ -325,7 +325,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 struct vm_area_struct;
 extern void update_mmu_cache(struct vm_area_struct * vma,
-			     unsigned long address, pte_t pte);
+			     unsigned long address, pte_t *ptep);
 
 /*
  * Encode and decode a swap entry
diff --git a/arch/avr32/mm/tlb.c b/arch/avr32/mm/tlb.c
index 06677be98ffb..0da23109f817 100644
--- a/arch/avr32/mm/tlb.c
+++ b/arch/avr32/mm/tlb.c
@@ -101,7 +101,7 @@ static void update_dtlb(unsigned long address, pte_t pte)
 }
 
 void update_mmu_cache(struct vm_area_struct *vma,
-		      unsigned long address, pte_t pte)
+		      unsigned long address, pte_t *ptep)
 {
 	unsigned long flags;
 
@@ -110,7 +110,7 @@ void update_mmu_cache(struct vm_area_struct *vma,
 		return;
 
 	local_irq_save(flags);
-	update_dtlb(address, pte);
+	update_dtlb(address, *ptep);
 	local_irq_restore(flags);
 }
 
diff --git a/arch/cris/include/asm/pgtable.h b/arch/cris/include/asm/pgtable.h
index 1fcce00f01f4..99ea6cd1b143 100644
--- a/arch/cris/include/asm/pgtable.h
+++ b/arch/cris/include/asm/pgtable.h
@@ -270,7 +270,7 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */
  * Actually I am not sure on what this could be used for.
  */
 static inline void update_mmu_cache(struct vm_area_struct * vma,
-	unsigned long address, pte_t pte)
+	unsigned long address, pte_t *ptep)
 {
 }
 
diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h
index 22c60692b551..c18b0d32e636 100644
--- a/arch/frv/include/asm/pgtable.h
+++ b/arch/frv/include/asm/pgtable.h
@@ -505,7 +505,7 @@ static inline int pte_file(pte_t pte)
 /*
  * preload information about a newly instantiated PTE into the SCR0/SCR1 PGE cache
  */
-static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
 	struct mm_struct *mm;
 	unsigned long ampr;
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 69bf13857a9f..c3286f42e501 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -462,7 +462,7 @@ pte_same (pte_t a, pte_t b)
 	return pte_val(a) == pte_val(b);
 }
 
-#define update_mmu_cache(vma, address, pte) do { } while (0)
+#define update_mmu_cache(vma, address, ptep) do { } while (0)
 
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern void paging_init (void);
diff --git a/arch/m32r/include/asm/tlbflush.h b/arch/m32r/include/asm/tlbflush.h
index 0ef95307784e..92614b0ccf17 100644
--- a/arch/m32r/include/asm/tlbflush.h
+++ b/arch/m32r/include/asm/tlbflush.h
@@ -92,6 +92,6 @@ static __inline__ void __flush_tlb_all(void)
 	);
 }
 
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
 
 #endif	/* _ASM_M32R_TLBFLUSH_H */
diff --git a/arch/m32r/mm/fault-nommu.c b/arch/m32r/mm/fault-nommu.c
index 88469178ea6b..888aab1157ed 100644
--- a/arch/m32r/mm/fault-nommu.c
+++ b/arch/m32r/mm/fault-nommu.c
@@ -95,7 +95,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
  * update_mmu_cache()
  *======================================================================*/
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
-	pte_t pte)
+	pte_t *ptep)
 {
 	BUG();
 }
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index 7274b47f4c22..28ee389e5f5a 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -336,7 +336,7 @@ vmalloc_fault:
 
 		addr = (address & PAGE_MASK);
 		set_thread_fault_code(error_code);
-		update_mmu_cache(NULL, addr, *pte_k);
+		update_mmu_cache(NULL, addr, pte_k);
 		set_thread_fault_code(0);
 		return;
 	}
@@ -349,7 +349,7 @@ vmalloc_fault:
 #define ITLB_END	(unsigned long *)(ITLB_BASE + (NR_TLB_ENTRIES * 8))
 #define DTLB_END	(unsigned long *)(DTLB_BASE + (NR_TLB_ENTRIES * 8))
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr,
-	pte_t pte)
+	pte_t *ptep)
 {
 	volatile unsigned long *entry1, *entry2;
 	unsigned long pte_data, flags;
@@ -365,7 +365,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr,
 
 	vaddr = (vaddr & PAGE_MASK) | get_asid();
 
-	pte_data = pte_val(pte);
+	pte_data = pte_val(*ptep);
 
 #ifdef CONFIG_CHIP_OPSP
 	entry1 = (unsigned long *)ITLB_BASE;
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index aca0e28581c7..87174c904d2b 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -115,7 +115,7 @@ extern void kernel_set_cachemode(void *addr, unsigned long size, int cmode);
  * they are updated on demand.
  */
 static inline void update_mmu_cache(struct vm_area_struct *vma,
-				    unsigned long address, pte_t pte)
+				    unsigned long address, pte_t *ptep)
 {
 }
 
diff --git a/arch/microblaze/include/asm/tlbflush.h b/arch/microblaze/include/asm/tlbflush.h
index eb31a0e8a772..10ec70cd8735 100644
--- a/arch/microblaze/include/asm/tlbflush.h
+++ b/arch/microblaze/include/asm/tlbflush.h
@@ -38,7 +38,7 @@ static inline void local_flush_tlb_range(struct vm_area_struct *vma,
 
 #define flush_tlb_kernel_range(start, end)	do { } while (0)
 
-#define update_mmu_cache(vma, addr, pte)	do { } while (0)
+#define update_mmu_cache(vma, addr, ptep)	do { } while (0)
 
 #define flush_tlb_all local_flush_tlb_all
 #define flush_tlb_mm local_flush_tlb_mm
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 1854336e56a2..c56bf8afc099 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -362,8 +362,9 @@ extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
 	pte_t pte);
 
 static inline void update_mmu_cache(struct vm_area_struct *vma,
-	unsigned long address, pte_t pte)
+	unsigned long address, pte_t *ptep)
 {
+	pte_t pte = *ptep;
 	__update_tlb(vma, address, pte);
 	__update_cache(vma, address, pte);
 }
diff --git a/arch/mn10300/include/asm/pgtable.h b/arch/mn10300/include/asm/pgtable.h
index 6dc30fc827c4..16d88577f3e0 100644
--- a/arch/mn10300/include/asm/pgtable.h
+++ b/arch/mn10300/include/asm/pgtable.h
@@ -466,7 +466,7 @@ static inline int set_kernel_exec(unsigned long vaddr, int enable)
  * the kernel page tables containing the necessary information by tlb-mn10300.S
  */
 extern void update_mmu_cache(struct vm_area_struct *vma,
-			     unsigned long address, pte_t pte);
+			     unsigned long address, pte_t *ptep);
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/mn10300/mm/mmu-context.c b/arch/mn10300/mm/mmu-context.c
index 31c9d27a75ae..36ba02191d40 100644
--- a/arch/mn10300/mm/mmu-context.c
+++ b/arch/mn10300/mm/mmu-context.c
@@ -51,9 +51,10 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 /*
  * preemptively set a TLB entry
  */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 	unsigned long pteu, ptel, cnx, flags;
+	pte_t pte = *ptep;
 
 	addr &= PAGE_MASK;
 	ptel = pte_val(pte) & ~(xPTEL_UNUSED1 | xPTEL_UNUSED2);
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index a27d2e200fb2..01c15035e783 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -410,7 +410,7 @@ extern void paging_init (void);
 
 #define PG_dcache_dirty         PG_arch_1
 
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
 
 /* Encode and de-code a swap entry */
 
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index b6ed34de14e1..1054baa2fc69 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -68,9 +68,9 @@ flush_cache_all_local(void)
 EXPORT_SYMBOL(flush_cache_all_local);
 
 void
-update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
-	struct page *page = pte_page(pte);
+	struct page *page = pte_page(*ptep);
 
 	if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
 	    test_bit(PG_dcache_dirty, &page->flags)) {
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 21207e54825b..89f158731ce3 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -209,7 +209,7 @@ extern void paging_init(void);
  * corresponding HPTE into the hash table ahead of time, instead of
  * waiting for the inevitable extra hash-table miss exception.
  */
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
 
 extern int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr,
 		      unsigned long end, int write, struct page **pages, int *nr);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index b9b152558f9c..311224cdb7ad 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -494,13 +494,13 @@ EXPORT_SYMBOL(flush_icache_user_range);
  * This must always be called with the pte lock held.
  */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
-		      pte_t pte)
+		      pte_t *ptep)
 {
 #ifdef CONFIG_PPC_STD_MMU
 	unsigned long access = 0, trap;
 
 	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
-	if (!pte_young(pte) || address >= TASK_SIZE)
+	if (!pte_young(*ptep) || address >= TASK_SIZE)
 		return;
 
 	/* We try to figure out if we are coming from an instruction
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index e2fa79cf0614..9b5b9189c15e 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -43,7 +43,7 @@ extern void vmem_map_init(void);
  * The S390 doesn't have any external MMU info: the kernel page
  * tables contain all the necessary information.
  */
-#define update_mmu_cache(vma, address, pte) do { } while (0)
+#define update_mmu_cache(vma, address, ptep) do { } while (0)
 
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
diff --git a/arch/score/include/asm/pgtable.h b/arch/score/include/asm/pgtable.h
index 674934b40170..ccf38f06c57d 100644
--- a/arch/score/include/asm/pgtable.h
+++ b/arch/score/include/asm/pgtable.h
@@ -272,8 +272,9 @@ extern void __update_cache(struct vm_area_struct *vma,
 	unsigned long address, pte_t pte);
 
 static inline void update_mmu_cache(struct vm_area_struct *vma,
-	unsigned long address, pte_t pte)
+	unsigned long address, pte_t *ptep)
 {
+	pte_t pte = *ptep;
 	__update_tlb(vma, address, pte);
 	__update_cache(vma, address, pte);
 }
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index ba3046e4f06f..1ff93ac1aa44 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -165,8 +165,9 @@ extern void __update_tlb(struct vm_area_struct *vma,
 			 unsigned long address, pte_t pte);
 
 static inline void
-update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
+	pte_t pte = *ptep;
 	__update_cache(vma, address, pte);
 	__update_tlb(vma, address, pte);
 }
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 47530104e0ad..1677b5ee191d 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -371,7 +371,7 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
 		local_flush_tlb_one(get_asid(), address & PAGE_MASK);
 #endif
 
-	update_mmu_cache(NULL, address, entry);
+	update_mmu_cache(NULL, address, pte);
 
 	return 0;
 }
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index e0cabe790ec1..77f906d8cc21 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -330,9 +330,9 @@ BTFIXUPDEF_CALL(void, mmu_info, struct seq_file *)
 #define FAULT_CODE_WRITE    0x2
 #define FAULT_CODE_USER     0x4
 
-BTFIXUPDEF_CALL(void, update_mmu_cache, struct vm_area_struct *, unsigned long, pte_t)
+BTFIXUPDEF_CALL(void, update_mmu_cache, struct vm_area_struct *, unsigned long, pte_t *)
 
-#define update_mmu_cache(vma,addr,pte) BTFIXUP_CALL(update_mmu_cache)(vma,addr,pte)
+#define update_mmu_cache(vma,addr,ptep) BTFIXUP_CALL(update_mmu_cache)(vma,addr,ptep)
 
 BTFIXUPDEF_CALL(void, sparc_mapiorange, unsigned int, unsigned long,
     unsigned long, unsigned int)
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index f3cb790fa2ae..f5b5fa76c02d 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -706,7 +706,7 @@ extern unsigned long find_ecache_flush_span(unsigned long size);
 #define mmu_unlockarea(vaddr, len)		do { } while(0)
 
 struct vm_area_struct;
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
 
 /* Encode and de-code a swap entry */
 #define __swp_type(entry)	(((entry).val >> PAGE_SHIFT) & 0xffUL)
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index b99f81c4906f..43e20efb2511 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -370,7 +370,7 @@ asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
 			      unsigned long address)
 {
 	extern void sun4c_update_mmu_cache(struct vm_area_struct *,
-					   unsigned long,pte_t);
+					   unsigned long,pte_t *);
 	extern pte_t *sun4c_pte_offset_kernel(pmd_t *,unsigned long);
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
@@ -447,7 +447,7 @@ asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
 		 * on the CPU and doing a shrink_mmap() on this vma.
 		 */
 		sun4c_update_mmu_cache (find_vma(current->mm, address), address,
-					*ptep);
+					ptep);
 	else
 		do_sparc_fault(regs, text_fault, write, address);
 }
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 1886d37d411b..9245a822a2f1 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -289,12 +289,13 @@ static void flush_dcache(unsigned long pfn)
 	}
 }
 
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
 	struct mm_struct *mm;
 	struct tsb *tsb;
 	unsigned long tag, flags;
 	unsigned long tsb_index, tsb_hash_shift;
+	pte_t pte = *ptep;
 
 	if (tlb_type != hypervisor) {
 		unsigned long pfn = pte_pfn(pte);
diff --git a/arch/sparc/mm/nosun4c.c b/arch/sparc/mm/nosun4c.c
index 196263f895b7..4e62c27147c4 100644
--- a/arch/sparc/mm/nosun4c.c
+++ b/arch/sparc/mm/nosun4c.c
@@ -62,7 +62,7 @@ pte_t *sun4c_pte_offset_kernel(pmd_t *dir, unsigned long address)
 	return NULL;
 }
 
-void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
 }
 
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 367321a030dd..df49b200ca4c 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -694,7 +694,7 @@ extern void tsunami_setup_blockops(void);
  * The following code is a deadwood that may be necessary when
  * we start to make precise page flushes again. --zaitcev
  */
-static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
+static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t *ptep)
 {
 #if 0
 	static unsigned long last;
@@ -703,10 +703,10 @@ static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long ad
 
 	if (address == last) {
 		val = srmmu_hwprobe(address);
-		if (val != 0 && pte_val(pte) != val) {
+		if (val != 0 && pte_val(*ptep) != val) {
 			printk("swift_update_mmu_cache: "
 			    "addr %lx put %08x probed %08x from %p\n",
-			    address, pte_val(pte), val,
+			    address, pte_val(*ptep), val,
 			    __builtin_return_address(0));
 			srmmu_flush_whole_tlb();
 		}
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index a89baf0d875a..18652534b91a 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -1887,7 +1887,7 @@ static void sun4c_check_pgt_cache(int low, int high)
 /* An experiment, turn off by default for now... -DaveM */
 #define SUN4C_PRELOAD_PSEG
 
-void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
 	unsigned long flags;
 	int pseg;
@@ -1929,7 +1929,7 @@ void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, p
 			start += PAGE_SIZE;
 		}
 #ifndef SUN4C_PRELOAD_PSEG
-		sun4c_put_pte(address, pte_val(pte));
+		sun4c_put_pte(address, pte_val(*ptep));
 #endif
 		local_irq_restore(flags);
 		return;
@@ -1940,7 +1940,7 @@ void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, p
 		add_lru(entry);
 	}
 
-	sun4c_put_pte(address, pte_val(pte));
+	sun4c_put_pte(address, pte_val(*ptep));
 	local_irq_restore(flags);
 }
 
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index 9ce3f165111a..a9f7251b4a8d 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -345,7 +345,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 struct mm_struct;
 extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
 
-#define update_mmu_cache(vma,address,pte) do ; while (0)
+#define update_mmu_cache(vma,address,ptep) do ; while (0)
 
 /* Encode and de-code a swap entry */
 #define __swp_type(x)		(((x).val >> 4) & 0x3f)
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 01fd9461d323..a28668396508 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -80,7 +80,7 @@ do { \
  * The i386 doesn't have any external MMU info: the kernel page
  * tables contain all the necessary information.
  */
-#define update_mmu_cache(vma, address, pte) do { } while (0)
+#define update_mmu_cache(vma, address, ptep) do { } while (0)
 
 #endif	/* !__ASSEMBLY__ */
 
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index c57a30117149..181be528c612 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -129,7 +129,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
 #define pte_unmap(pte) /* NOP */
 #define pte_unmap_nested(pte) /* NOP */
 
-#define update_mmu_cache(vma, address, pte) do { } while (0)
+#define update_mmu_cache(vma, address, ptep) do { } while (0)
 
 /* Encode and de-code a swap entry */
 #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index a138770c358e..76bf35554117 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -394,7 +394,7 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 #define kern_addr_valid(addr)	(1)
 
 extern  void update_mmu_cache(struct vm_area_struct * vma,
-			      unsigned long address, pte_t pte);
+			      unsigned long address, pte_t *ptep);
 
 /*
  * remap a physical page `pfn' of size `size' with page protection `prot'
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 3ba990c67676..85df4655d326 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -147,9 +147,9 @@ void flush_cache_page(struct vm_area_struct* vma, unsigned long address,
 #endif
 
 void
-update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t pte)
+update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
 {
-	unsigned long pfn = pte_pfn(pte);
+	unsigned long pfn = pte_pfn(*ptep);
 	struct page *page;
 
 	if (!pfn_valid(pfn))
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e91b81b63670..94cd94df56e3 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2088,7 +2088,7 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
 
 	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
 	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
-		update_mmu_cache(vma, address, entry);
+		update_mmu_cache(vma, address, ptep);
 	}
 }
 
@@ -2559,7 +2559,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	entry = pte_mkyoung(entry);
 	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
 						flags & FAULT_FLAG_WRITE))
-		update_mmu_cache(vma, address, entry);
+		update_mmu_cache(vma, address, ptep);
 
 out_page_table_lock:
 	spin_unlock(&mm->page_table_lock);
diff --git a/mm/memory.c b/mm/memory.c
index 09e4b1be7b67..72fb5f39bccc 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1593,7 +1593,7 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 	/* Ok, finally just insert the thing.. */
 	entry = pte_mkspecial(pfn_pte(pfn, prot));
 	set_pte_at(mm, addr, pte, entry);
-	update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */
+	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
 
 	retval = 0;
 out_unlock:
@@ -2116,7 +2116,7 @@ reuse:
 		entry = pte_mkyoung(orig_pte);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 		if (ptep_set_access_flags(vma, address, page_table, entry,1))
-			update_mmu_cache(vma, address, entry);
+			update_mmu_cache(vma, address, page_table);
 		ret |= VM_FAULT_WRITE;
 		goto unlock;
 	}
@@ -2185,7 +2185,7 @@ gotten:
 		 * new page to be mapped directly into the secondary page table.
 		 */
 		set_pte_at_notify(mm, address, page_table, entry);
-		update_mmu_cache(vma, address, entry);
+		update_mmu_cache(vma, address, page_table);
 		if (old_page) {
 			/*
 			 * Only after switching the pte to the new page may
@@ -2629,7 +2629,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 
 	/* No need to invalidate - it was non-present before */
-	update_mmu_cache(vma, address, pte);
+	update_mmu_cache(vma, address, page_table);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 out:
@@ -2694,7 +2694,7 @@ setpte:
 	set_pte_at(mm, address, page_table, entry);
 
 	/* No need to invalidate - it was non-present before */
-	update_mmu_cache(vma, address, entry);
+	update_mmu_cache(vma, address, page_table);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 	return 0;
@@ -2855,7 +2855,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		set_pte_at(mm, address, page_table, entry);
 
 		/* no need to invalidate: a not-present page won't be cached */
-		update_mmu_cache(vma, address, entry);
+		update_mmu_cache(vma, address, page_table);
 	} else {
 		if (charged)
 			mem_cgroup_uncharge_page(page);
@@ -2992,7 +2992,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 	}
 	entry = pte_mkyoung(entry);
 	if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
-		update_mmu_cache(vma, address, entry);
+		update_mmu_cache(vma, address, pte);
 	} else {
 		/*
 		 * This is needed only for protection faults but the arch code
diff --git a/mm/migrate.c b/mm/migrate.c
index efddbf0926b2..e58e5da25b91 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -134,7 +134,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
 	page_add_file_rmap(new);
 
 	/* No need to invalidate - it was non-present before */
-	update_mmu_cache(vma, addr, pte);
+	update_mmu_cache(vma, addr, ptep);
 unlock:
 	pte_unmap_unlock(ptep, ptl);
 out: