 Documentation/cachetlb.txt    |  6
 arch/ia64/mm/init.c           |  5
 include/asm-generic/pgtable.h |  4
 include/asm-ia64/pgtable.h    | 44
 mm/hugetlb.c                  |  2
 mm/memory.c                   |  8
 mm/migrate.c                  |  1
 mm/mprotect.c                 |  1
 mm/rmap.c                     |  1
 9 files changed, 33 insertions(+), 39 deletions(-)
diff --git a/Documentation/cachetlb.txt b/Documentation/cachetlb.txt
index 866b76139420..552cabac0608 100644
--- a/Documentation/cachetlb.txt
+++ b/Documentation/cachetlb.txt
@@ -133,12 +133,6 @@ changes occur:
 	The ia64 sn2 platform is one example of a platform
 	that uses this interface.
 
-8) void lazy_mmu_prot_update(pte_t pte)
-	This interface is called whenever the protection on
-	any user PTEs change.  This interface provides a notification
-	to architecture specific code to take appropriate action.
-
-
 Next, we have the cache flushing interfaces.  In general, when Linux
 is changing an existing virtual-->physical mapping to a new value,
 the sequence will be in one of the following forms:
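
Note: the interface documented above maps onto a fixed call pattern in the generic mm code, which the mm/ hunks below delete one site at a time. A minimal before/after sketch of that pattern, reusing the names from the handle_pte_fault() hunk in mm/memory.c (illustrative only, not an additional change in this patch):

	/* Before: generic code notified the architecture after a protection
	 * update, on top of the usual MMU-cache notification. */
	if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
		update_mmu_cache(vma, address, entry);
		lazy_mmu_prot_update(entry);	/* ia64-only hook in practice */
	}

	/* After: no separate hook; on ia64 the deferred i-cache sync now
	 * happens inside set_pte()/set_pte_at() itself. */
	if (ptep_set_access_flags(vma, address, pte, entry, write_access))
		update_mmu_cache(vma, address, entry);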
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index c14abefabafa..a096b30734f3 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -54,15 +54,12 @@ struct page *zero_page_memmap_ptr; /* map entry for zero page */
 EXPORT_SYMBOL(zero_page_memmap_ptr);
 
 void
-lazy_mmu_prot_update (pte_t pte)
+__ia64_sync_icache_dcache (pte_t pte)
 {
 	unsigned long addr;
 	struct page *page;
 	unsigned long order;
 
-	if (!pte_exec(pte))
-		return;				/* not an executable page... */
-
 	page = pte_page(pte);
 	addr = (unsigned long) page_address(page);
 
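
The hunk above keeps only the head of the renamed function; the rest of its body is not shown here. Based on the new comment in include/asm-ia64/pgtable.h below (check the Pg_arch_1 bit and flush the i-cache only when needed), it plausibly continues along these lines, using the addr/page/order locals declared above. A sketch under that assumption, not text from this patch:

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache already coherent with d-cache */

	if (PageCompound(page)) {
		order = compound_order(page);
		flush_icache_range(addr, addr + (1UL << order << PAGE_SHIFT));
	} else
		flush_icache_range(addr, addr + PAGE_SIZE);

	set_bit(PG_arch_1, &page->flags);	/* remember that the flush was done */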
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 5f0d797d33fd..44ef329531c3 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -125,10 +125,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
 #endif
 
-#ifndef __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
-#define lazy_mmu_prot_update(pte)	do { } while (0)
-#endif
-
 #ifndef __HAVE_ARCH_MOVE_PTE
 #define move_pte(pte, prot, old_addr, new_addr)	(pte)
 #endif
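
The removed fallback is one half of the usual asm-generic opt-in pattern: the generic header supplies a no-op unless an architecture defines the matching __HAVE_ARCH_ symbol together with its own implementation, which ia64 did until this patch. Shown side by side for reference, assembled from lines removed elsewhere in this same patch:

	/* asm-generic/pgtable.h: default to a no-op. */
	#ifndef __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
	#define lazy_mmu_prot_update(pte)	do { } while (0)
	#endif

	/* asm-ia64/pgtable.h: opt in and provide a real implementation. */
	#define __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
	extern void lazy_mmu_prot_update (pte_t pte);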
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index 6b4216edbb9c..0971ec90807e 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -223,12 +223,6 @@ ia64_phys_addr_valid (unsigned long addr)
  * page table.
  */
 
-/*
- * On some architectures, special things need to be done when setting
- * the PTE in a page table.  Nothing special needs to be on IA-64.
- */
-#define set_pte(ptep, pteval)	(*(ptep) = (pteval))
-#define set_pte_at(mm,addr,ptep,pteval)	set_pte(ptep,pteval)
-
 
 #define VMALLOC_START		(RGN_BASE(RGN_GATE) + 0x200000000UL)
 #ifdef CONFIG_VIRTUAL_MEM_MAP
@@ -321,6 +315,36 @@ ia64_phys_addr_valid (unsigned long addr)
 #define pte_mkhuge(pte)		(__pte(pte_val(pte)))
 
 /*
+ * Because ia64's Icache and Dcache are not coherent (on a cpu), we need to
+ * sync the icache and dcache when we insert a *new* executable page.
+ * __ia64_sync_icache_dcache() checks the Pg_arch_1 bit and flushes the
+ * icache if necessary.
+ *
+ * set_pte() is also called by the kernel, but we can expect that the kernel
+ * flushes the icache explicitly if necessary.
+ */
+#define pte_present_exec_user(pte)\
+	((pte_val(pte) & (_PAGE_P | _PAGE_PL_MASK | _PAGE_AR_RX)) == \
+		(_PAGE_P | _PAGE_PL_3 | _PAGE_AR_RX))
+
+extern void __ia64_sync_icache_dcache(pte_t pteval);
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+	/* page is present && page is user && page is executable
+	 * && (page swapin or new page or page migration
+	 *	|| copy_on_write with page copying.)
+	 */
+	if (pte_present_exec_user(pteval) &&
+	    (!pte_present(*ptep) ||
+	     pte_pfn(*ptep) != pte_pfn(pteval)))
+		/* load_module() calls flush_icache_range() explicitly */
+		__ia64_sync_icache_dcache(pteval);
+	*ptep = pteval;
+}
+
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
+/*
  * Make page protection values cacheable, uncacheable, or write-
  * combining.  Note that "protection" is really a misnomer here as the
  * protection value contains the memory attribute bits, dirty bits, and
@@ -489,12 +513,6 @@ extern struct page *zero_page_memmap_ptr;
 #define HUGETLB_PGDIR_MASK	(~(HUGETLB_PGDIR_SIZE-1))
 #endif
 
-/*
- * IA-64 doesn't have any external MMU info: the page tables contain all the necessary
- * information.  However, we use this routine to take care of any (delayed) i-cache
- * flushing that may be necessary.
- */
-extern void lazy_mmu_prot_update (pte_t pte);
 
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 /*
@@ -584,7 +602,7 @@ extern void lazy_mmu_prot_update (pte_t pte);
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 #define __HAVE_ARCH_PTE_SAME
 #define __HAVE_ARCH_PGD_OFFSET_GATE
-#define __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
+
 
 #ifndef CONFIG_PGTABLE_4
 #include <asm-generic/pgtable-nopud.h>
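
Design note on the new set_pte(): the sync is attempted only when a present, user, executable PTE is installed and the backing page actually changes (swap-in, a fresh page, migration, or a COW copy at a different pfn), and __ia64_sync_icache_dcache() is expected to use PG_arch_1 to flush each page at most once. For that to stay correct, the path on which the kernel writes new contents into a page must clear the bit again; on ia64 this is conventionally done by flush_dcache_page(), roughly as sketched below (an assumption about code outside this patch, not part of the diff):

	/* asm-ia64/cacheflush.h, approximately: dirtying a page invalidates
	 * its "i-cache already synced" marker. */
	#define flush_dcache_page(page)			\
	do {						\
		clear_bit(PG_arch_1, &(page)->flags);	\
	} while (0)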
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index eab8c428cc93..06fd80149e47 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -355,7 +355,6 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
 	entry = pte_mkwrite(pte_mkdirty(*ptep));
 	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
 		update_mmu_cache(vma, address, entry);
-		lazy_mmu_prot_update(entry);
 	}
 }
 
@@ -708,7 +707,6 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 			pte = huge_ptep_get_and_clear(mm, address, ptep);
 			pte = pte_mkhuge(pte_modify(pte, newprot));
 			set_huge_pte_at(mm, address, ptep, pte);
-			lazy_mmu_prot_update(pte);
 		}
 	}
 	spin_unlock(&mm->page_table_lock);
diff --git a/mm/memory.c b/mm/memory.c
index b3c42f0f65c2..bd16dcaeefb8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1611,10 +1611,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		flush_cache_page(vma, address, pte_pfn(orig_pte));
 		entry = pte_mkyoung(orig_pte);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-		if (ptep_set_access_flags(vma, address, page_table, entry,1)) {
+		if (ptep_set_access_flags(vma, address, page_table, entry,1))
 			update_mmu_cache(vma, address, entry);
-			lazy_mmu_prot_update(entry);
-		}
 		ret |= VM_FAULT_WRITE;
 		goto unlock;
 	}
@@ -1650,7 +1648,6 @@ gotten:
 		flush_cache_page(vma, address, pte_pfn(orig_pte));
 		entry = mk_pte(new_page, vma->vm_page_prot);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-		lazy_mmu_prot_update(entry);
 		/*
 		 * Clear the pte entry and flush it first, before updating the
 		 * pte with the new entry. This will avoid a race condition
@@ -2180,7 +2177,6 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	/* No need to invalidate - it was non-present before */
 	update_mmu_cache(vma, address, entry);
-	lazy_mmu_prot_update(entry);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 	return 0;
@@ -2333,7 +2329,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 		/* no need to invalidate: a not-present page won't be cached */
 		update_mmu_cache(vma, address, entry);
-		lazy_mmu_prot_update(entry);
 	} else {
 		if (anon)
 			page_cache_release(page);
@@ -2505,7 +2500,6 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 	entry = pte_mkyoung(entry);
 	if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
 		update_mmu_cache(vma, address, entry);
-		lazy_mmu_prot_update(entry);
 	} else {
 		/*
 		 * This is needed only for protection faults but the arch code
diff --git a/mm/migrate.c b/mm/migrate.c
index ea11ddb58275..06d0877a66ef 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -181,7 +181,6 @@ static void remove_migration_pte(struct vm_area_struct *vma,
 
 	/* No need to invalidate - it was non-present before */
 	update_mmu_cache(vma, addr, pte);
-	lazy_mmu_prot_update(pte);
 
 out:
 	pte_unmap_unlock(ptep, ptl);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index e8346c30abec..1d4d69790e59 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -53,7 +53,6 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
 			if (dirty_accountable && pte_dirty(ptent))
 				ptent = pte_mkwrite(ptent);
 			set_pte_at(mm, addr, pte, ptent);
-			lazy_mmu_prot_update(ptent);
 #ifdef CONFIG_MIGRATION
 		} else if (!pte_file(oldpte)) {
 			swp_entry_t entry = pte_to_swp_entry(oldpte);
diff --git a/mm/rmap.c b/mm/rmap.c
index 41ac39749ef4..2b9f413c9c00 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -436,7 +436,6 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
 		entry = pte_wrprotect(entry);
 		entry = pte_mkclean(entry);
 		set_pte_at(mm, address, pte, entry);
-		lazy_mmu_prot_update(entry);
 		ret = 1;
 	}
 