author     KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>    2007-10-16 04:25:44 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-16 12:42:59 -0400
commit     954ffcb35f5aca428661d29b96c4eee82b3c19cd (patch)
tree       2dd8aaf26a8ae81b461b6d5d824ae8744690e483 /mm
parent     97ee052461446526e1de7236497e6f1b1ffedf8c (diff)
flush icache before set_pte() on ia64: flush icache at set_pte
The current ia64 kernel flushes the icache via lazy_mmu_prot_update() *after*
set_pte(). This is too late. This patch removes lazy_mmu_prot_update() and
adds a modified set_pte() that flushes the icache when necessary.
This patch flushes the icache of a page when
	the new pte has the exec bit
	&& the new pte has the present bit
	&& the new pte is a user page
	&& (the old *ptep is not present
	    || the new pte's pfn is not the same as the old *ptep's pfn)
	&& the new pte's page does not have the PG_arch_1 bit set.
PG_arch_1 is set when a page is cache consistent.
I think these condition checks are much easier to understand than working out
"where should sync_icache_dcache() be inserted?".
pte_user() for ia64 was removed by http://lkml.org/lkml/2007/6/12/67 as a
clean-up, so I added it back.
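For reference, a minimal sketch of what re-adding pte_user() could look like on
ia64, assuming a user mapping is one whose privilege-level field is the user
level; the bit names _PAGE_PL_MASK and _PAGE_PL_3 are illustrative and may not
match the header exactly:

```c
/* Illustrative only: on ia64 a user page is one mapped at privilege level 3. */
#define pte_user(pte)	((pte_val(pte) & _PAGE_PL_MASK) == _PAGE_PL_3)
```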
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')

 -rw-r--r--  mm/hugetlb.c   2
 -rw-r--r--  mm/memory.c    8
 -rw-r--r--  mm/migrate.c   1
 -rw-r--r--  mm/mprotect.c  1
 -rw-r--r--  mm/rmap.c      1

 5 files changed, 1 insertions, 12 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index eab8c428cc93..06fd80149e47 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -355,7 +355,6 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
 	entry = pte_mkwrite(pte_mkdirty(*ptep));
 	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
 		update_mmu_cache(vma, address, entry);
-		lazy_mmu_prot_update(entry);
 	}
 }
 
@@ -708,7 +707,6 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 			pte = huge_ptep_get_and_clear(mm, address, ptep);
 			pte = pte_mkhuge(pte_modify(pte, newprot));
 			set_huge_pte_at(mm, address, ptep, pte);
-			lazy_mmu_prot_update(pte);
 		}
 	}
 	spin_unlock(&mm->page_table_lock);
diff --git a/mm/memory.c b/mm/memory.c
index b3c42f0f65c2..bd16dcaeefb8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1611,10 +1611,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		flush_cache_page(vma, address, pte_pfn(orig_pte));
 		entry = pte_mkyoung(orig_pte);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-		if (ptep_set_access_flags(vma, address, page_table, entry,1)) {
+		if (ptep_set_access_flags(vma, address, page_table, entry,1))
 			update_mmu_cache(vma, address, entry);
-			lazy_mmu_prot_update(entry);
-		}
 		ret |= VM_FAULT_WRITE;
 		goto unlock;
 	}
@@ -1650,7 +1648,6 @@ gotten:
 		flush_cache_page(vma, address, pte_pfn(orig_pte));
 		entry = mk_pte(new_page, vma->vm_page_prot);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-		lazy_mmu_prot_update(entry);
 		/*
 		 * Clear the pte entry and flush it first, before updating the
 		 * pte with the new entry. This will avoid a race condition
@@ -2180,7 +2177,6 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	/* No need to invalidate - it was non-present before */
 	update_mmu_cache(vma, address, entry);
-	lazy_mmu_prot_update(entry);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 	return 0;
@@ -2333,7 +2329,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 		/* no need to invalidate: a not-present page won't be cached */
 		update_mmu_cache(vma, address, entry);
-		lazy_mmu_prot_update(entry);
 	} else {
 		if (anon)
 			page_cache_release(page);
@@ -2505,7 +2500,6 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 	entry = pte_mkyoung(entry);
 	if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
 		update_mmu_cache(vma, address, entry);
-		lazy_mmu_prot_update(entry);
 	} else {
 		/*
 		 * This is needed only for protection faults but the arch code
diff --git a/mm/migrate.c b/mm/migrate.c
index ea11ddb58275..06d0877a66ef 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -181,7 +181,6 @@ static void remove_migration_pte(struct vm_area_struct *vma,
 
 	/* No need to invalidate - it was non-present before */
 	update_mmu_cache(vma, addr, pte);
-	lazy_mmu_prot_update(pte);
 
 out:
 	pte_unmap_unlock(ptep, ptl);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index e8346c30abec..1d4d69790e59 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -53,7 +53,6 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
 			if (dirty_accountable && pte_dirty(ptent))
 				ptent = pte_mkwrite(ptent);
 			set_pte_at(mm, addr, pte, ptent);
-			lazy_mmu_prot_update(ptent);
 #ifdef CONFIG_MIGRATION
 		} else if (!pte_file(oldpte)) {
 			swp_entry_t entry = pte_to_swp_entry(oldpte);
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -436,7 +436,6 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
 		entry = pte_wrprotect(entry);
 		entry = pte_mkclean(entry);
 		set_pte_at(mm, address, pte, entry);
-		lazy_mmu_prot_update(entry);
 		ret = 1;
 	}
 