diff options
| author | Russell King <rmk@dyn-67.arm.linux.org.uk> | 2005-06-20 04:51:03 -0400 |
|---|---|---|
| committer | Russell King <rmk@dyn-67.arm.linux.org.uk> | 2005-06-20 04:51:03 -0400 |
| commit | 8830f04a092b47f3d246271b24685cd9eab82027 (patch) | |
| tree | 8258af450ec736fd0ff3cf0864eb5407b3f8b5ff | |
| parent | d411b845dcc8e1d97e8b02a345e765af5134700f (diff) | |
[PATCH] ARM: Fix delayed dcache flush for ARMv6 non-aliasing caches
flush_dcache_page() did nothing for these caches, but since they
suffer from I/D cache coherency issues, we need to ensure that data
is written back to RAM.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
| -rw-r--r-- | arch/arm/mm/fault-armv.c | 31 | ||||
| -rw-r--r-- | arch/arm/mm/flush.c | 44 |
2 files changed, 29 insertions(+), 46 deletions(-)
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index 01967ddeef53..be4ab3d73c91 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c | |||
| @@ -77,9 +77,8 @@ no_pmd: | |||
| 77 | } | 77 | } |
| 78 | 78 | ||
| 79 | static void | 79 | static void |
| 80 | make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page, int dirty) | 80 | make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) |
| 81 | { | 81 | { |
| 82 | struct address_space *mapping = page_mapping(page); | ||
| 83 | struct mm_struct *mm = vma->vm_mm; | 82 | struct mm_struct *mm = vma->vm_mm; |
| 84 | struct vm_area_struct *mpnt; | 83 | struct vm_area_struct *mpnt; |
| 85 | struct prio_tree_iter iter; | 84 | struct prio_tree_iter iter; |
| @@ -87,9 +86,6 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page, | |||
| 87 | pgoff_t pgoff; | 86 | pgoff_t pgoff; |
| 88 | int aliases = 0; | 87 | int aliases = 0; |
| 89 | 88 | ||
| 90 | if (!mapping) | ||
| 91 | return; | ||
| 92 | |||
| 93 | pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT); | 89 | pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT); |
| 94 | 90 | ||
| 95 | /* | 91 | /* |
| @@ -115,9 +111,11 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page, | |||
| 115 | if (aliases) | 111 | if (aliases) |
| 116 | adjust_pte(vma, addr); | 112 | adjust_pte(vma, addr); |
| 117 | else | 113 | else |
| 118 | flush_cache_page(vma, addr, page_to_pfn(page)); | 114 | flush_cache_page(vma, addr, pfn); |
| 119 | } | 115 | } |
| 120 | 116 | ||
| 117 | void __flush_dcache_page(struct address_space *mapping, struct page *page); | ||
| 118 | |||
| 121 | /* | 119 | /* |
| 122 | * Take care of architecture specific things when placing a new PTE into | 120 | * Take care of architecture specific things when placing a new PTE into |
| 123 | * a page table, or changing an existing PTE. Basically, there are two | 121 | * a page table, or changing an existing PTE. Basically, there are two |
| @@ -134,29 +132,22 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page, | |||
| 134 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte) | 132 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte) |
| 135 | { | 133 | { |
| 136 | unsigned long pfn = pte_pfn(pte); | 134 | unsigned long pfn = pte_pfn(pte); |
| 135 | struct address_space *mapping; | ||
| 137 | struct page *page; | 136 | struct page *page; |
| 138 | 137 | ||
| 139 | if (!pfn_valid(pfn)) | 138 | if (!pfn_valid(pfn)) |
| 140 | return; | 139 | return; |
| 140 | |||
| 141 | page = pfn_to_page(pfn); | 141 | page = pfn_to_page(pfn); |
| 142 | if (page_mapping(page)) { | 142 | mapping = page_mapping(page); |
| 143 | if (mapping) { | ||
| 143 | int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags); | 144 | int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags); |
| 144 | 145 | ||
| 145 | if (dirty) { | 146 | if (dirty) |
| 146 | /* | 147 | __flush_dcache_page(mapping, page); |
| 147 | * This is our first userspace mapping of this page. | ||
| 148 | * Ensure that the physical page is coherent with | ||
| 149 | * the kernel mapping. | ||
| 150 | * | ||
| 151 | * FIXME: only need to do this on VIVT and aliasing | ||
| 152 | * VIPT cache architectures. We can do that | ||
| 153 | * by choosing whether to set this bit... | ||
| 154 | */ | ||
| 155 | __cpuc_flush_dcache_page(page_address(page)); | ||
| 156 | } | ||
| 157 | 148 | ||
| 158 | if (cache_is_vivt()) | 149 | if (cache_is_vivt()) |
| 159 | make_coherent(vma, addr, page, dirty); | 150 | make_coherent(mapping, vma, addr, pfn); |
| 160 | } | 151 | } |
| 161 | } | 152 | } |
| 162 | 153 | ||
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 4085ed983e46..191788fb18d1 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c | |||
| @@ -37,13 +37,8 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr) | |||
| 37 | #define flush_pfn_alias(pfn,vaddr) do { } while (0) | 37 | #define flush_pfn_alias(pfn,vaddr) do { } while (0) |
| 38 | #endif | 38 | #endif |
| 39 | 39 | ||
| 40 | static void __flush_dcache_page(struct address_space *mapping, struct page *page) | 40 | void __flush_dcache_page(struct address_space *mapping, struct page *page) |
| 41 | { | 41 | { |
| 42 | struct mm_struct *mm = current->active_mm; | ||
| 43 | struct vm_area_struct *mpnt; | ||
| 44 | struct prio_tree_iter iter; | ||
| 45 | pgoff_t pgoff; | ||
| 46 | |||
| 47 | /* | 42 | /* |
| 48 | * Writeback any data associated with the kernel mapping of this | 43 | * Writeback any data associated with the kernel mapping of this |
| 49 | * page. This ensures that data in the physical page is mutually | 44 | * page. This ensures that data in the physical page is mutually |
| @@ -52,24 +47,21 @@ static void __flush_dcache_page(struct address_space *mapping, struct page *page | |||
| 52 | __cpuc_flush_dcache_page(page_address(page)); | 47 | __cpuc_flush_dcache_page(page_address(page)); |
| 53 | 48 | ||
| 54 | /* | 49 | /* |
| 55 | * If there's no mapping pointer here, then this page isn't | 50 | * If this is a page cache page, and we have an aliasing VIPT cache, |
| 56 | * visible to userspace yet, so there are no cache lines | 51 | * we only need to do one flush - which would be at the relevant |
| 57 | * associated with any other aliases. | ||
| 58 | */ | ||
| 59 | if (!mapping) | ||
| 60 | return; | ||
| 61 | |||
| 62 | /* | ||
| 63 | * This is a page cache page. If we have a VIPT cache, we | ||
| 64 | * only need to do one flush - which would be at the relevant | ||
| 65 | * userspace colour, which is congruent with page->index. | 52 | * userspace colour, which is congruent with page->index. |
| 66 | */ | 53 | */ |
| 67 | if (cache_is_vipt()) { | 54 | if (mapping && cache_is_vipt_aliasing()) |
| 68 | if (cache_is_vipt_aliasing()) | 55 | flush_pfn_alias(page_to_pfn(page), |
| 69 | flush_pfn_alias(page_to_pfn(page), | 56 | page->index << PAGE_CACHE_SHIFT); |
| 70 | page->index << PAGE_CACHE_SHIFT); | 57 | } |
| 71 | return; | 58 | |
| 72 | } | 59 | static void __flush_dcache_aliases(struct address_space *mapping, struct page *page) |
| 60 | { | ||
| 61 | struct mm_struct *mm = current->active_mm; | ||
| 62 | struct vm_area_struct *mpnt; | ||
| 63 | struct prio_tree_iter iter; | ||
| 64 | pgoff_t pgoff; | ||
| 73 | 65 | ||
| 74 | /* | 66 | /* |
| 75 | * There are possible user space mappings of this page: | 67 | * There are possible user space mappings of this page: |
| @@ -116,12 +108,12 @@ void flush_dcache_page(struct page *page) | |||
| 116 | { | 108 | { |
| 117 | struct address_space *mapping = page_mapping(page); | 109 | struct address_space *mapping = page_mapping(page); |
| 118 | 110 | ||
| 119 | if (cache_is_vipt_nonaliasing()) | ||
| 120 | return; | ||
| 121 | |||
| 122 | if (mapping && !mapping_mapped(mapping)) | 111 | if (mapping && !mapping_mapped(mapping)) |
| 123 | set_bit(PG_dcache_dirty, &page->flags); | 112 | set_bit(PG_dcache_dirty, &page->flags); |
| 124 | else | 113 | else { |
| 125 | __flush_dcache_page(mapping, page); | 114 | __flush_dcache_page(mapping, page); |
| 115 | if (mapping && cache_is_vivt()) | ||
| 116 | __flush_dcache_aliases(mapping, page); | ||
| 117 | } | ||
| 126 | } | 118 | } |
| 127 | EXPORT_SYMBOL(flush_dcache_page); | 119 | EXPORT_SYMBOL(flush_dcache_page); |
