| author | Markus Pietrek <Markus.Pietrek@emtrion.de> | 2009-12-24 01:12:02 -0500 |
|---|---|---|
| committer | Paul Mundt <lethal@linux-sh.org> | 2009-12-24 01:12:02 -0500 |
| commit | 76382b5bdb77c29ab430e1b82ef1c604c8dd113b (patch) | |
| tree | 98a672c96d10f1426eb6d3bc4c525e34a974627c | |
| parent | 9503e891d28e91961026c778ec251dfe886626fb (diff) | |
sh: Ensure all PG_dcache_dirty pages are written back.
With some of the cache rework, an address aliasing optimization was added,
but this managed to fail on certain mappings, resulting in pages with
PG_dcache_dirty set never writing back their dcache lines. This patch
reverts to the earlier behaviour of simply always writing back when the
dirty bit is set.
Signed-off-by: Markus Pietrek <Markus.Pietrek@emtrion.de>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
| mode | file | changes |
|---|---|---|
| -rw-r--r-- | arch/sh/mm/cache.c | 8 |

1 file changed, 2 insertions(+), 6 deletions(-)
    diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
    index e9415d3ea94a..b8607fa7ae12 100644
    --- a/arch/sh/mm/cache.c
    +++ b/arch/sh/mm/cache.c
    @@ -133,12 +133,8 @@ void __update_cache(struct vm_area_struct *vma,
             page = pfn_to_page(pfn);
             if (pfn_valid(pfn)) {
                     int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
    -                if (dirty) {
    -                        unsigned long addr = (unsigned long)page_address(page);
    -
    -                        if (pages_do_alias(addr, address & PAGE_MASK))
    -                                __flush_purge_region((void *)addr, PAGE_SIZE);
    -                }
    +                if (dirty)
    +                        __flush_purge_region(page_address(page), PAGE_SIZE);
             }
     }
     
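For orientation, here is a minimal sketch of what `__update_cache()` looks like once this hunk is applied. Only the lines shown in the hunk above come from the commit; the rest of the function signature, the local declarations, and the `pte_pfn()` setup are reconstructed assumptions for illustration, and the sketch presumes the usual kernel headers and SH cache helpers (`__flush_purge_region()` and friends) are in scope. Treat it as a reading aid, not the verbatim contents of arch/sh/mm/cache.c.

```c
/*
 * Illustrative sketch only: the code inside pfn_valid() mirrors the hunk
 * above; everything else (signature tail, locals, pte_pfn() use) is an
 * assumption and may differ from the real arch/sh/mm/cache.c.
 */
void __update_cache(struct vm_area_struct *vma,
                    unsigned long address, pte_t pte)
{
        struct page *page;
        unsigned long pfn = pte_pfn(pte);  /* assumed: pfn taken from the new PTE */

        /* vma and address are kept to match the signature in the hunk header. */
        page = pfn_to_page(pfn);
        if (pfn_valid(pfn)) {
                int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);

                /*
                 * After this patch, dirty dcache lines are written back and
                 * invalidated unconditionally; the old pages_do_alias() test
                 * that let some mappings skip the purge is gone.
                 */
                if (dirty)
                        __flush_purge_region(page_address(page), PAGE_SIZE);
        }
}
```

The trade-off is straightforward: a page flagged PG_dcache_dirty now always costs a purge of one page's worth of cache lines, but stale data can no longer be left in the dcache when the aliasing check misjudges a mapping.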
