diff options
author | Joonsoo Kim <iamjoonsoo.kim@lge.com> | 2013-04-04 22:16:14 -0400 |
---|---|---|
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2013-04-17 11:55:01 -0400 |
commit | dd0f67f4747797f36f0c6bab7fed6a1f2448476d (patch) | |
tree | 7da151104d3de9b2848f8e05d1764587760cb6b7 /arch/arm/mm | |
parent | ae8a8b9553bd3906af74ff4e8d763904d20ab4e5 (diff) |
ARM: 7693/1: mm: clean-up in order to reduce to call kmap_high_get()
In kmap_atomic(), kmap_high_get() is invoked to check for an already
mapped area. In __flush_dcache_page() and dma_cache_maint_page(),
we explicitly call kmap_high_get() before kmap_atomic()
when cache_is_vipt(), so kmap_high_get() can be invoked twice.
This is a useless operation, so remove one of the calls.
v2: change cache_is_vipt() to cache_is_vipt_nonaliasing() so that the
code is self-documenting
Acked-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/mm')
-rw-r--r-- | arch/arm/mm/dma-mapping.c | 15 | ||||
-rw-r--r-- | arch/arm/mm/flush.c | 15 |
2 files changed, 17 insertions, 13 deletions
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index c7e3759f16d3..b47dd48d8634 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -822,16 +822,17 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, | |||
822 | if (PageHighMem(page)) { | 822 | if (PageHighMem(page)) { |
823 | if (len + offset > PAGE_SIZE) | 823 | if (len + offset > PAGE_SIZE) |
824 | len = PAGE_SIZE - offset; | 824 | len = PAGE_SIZE - offset; |
825 | vaddr = kmap_high_get(page); | 825 | |
826 | if (vaddr) { | 826 | if (cache_is_vipt_nonaliasing()) { |
827 | vaddr += offset; | ||
828 | op(vaddr, len, dir); | ||
829 | kunmap_high(page); | ||
830 | } else if (cache_is_vipt()) { | ||
831 | /* unmapped pages might still be cached */ | ||
832 | vaddr = kmap_atomic(page); | 827 | vaddr = kmap_atomic(page); |
833 | op(vaddr + offset, len, dir); | 828 | op(vaddr + offset, len, dir); |
834 | kunmap_atomic(vaddr); | 829 | kunmap_atomic(vaddr); |
830 | } else { | ||
831 | vaddr = kmap_high_get(page); | ||
832 | if (vaddr) { | ||
833 | op(vaddr + offset, len, dir); | ||
834 | kunmap_high(page); | ||
835 | } | ||
835 | } | 836 | } |
836 | } else { | 837 | } else { |
837 | vaddr = page_address(page) + offset; | 838 | vaddr = page_address(page) + offset; |
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 1c8f7f564175..0d473cce501c 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c | |||
@@ -170,15 +170,18 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page) | |||
170 | if (!PageHighMem(page)) { | 170 | if (!PageHighMem(page)) { |
171 | __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE); | 171 | __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE); |
172 | } else { | 172 | } else { |
173 | void *addr = kmap_high_get(page); | 173 | void *addr; |
174 | if (addr) { | 174 | |
175 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); | 175 | if (cache_is_vipt_nonaliasing()) { |
176 | kunmap_high(page); | ||
177 | } else if (cache_is_vipt()) { | ||
178 | /* unmapped pages might still be cached */ | ||
179 | addr = kmap_atomic(page); | 176 | addr = kmap_atomic(page); |
180 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); | 177 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); |
181 | kunmap_atomic(addr); | 178 | kunmap_atomic(addr); |
179 | } else { | ||
180 | addr = kmap_high_get(page); | ||
181 | if (addr) { | ||
182 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); | ||
183 | kunmap_high(page); | ||
184 | } | ||
182 | } | 185 | } |
183 | } | 186 | } |
184 | 187 | ||