author    Russell King <rmk+kernel@arm.linux.org.uk>  2009-10-25 07:25:50 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>  2009-12-01 13:20:07 -0500
commit    b7dc0b2cfc6e9bc7270915c642a8a8e999b6095e (patch)
tree      68853dc2d48f9abcd5eadab0dc181ad8b0e8ac59
parent    2f0b192633f1fbf253b21c90938733491549edae (diff)
ARM: Avoid evaluating page_address() multiple times
page_address() is a function call rather than a macro, and so:

	if (page_address(page))
		do_something(page_address(page));

results in two calls to this function.  This is unnecessary; remove
the duplication.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
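For illustration only, a minimal standalone sketch of the pattern being fixed: page_address() is an ordinary function, so evaluating it in both the test and the use costs two calls, while caching the result in a local variable avoids the second one. The struct page, page_address() stand-in and do_something() helper below are hypothetical simplifications, not the kernel's definitions.

	#include <stdio.h>

	/* Hypothetical stand-ins for the kernel's struct page and page_address(). */
	struct page {
		void *virtual;	/* kernel virtual address, or NULL if not mapped */
	};

	static void *page_address(struct page *page)
	{
		return page->virtual;	/* a real function call each time it is used */
	}

	/* Hypothetical consumer of the mapping, standing in for the cache flush. */
	static void do_something(void *addr)
	{
		printf("flushing %p\n", addr);
	}

	/* Before: page_address() is evaluated twice when the page is mapped. */
	static void flush_twice(struct page *page)
	{
		if (page_address(page))
			do_something(page_address(page));
	}

	/* After: evaluate page_address() once and reuse the cached pointer. */
	static void flush_once(struct page *page)
	{
		void *addr = page_address(page);

		if (addr)
			do_something(addr);
	}

	int main(void)
	{
		int dummy;
		struct page mapped = { .virtual = &dummy };
		struct page unmapped = { .virtual = NULL };

		flush_twice(&mapped);
		flush_once(&mapped);
		flush_once(&unmapped);	/* NULL address: nothing to flush */
		return 0;
	}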
-rw-r--r--  arch/arm/mm/flush.c  6
1 files changed, 4 insertions, 2 deletions
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index a480f161a4bb..43474d8752a6 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -111,6 +111,8 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 
 void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
+	void *addr = page_address(page);
+
 	/*
 	 * Writeback any data associated with the kernel mapping of this
 	 * page. This ensures that data in the physical page is mutually
@@ -121,9 +123,9 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 * kmap_atomic() doesn't set the page virtual address, and
 	 * kunmap_atomic() takes care of cache flushing already.
 	 */
-	if (page_address(page))
+	if (addr)
 #endif
-		__cpuc_flush_dcache_page(page_address(page));
+		__cpuc_flush_dcache_page(addr);
 
 	/*
 	 * If this is a page cache page, and we have an aliasing VIPT cache,