author		Jianyu Zhan <nasa4836@gmail.com>	2014-01-21 18:49:12 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-21 19:19:44 -0500
commit		ece86e222db48d04bda218a2be70e384518bb08c (patch)
tree		7cc9c61ca2f0f7112c2ed3698133aa559b6604ef
parent		d80be7c75136fb58ed7264ac8a49dd917ace77a1 (diff)
mm/vmalloc: interchange the implementation of vmalloc_to_{pfn,page}

Currently we implement vmalloc_to_pfn() as a wrapper around vmalloc_to_page(), which is implemented as follows:

 1. walk the page tables to generate the corresponding pfn,
 2. convert the pfn to a struct page,
 3. return it.

vmalloc_to_pfn() then re-wraps vmalloc_to_page() just to get the pfn back.

This is circuitous, so this patch reverses the relationship: vmalloc_to_page() is now implemented as a wrapper around vmalloc_to_pfn(). This makes both vmalloc_to_pfn() and vmalloc_to_page() slightly more efficient.

No functional change.

Signed-off-by: Jianyu Zhan <nasa4836@gmail.com>
Cc: Vladimir Murzin <murzin.v@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
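In short, the wrapper direction flips. A condensed before/after sketch of the two helpers (the full page-table walk is in the diff below):

/* Before this patch: the page-table walk lives in vmalloc_to_page(). */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}

/* After this patch: the walk lives in vmalloc_to_pfn() and yields the pfn
 * directly; vmalloc_to_page() just converts that pfn to its struct page. */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	return pfn_to_page(vmalloc_to_pfn(vmalloc_addr));
}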
-rw-r--r--	mm/vmalloc.c	20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 0fdf96803c5b..e4f0db2a3eae 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -220,12 +220,12 @@ int is_vmalloc_or_module_addr(const void *x)
 }
 
 /*
- * Walk a vmap address to the struct page it maps.
+ * Walk a vmap address to the physical pfn it maps to.
  */
-struct page *vmalloc_to_page(const void *vmalloc_addr)
+unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
 {
 	unsigned long addr = (unsigned long) vmalloc_addr;
-	struct page *page = NULL;
+	unsigned long pfn = 0;
 	pgd_t *pgd = pgd_offset_k(addr);
 
 	/*
@@ -244,23 +244,23 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
 				ptep = pte_offset_map(pmd, addr);
 				pte = *ptep;
 				if (pte_present(pte))
-					page = pte_page(pte);
+					pfn = pte_pfn(pte);
 				pte_unmap(ptep);
 			}
 		}
 	}
-	return page;
+	return pfn;
 }
-EXPORT_SYMBOL(vmalloc_to_page);
+EXPORT_SYMBOL(vmalloc_to_pfn);
 
 /*
- * Map a vmalloc()-space virtual address to the physical page frame number.
+ * Map a vmalloc()-space virtual address to the struct page.
  */
-unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
+struct page *vmalloc_to_page(const void *vmalloc_addr)
 {
-	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
+	return pfn_to_page(vmalloc_to_pfn(vmalloc_addr));
 }
-EXPORT_SYMBOL(vmalloc_to_pfn);
+EXPORT_SYMBOL(vmalloc_to_page);
 
 
 /*** Global kva allocator ***/
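For illustration only (not part of this patch): a minimal kernel-module-style sketch that exercises both helpers on a vmalloc()'d buffer. The module name and log message are made up for the example.

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

static void *buf;

static int __init vmalloc_demo_init(void)
{
	buf = vmalloc(PAGE_SIZE);
	if (!buf)
		return -ENOMEM;

	/* Both helpers resolve the same mapping; after this patch
	 * vmalloc_to_page() is pfn_to_page(vmalloc_to_pfn(addr)). */
	pr_info("vmalloc_demo: pfn=%lu page=%p\n",
		vmalloc_to_pfn(buf), vmalloc_to_page(buf));
	return 0;
}

static void __exit vmalloc_demo_exit(void)
{
	vfree(buf);
}

module_init(vmalloc_demo_init);
module_exit(vmalloc_demo_exit);
MODULE_LICENSE("GPL");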