about summary refs log tree commit diff stats
path: root/mm/vmalloc.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-01-21 22:05:45 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2014-01-21 22:05:45 -0500
commitdf32e43a54d04eda35d2859beaf90e3864d53288 (patch)
tree7a61cf658b2949bd426285eb9902be7758ced1ba /mm/vmalloc.c
parentfbd918a2026d0464ce9c23f57b7de4bcfccdc2e6 (diff)
parent78d5506e82b21a1a1de68c24182db2c2fe521422 (diff)
Merge branch 'akpm' (incoming from Andrew)
Merge first patch-bomb from Andrew Morton: - a couple of misc things - inotify/fsnotify work from Jan - ocfs2 updates (partial) - about half of MM * emailed patches from Andrew Morton <akpm@linux-foundation.org>: (117 commits) mm/migrate: remove unused function, fail_migrate_page() mm/migrate: remove putback_lru_pages, fix comment on putback_movable_pages mm/migrate: correct failure handling if !hugepage_migration_support() mm/migrate: add comment about permanent failure path mm, page_alloc: warn for non-blockable __GFP_NOFAIL allocation failure mm: compaction: reset scanner positions immediately when they meet mm: compaction: do not mark unmovable pageblocks as skipped in async compaction mm: compaction: detect when scanners meet in isolate_freepages mm: compaction: reset cached scanner pfn's before reading them mm: compaction: encapsulate defer reset logic mm: compaction: trace compaction begin and end memcg, oom: lock mem_cgroup_print_oom_info sched: add tracepoints related to NUMA task migration mm: numa: do not automatically migrate KSM pages mm: numa: trace tasks that fail migration due to rate limiting mm: numa: limit scope of lock for NUMA migrate rate limiting mm: numa: make NUMA-migrate related functions static lib/show_mem.c: show num_poisoned_pages when oom mm/hwpoison: add '#' to hwpoison_inject mm/memblock: use WARN_ONCE when MAX_NUMNODES passed as input parameter ...
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--  mm/vmalloc.c  |  20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 0fdf96803c5b..e4f0db2a3eae 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -220,12 +220,12 @@ int is_vmalloc_or_module_addr(const void *x)
 }
 
 /*
- * Walk a vmap address to the struct page it maps.
+ * Walk a vmap address to the physical pfn it maps to.
  */
-struct page *vmalloc_to_page(const void *vmalloc_addr)
+unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
 {
 	unsigned long addr = (unsigned long) vmalloc_addr;
-	struct page *page = NULL;
+	unsigned long pfn = 0;
 	pgd_t *pgd = pgd_offset_k(addr);
 
 	/*
@@ -244,23 +244,23 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
 			ptep = pte_offset_map(pmd, addr);
 			pte = *ptep;
 			if (pte_present(pte))
-				page = pte_page(pte);
+				pfn = pte_pfn(pte);
 			pte_unmap(ptep);
 		}
 	}
-	return page;
+	return pfn;
 }
-EXPORT_SYMBOL(vmalloc_to_page);
+EXPORT_SYMBOL(vmalloc_to_pfn);
 
 /*
- * Map a vmalloc()-space virtual address to the physical page frame number.
+ * Map a vmalloc()-space virtual address to the struct page.
  */
-unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
+struct page *vmalloc_to_page(const void *vmalloc_addr)
 {
-	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
+	return pfn_to_page(vmalloc_to_pfn(vmalloc_addr));
 }
-EXPORT_SYMBOL(vmalloc_to_pfn);
+EXPORT_SYMBOL(vmalloc_to_page);
 
 
 /*** Global kva allocator ***/