diff options
author | Christoph Lameter <clameter@sgi.com> | 2008-04-28 05:12:42 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-04-28 11:58:21 -0400 |
commit | 2301696932b55e2ea2085cefc84f7b94fa2dd54b (patch) | |
tree | 4a564951b4bd34feee922e59cac12962ac0f73c8 | |
parent | a10aa579878fc6f9cd17455067380bbdf1d53c91 (diff) |
vmallocinfo: add caller information
Add caller information so that /proc/vmallocinfo shows where the allocation
request for a slice of vmalloc memory originated.
Results in output like this:
0xffffc20000000000-0xffffc20000801000 8392704 alloc_large_system_hash+0x127/0x246 pages=2048 vmalloc vpages
0xffffc20000801000-0xffffc20000806000 20480 alloc_large_system_hash+0x127/0x246 pages=4 vmalloc
0xffffc20000806000-0xffffc20000c07000 4198400 alloc_large_system_hash+0x127/0x246 pages=1024 vmalloc vpages
0xffffc20000c07000-0xffffc20000c0a000 12288 alloc_large_system_hash+0x127/0x246 pages=2 vmalloc
0xffffc20000c0a000-0xffffc20000c0c000 8192 acpi_os_map_memory+0x13/0x1c phys=cff68000 ioremap
0xffffc20000c0c000-0xffffc20000c0f000 12288 acpi_os_map_memory+0x13/0x1c phys=cff64000 ioremap
0xffffc20000c10000-0xffffc20000c15000 20480 acpi_os_map_memory+0x13/0x1c phys=cff65000 ioremap
0xffffc20000c16000-0xffffc20000c18000 8192 acpi_os_map_memory+0x13/0x1c phys=cff69000 ioremap
0xffffc20000c18000-0xffffc20000c1a000 8192 acpi_os_map_memory+0x13/0x1c phys=fed1f000 ioremap
0xffffc20000c1a000-0xffffc20000c1c000 8192 acpi_os_map_memory+0x13/0x1c phys=cff68000 ioremap
0xffffc20000c1c000-0xffffc20000c1e000 8192 acpi_os_map_memory+0x13/0x1c phys=cff68000 ioremap
0xffffc20000c1e000-0xffffc20000c20000 8192 acpi_os_map_memory+0x13/0x1c phys=cff68000 ioremap
0xffffc20000c20000-0xffffc20000c22000 8192 acpi_os_map_memory+0x13/0x1c phys=cff68000 ioremap
0xffffc20000c22000-0xffffc20000c24000 8192 acpi_os_map_memory+0x13/0x1c phys=cff68000 ioremap
0xffffc20000c24000-0xffffc20000c26000 8192 acpi_os_map_memory+0x13/0x1c phys=e0081000 ioremap
0xffffc20000c26000-0xffffc20000c28000 8192 acpi_os_map_memory+0x13/0x1c phys=e0080000 ioremap
0xffffc20000c28000-0xffffc20000c2d000 20480 alloc_large_system_hash+0x127/0x246 pages=4 vmalloc
0xffffc20000c2d000-0xffffc20000c31000 16384 tcp_init+0xd5/0x31c pages=3 vmalloc
0xffffc20000c31000-0xffffc20000c34000 12288 alloc_large_system_hash+0x127/0x246 pages=2 vmalloc
0xffffc20000c34000-0xffffc20000c36000 8192 init_vdso_vars+0xde/0x1f1
0xffffc20000c36000-0xffffc20000c38000 8192 pci_iomap+0x8a/0xb4 phys=d8e00000 ioremap
0xffffc20000c38000-0xffffc20000c3a000 8192 usb_hcd_pci_probe+0x139/0x295 [usbcore] phys=d8e00000 ioremap
0xffffc20000c3a000-0xffffc20000c3e000 16384 sys_swapon+0x509/0xa15 pages=3 vmalloc
0xffffc20000c40000-0xffffc20000c61000 135168 e1000_probe+0x1c4/0xa32 phys=d8a20000 ioremap
0xffffc20000c61000-0xffffc20000c6a000 36864 _xfs_buf_map_pages+0x8e/0xc0 vmap
0xffffc20000c6a000-0xffffc20000c73000 36864 _xfs_buf_map_pages+0x8e/0xc0 vmap
0xffffc20000c73000-0xffffc20000c7c000 36864 _xfs_buf_map_pages+0x8e/0xc0 vmap
0xffffc20000c7c000-0xffffc20000c7f000 12288 e1000e_setup_tx_resources+0x29/0xbe pages=2 vmalloc
0xffffc20000c80000-0xffffc20001481000 8392704 pci_mmcfg_arch_init+0x90/0x118 phys=e0000000 ioremap
0xffffc20001481000-0xffffc20001682000 2101248 alloc_large_system_hash+0x127/0x246 pages=512 vmalloc
0xffffc20001682000-0xffffc20001e83000 8392704 alloc_large_system_hash+0x127/0x246 pages=2048 vmalloc vpages
0xffffc20001e83000-0xffffc20002204000 3674112 alloc_large_system_hash+0x127/0x246 pages=896 vmalloc vpages
0xffffc20002204000-0xffffc2000220d000 36864 _xfs_buf_map_pages+0x8e/0xc0 vmap
0xffffc2000220d000-0xffffc20002216000 36864 _xfs_buf_map_pages+0x8e/0xc0 vmap
0xffffc20002216000-0xffffc2000221f000 36864 _xfs_buf_map_pages+0x8e/0xc0 vmap
0xffffc2000221f000-0xffffc20002228000 36864 _xfs_buf_map_pages+0x8e/0xc0 vmap
0xffffc20002228000-0xffffc20002231000 36864 _xfs_buf_map_pages+0x8e/0xc0 vmap
0xffffc20002231000-0xffffc20002234000 12288 e1000e_setup_rx_resources+0x35/0x122 pages=2 vmalloc
0xffffc20002240000-0xffffc20002261000 135168 e1000_probe+0x1c4/0xa32 phys=d8a60000 ioremap
0xffffc20002261000-0xffffc2000270c000 4894720 sys_swapon+0x509/0xa15 pages=1194 vmalloc vpages
0xffffffffa0000000-0xffffffffa0022000 139264 module_alloc+0x4f/0x55 pages=33 vmalloc
0xffffffffa0022000-0xffffffffa0029000 28672 module_alloc+0x4f/0x55 pages=6 vmalloc
0xffffffffa002b000-0xffffffffa0034000 36864 module_alloc+0x4f/0x55 pages=8 vmalloc
0xffffffffa0034000-0xffffffffa003d000 36864 module_alloc+0x4f/0x55 pages=8 vmalloc
0xffffffffa003d000-0xffffffffa0049000 49152 module_alloc+0x4f/0x55 pages=11 vmalloc
0xffffffffa0049000-0xffffffffa0050000 28672 module_alloc+0x4f/0x55 pages=6 vmalloc
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | arch/x86/mm/ioremap.c | 15 | ||||
-rw-r--r-- | include/linux/vmalloc.h | 3 | ||||
-rw-r--r-- | mm/vmalloc.c | 65 |
3 files changed, 59 insertions(+), 24 deletions(-)
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index d176b23110cc..804de18abcc2 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
@@ -117,8 +117,8 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size, | |||
117 | * have to convert them into an offset in a page-aligned mapping, but the | 117 | * have to convert them into an offset in a page-aligned mapping, but the |
118 | * caller shouldn't need to know that small detail. | 118 | * caller shouldn't need to know that small detail. |
119 | */ | 119 | */ |
120 | static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, | 120 | static void __iomem *__ioremap_caller(resource_size_t phys_addr, |
121 | unsigned long prot_val) | 121 | unsigned long size, unsigned long prot_val, void *caller) |
122 | { | 122 | { |
123 | unsigned long pfn, offset, vaddr; | 123 | unsigned long pfn, offset, vaddr; |
124 | resource_size_t last_addr; | 124 | resource_size_t last_addr; |
@@ -212,7 +212,7 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, | |||
212 | /* | 212 | /* |
213 | * Ok, go for it.. | 213 | * Ok, go for it.. |
214 | */ | 214 | */ |
215 | area = get_vm_area(size, VM_IOREMAP); | 215 | area = get_vm_area_caller(size, VM_IOREMAP, caller); |
216 | if (!area) | 216 | if (!area) |
217 | return NULL; | 217 | return NULL; |
218 | area->phys_addr = phys_addr; | 218 | area->phys_addr = phys_addr; |
@@ -255,7 +255,8 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, | |||
255 | */ | 255 | */ |
256 | void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size) | 256 | void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size) |
257 | { | 257 | { |
258 | return __ioremap(phys_addr, size, _PAGE_CACHE_UC); | 258 | return __ioremap_caller(phys_addr, size, _PAGE_CACHE_UC, |
259 | __builtin_return_address(0)); | ||
259 | } | 260 | } |
260 | EXPORT_SYMBOL(ioremap_nocache); | 261 | EXPORT_SYMBOL(ioremap_nocache); |
261 | 262 | ||
@@ -272,7 +273,8 @@ EXPORT_SYMBOL(ioremap_nocache); | |||
272 | void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size) | 273 | void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size) |
273 | { | 274 | { |
274 | if (pat_wc_enabled) | 275 | if (pat_wc_enabled) |
275 | return __ioremap(phys_addr, size, _PAGE_CACHE_WC); | 276 | return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC, |
277 | __builtin_return_address(0)); | ||
276 | else | 278 | else |
277 | return ioremap_nocache(phys_addr, size); | 279 | return ioremap_nocache(phys_addr, size); |
278 | } | 280 | } |
@@ -280,7 +282,8 @@ EXPORT_SYMBOL(ioremap_wc); | |||
280 | 282 | ||
281 | void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size) | 283 | void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size) |
282 | { | 284 | { |
283 | return __ioremap(phys_addr, size, _PAGE_CACHE_WB); | 285 | return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB, |
286 | __builtin_return_address(0)); | ||
284 | } | 287 | } |
285 | EXPORT_SYMBOL(ioremap_cache); | 288 | EXPORT_SYMBOL(ioremap_cache); |
286 | 289 | ||
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 7f3adfda337a..364789aae9f3 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h | |||
@@ -31,6 +31,7 @@ struct vm_struct { | |||
31 | struct page **pages; | 31 | struct page **pages; |
32 | unsigned int nr_pages; | 32 | unsigned int nr_pages; |
33 | unsigned long phys_addr; | 33 | unsigned long phys_addr; |
34 | void *caller; | ||
34 | }; | 35 | }; |
35 | 36 | ||
36 | /* | 37 | /* |
@@ -66,6 +67,8 @@ static inline size_t get_vm_area_size(const struct vm_struct *area) | |||
66 | } | 67 | } |
67 | 68 | ||
68 | extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags); | 69 | extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags); |
70 | extern struct vm_struct *get_vm_area_caller(unsigned long size, | ||
71 | unsigned long flags, void *caller); | ||
69 | extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, | 72 | extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, |
70 | unsigned long start, unsigned long end); | 73 | unsigned long start, unsigned long end); |
71 | extern struct vm_struct *get_vm_area_node(unsigned long size, | 74 | extern struct vm_struct *get_vm_area_node(unsigned long size, |
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index afa550f66537..e33e0ae69ad1 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
17 | #include <linux/seq_file.h> | 17 | #include <linux/seq_file.h> |
18 | #include <linux/vmalloc.h> | 18 | #include <linux/vmalloc.h> |
19 | #include <linux/kallsyms.h> | ||
19 | 20 | ||
20 | #include <asm/uaccess.h> | 21 | #include <asm/uaccess.h> |
21 | #include <asm/tlbflush.h> | 22 | #include <asm/tlbflush.h> |
@@ -25,7 +26,7 @@ DEFINE_RWLOCK(vmlist_lock); | |||
25 | struct vm_struct *vmlist; | 26 | struct vm_struct *vmlist; |
26 | 27 | ||
27 | static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, | 28 | static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, |
28 | int node); | 29 | int node, void *caller); |
29 | 30 | ||
30 | static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end) | 31 | static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end) |
31 | { | 32 | { |
@@ -204,9 +205,9 @@ unsigned long vmalloc_to_pfn(const void *vmalloc_addr) | |||
204 | } | 205 | } |
205 | EXPORT_SYMBOL(vmalloc_to_pfn); | 206 | EXPORT_SYMBOL(vmalloc_to_pfn); |
206 | 207 | ||
207 | static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags, | 208 | static struct vm_struct * |
208 | unsigned long start, unsigned long end, | 209 | __get_vm_area_node(unsigned long size, unsigned long flags, unsigned long start, |
209 | int node, gfp_t gfp_mask) | 210 | unsigned long end, int node, gfp_t gfp_mask, void *caller) |
210 | { | 211 | { |
211 | struct vm_struct **p, *tmp, *area; | 212 | struct vm_struct **p, *tmp, *area; |
212 | unsigned long align = 1; | 213 | unsigned long align = 1; |
@@ -269,6 +270,7 @@ found: | |||
269 | area->pages = NULL; | 270 | area->pages = NULL; |
270 | area->nr_pages = 0; | 271 | area->nr_pages = 0; |
271 | area->phys_addr = 0; | 272 | area->phys_addr = 0; |
273 | area->caller = caller; | ||
272 | write_unlock(&vmlist_lock); | 274 | write_unlock(&vmlist_lock); |
273 | 275 | ||
274 | return area; | 276 | return area; |
@@ -284,7 +286,8 @@ out: | |||
284 | struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, | 286 | struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, |
285 | unsigned long start, unsigned long end) | 287 | unsigned long start, unsigned long end) |
286 | { | 288 | { |
287 | return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL); | 289 | return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL, |
290 | __builtin_return_address(0)); | ||
288 | } | 291 | } |
289 | EXPORT_SYMBOL_GPL(__get_vm_area); | 292 | EXPORT_SYMBOL_GPL(__get_vm_area); |
290 | 293 | ||
@@ -299,14 +302,22 @@ EXPORT_SYMBOL_GPL(__get_vm_area); | |||
299 | */ | 302 | */ |
300 | struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) | 303 | struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) |
301 | { | 304 | { |
302 | return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END); | 305 | return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, |
306 | -1, GFP_KERNEL, __builtin_return_address(0)); | ||
307 | } | ||
308 | |||
309 | struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, | ||
310 | void *caller) | ||
311 | { | ||
312 | return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, | ||
313 | -1, GFP_KERNEL, caller); | ||
303 | } | 314 | } |
304 | 315 | ||
305 | struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, | 316 | struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, |
306 | int node, gfp_t gfp_mask) | 317 | int node, gfp_t gfp_mask) |
307 | { | 318 | { |
308 | return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node, | 319 | return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node, |
309 | gfp_mask); | 320 | gfp_mask, __builtin_return_address(0)); |
310 | } | 321 | } |
311 | 322 | ||
312 | /* Caller must hold vmlist_lock */ | 323 | /* Caller must hold vmlist_lock */ |
@@ -455,9 +466,11 @@ void *vmap(struct page **pages, unsigned int count, | |||
455 | if (count > num_physpages) | 466 | if (count > num_physpages) |
456 | return NULL; | 467 | return NULL; |
457 | 468 | ||
458 | area = get_vm_area((count << PAGE_SHIFT), flags); | 469 | area = get_vm_area_caller((count << PAGE_SHIFT), flags, |
470 | __builtin_return_address(0)); | ||
459 | if (!area) | 471 | if (!area) |
460 | return NULL; | 472 | return NULL; |
473 | |||
461 | if (map_vm_area(area, prot, &pages)) { | 474 | if (map_vm_area(area, prot, &pages)) { |
462 | vunmap(area->addr); | 475 | vunmap(area->addr); |
463 | return NULL; | 476 | return NULL; |
@@ -468,7 +481,7 @@ void *vmap(struct page **pages, unsigned int count, | |||
468 | EXPORT_SYMBOL(vmap); | 481 | EXPORT_SYMBOL(vmap); |
469 | 482 | ||
470 | static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, | 483 | static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, |
471 | pgprot_t prot, int node) | 484 | pgprot_t prot, int node, void *caller) |
472 | { | 485 | { |
473 | struct page **pages; | 486 | struct page **pages; |
474 | unsigned int nr_pages, array_size, i; | 487 | unsigned int nr_pages, array_size, i; |
@@ -480,7 +493,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, | |||
480 | /* Please note that the recursion is strictly bounded. */ | 493 | /* Please note that the recursion is strictly bounded. */ |
481 | if (array_size > PAGE_SIZE) { | 494 | if (array_size > PAGE_SIZE) { |
482 | pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO, | 495 | pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO, |
483 | PAGE_KERNEL, node); | 496 | PAGE_KERNEL, node, caller); |
484 | area->flags |= VM_VPAGES; | 497 | area->flags |= VM_VPAGES; |
485 | } else { | 498 | } else { |
486 | pages = kmalloc_node(array_size, | 499 | pages = kmalloc_node(array_size, |
@@ -488,6 +501,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, | |||
488 | node); | 501 | node); |
489 | } | 502 | } |
490 | area->pages = pages; | 503 | area->pages = pages; |
504 | area->caller = caller; | ||
491 | if (!area->pages) { | 505 | if (!area->pages) { |
492 | remove_vm_area(area->addr); | 506 | remove_vm_area(area->addr); |
493 | kfree(area); | 507 | kfree(area); |
@@ -521,7 +535,8 @@ fail: | |||
521 | 535 | ||
522 | void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot) | 536 | void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot) |
523 | { | 537 | { |
524 | return __vmalloc_area_node(area, gfp_mask, prot, -1); | 538 | return __vmalloc_area_node(area, gfp_mask, prot, -1, |
539 | __builtin_return_address(0)); | ||
525 | } | 540 | } |
526 | 541 | ||
527 | /** | 542 | /** |
@@ -536,7 +551,7 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot) | |||
536 | * kernel virtual space, using a pagetable protection of @prot. | 551 | * kernel virtual space, using a pagetable protection of @prot. |
537 | */ | 552 | */ |
538 | static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, | 553 | static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, |
539 | int node) | 554 | int node, void *caller) |
540 | { | 555 | { |
541 | struct vm_struct *area; | 556 | struct vm_struct *area; |
542 | 557 | ||
@@ -544,16 +559,19 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, | |||
544 | if (!size || (size >> PAGE_SHIFT) > num_physpages) | 559 | if (!size || (size >> PAGE_SHIFT) > num_physpages) |
545 | return NULL; | 560 | return NULL; |
546 | 561 | ||
547 | area = get_vm_area_node(size, VM_ALLOC, node, gfp_mask); | 562 | area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END, |
563 | node, gfp_mask, caller); | ||
564 | |||
548 | if (!area) | 565 | if (!area) |
549 | return NULL; | 566 | return NULL; |
550 | 567 | ||
551 | return __vmalloc_area_node(area, gfp_mask, prot, node); | 568 | return __vmalloc_area_node(area, gfp_mask, prot, node, caller); |
552 | } | 569 | } |
553 | 570 | ||
554 | void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) | 571 | void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) |
555 | { | 572 | { |
556 | return __vmalloc_node(size, gfp_mask, prot, -1); | 573 | return __vmalloc_node(size, gfp_mask, prot, -1, |
574 | __builtin_return_address(0)); | ||
557 | } | 575 | } |
558 | EXPORT_SYMBOL(__vmalloc); | 576 | EXPORT_SYMBOL(__vmalloc); |
559 | 577 | ||
@@ -568,7 +586,8 @@ EXPORT_SYMBOL(__vmalloc); | |||
568 | */ | 586 | */ |
569 | void *vmalloc(unsigned long size) | 587 | void *vmalloc(unsigned long size) |
570 | { | 588 | { |
571 | return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL); | 589 | return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, |
590 | -1, __builtin_return_address(0)); | ||
572 | } | 591 | } |
573 | EXPORT_SYMBOL(vmalloc); | 592 | EXPORT_SYMBOL(vmalloc); |
574 | 593 | ||
@@ -608,7 +627,8 @@ EXPORT_SYMBOL(vmalloc_user); | |||
608 | */ | 627 | */ |
609 | void *vmalloc_node(unsigned long size, int node) | 628 | void *vmalloc_node(unsigned long size, int node) |
610 | { | 629 | { |
611 | return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node); | 630 | return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, |
631 | node, __builtin_return_address(0)); | ||
612 | } | 632 | } |
613 | EXPORT_SYMBOL(vmalloc_node); | 633 | EXPORT_SYMBOL(vmalloc_node); |
614 | 634 | ||
@@ -843,7 +863,8 @@ struct vm_struct *alloc_vm_area(size_t size) | |||
843 | { | 863 | { |
844 | struct vm_struct *area; | 864 | struct vm_struct *area; |
845 | 865 | ||
846 | area = get_vm_area(size, VM_IOREMAP); | 866 | area = get_vm_area_caller(size, VM_IOREMAP, |
867 | __builtin_return_address(0)); | ||
847 | if (area == NULL) | 868 | if (area == NULL) |
848 | return NULL; | 869 | return NULL; |
849 | 870 | ||
@@ -914,6 +935,14 @@ static int s_show(struct seq_file *m, void *p) | |||
914 | seq_printf(m, "0x%p-0x%p %7ld", | 935 | seq_printf(m, "0x%p-0x%p %7ld", |
915 | v->addr, v->addr + v->size, v->size); | 936 | v->addr, v->addr + v->size, v->size); |
916 | 937 | ||
938 | if (v->caller) { | ||
939 | char buff[2 * KSYM_NAME_LEN]; | ||
940 | |||
941 | seq_putc(m, ' '); | ||
942 | sprint_symbol(buff, (unsigned long)v->caller); | ||
943 | seq_puts(m, buff); | ||
944 | } | ||
945 | |||
917 | if (v->nr_pages) | 946 | if (v->nr_pages) |
918 | seq_printf(m, " pages=%d", v->nr_pages); | 947 | seq_printf(m, " pages=%d", v->nr_pages); |
919 | 948 | ||