Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--  mm/vmalloc.c  65
1 file changed, 47 insertions(+), 18 deletions(-)
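Summary of the change below: a void *caller argument is threaded through the vmalloc allocation paths. The exported entry points (__vmalloc, vmalloc, vmalloc_node, vmap, get_vm_area, alloc_vm_area) capture their call site with __builtin_return_address(0), __get_vm_area_node() stores it in the vm_struct's new caller field, and s_show() resolves it with sprint_symbol() so each line of the seq_file output names the function that created the mapping. A minimal userspace sketch of the idiom follows the diff.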
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index afa550f66537..e33e0ae69ad1 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -16,6 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/seq_file.h>
 #include <linux/vmalloc.h>
+#include <linux/kallsyms.h>
 
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
@@ -25,7 +26,7 @@ DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 
 static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
-                            int node);
+                            int node, void *caller);
 
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
 {
@@ -204,9 +205,9 @@ unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
 }
 EXPORT_SYMBOL(vmalloc_to_pfn);
 
-static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
-                                            unsigned long start, unsigned long end,
-                                            int node, gfp_t gfp_mask)
+static struct vm_struct *
+__get_vm_area_node(unsigned long size, unsigned long flags, unsigned long start,
+                unsigned long end, int node, gfp_t gfp_mask, void *caller)
 {
         struct vm_struct **p, *tmp, *area;
         unsigned long align = 1;
@@ -269,6 +270,7 @@ found:
         area->pages = NULL;
         area->nr_pages = 0;
         area->phys_addr = 0;
+        area->caller = caller;
         write_unlock(&vmlist_lock);
 
         return area;
@@ -284,7 +286,8 @@ out:
 struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
                                 unsigned long start, unsigned long end)
 {
-        return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
+        return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+                                  __builtin_return_address(0));
 }
 EXPORT_SYMBOL_GPL(__get_vm_area);
 
@@ -299,14 +302,22 @@ EXPORT_SYMBOL_GPL(__get_vm_area);
  */
 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 {
-        return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
+        return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+                                  -1, GFP_KERNEL, __builtin_return_address(0));
+}
+
+struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
+                                     void *caller)
+{
+        return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+                                  -1, GFP_KERNEL, caller);
 }
 
 struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
                                    int node, gfp_t gfp_mask)
 {
         return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
-                                  gfp_mask);
+                                  gfp_mask, __builtin_return_address(0));
 }
 
 /* Caller must hold vmlist_lock */
@@ -455,9 +466,11 @@ void *vmap(struct page **pages, unsigned int count,
         if (count > num_physpages)
                 return NULL;
 
-        area = get_vm_area((count << PAGE_SHIFT), flags);
+        area = get_vm_area_caller((count << PAGE_SHIFT), flags,
+                                  __builtin_return_address(0));
         if (!area)
                 return NULL;
+
         if (map_vm_area(area, prot, &pages)) {
                 vunmap(area->addr);
                 return NULL;
@@ -468,7 +481,7 @@ void *vmap(struct page **pages, unsigned int count,
 EXPORT_SYMBOL(vmap);
 
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
-                                 pgprot_t prot, int node)
+                                 pgprot_t prot, int node, void *caller)
 {
         struct page **pages;
         unsigned int nr_pages, array_size, i;
@@ -480,7 +493,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
         /* Please note that the recursion is strictly bounded. */
         if (array_size > PAGE_SIZE) {
                 pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
-                                       PAGE_KERNEL, node);
+                                       PAGE_KERNEL, node, caller);
                 area->flags |= VM_VPAGES;
         } else {
                 pages = kmalloc_node(array_size,
@@ -488,6 +501,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                                      node);
         }
         area->pages = pages;
+        area->caller = caller;
         if (!area->pages) {
                 remove_vm_area(area->addr);
                 kfree(area);
@@ -521,7 +535,8 @@ fail:
 
 void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
 {
-        return __vmalloc_area_node(area, gfp_mask, prot, -1);
+        return __vmalloc_area_node(area, gfp_mask, prot, -1,
+                                   __builtin_return_address(0));
 }
 
 /**
@@ -536,7 +551,7 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
  * kernel virtual space, using a pagetable protection of @prot.
  */
 static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
-                            int node)
+                            int node, void *caller)
 {
         struct vm_struct *area;
 
@@ -544,16 +559,19 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
         if (!size || (size >> PAGE_SHIFT) > num_physpages)
                 return NULL;
 
-        area = get_vm_area_node(size, VM_ALLOC, node, gfp_mask);
+        area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
+                                  node, gfp_mask, caller);
+
         if (!area)
                 return NULL;
 
-        return __vmalloc_area_node(area, gfp_mask, prot, node);
+        return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
 }
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
-        return __vmalloc_node(size, gfp_mask, prot, -1);
+        return __vmalloc_node(size, gfp_mask, prot, -1,
+                              __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__vmalloc);
 
@@ -568,7 +586,8 @@ EXPORT_SYMBOL(__vmalloc);
  */
 void *vmalloc(unsigned long size)
 {
-        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+        return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+                              -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc);
 
@@ -608,7 +627,8 @@ EXPORT_SYMBOL(vmalloc_user);
  */
 void *vmalloc_node(unsigned long size, int node)
 {
-        return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
+        return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+                              node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_node);
 
@@ -843,7 +863,8 @@ struct vm_struct *alloc_vm_area(size_t size)
 {
         struct vm_struct *area;
 
-        area = get_vm_area(size, VM_IOREMAP);
+        area = get_vm_area_caller(size, VM_IOREMAP,
+                                  __builtin_return_address(0));
         if (area == NULL)
                 return NULL;
 
@@ -914,6 +935,14 @@ static int s_show(struct seq_file *m, void *p)
         seq_printf(m, "0x%p-0x%p %7ld",
                 v->addr, v->addr + v->size, v->size);
 
+        if (v->caller) {
+                char buff[2 * KSYM_NAME_LEN];
+
+                seq_putc(m, ' ');
+                sprint_symbol(buff, (unsigned long)v->caller);
+                seq_puts(m, buff);
+        }
+
         if (v->nr_pages)
                 seq_printf(m, " pages=%d", v->nr_pages);
 
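
The key idiom here - record __builtin_return_address(0) at the public entry point, store it alongside the object, and resolve it to a symbol name only when reporting - also works outside the kernel. Below is a minimal userspace sketch under assumed names (tracked_alloc/tracked_show are invented for illustration, and dladdr() stands in for the kernel's sprint_symbol()). Build with: gcc -rdynamic -o demo demo.c -ldl

/*
 * Minimal userspace sketch of the caller-tracking idiom in this patch.
 * tracked_alloc() and tracked_show() are invented names; dladdr() plays
 * the role the kernel's sprint_symbol() plays in s_show() above.
 */
#define _GNU_SOURCE
#include <dlfcn.h>
#include <stdio.h>
#include <stdlib.h>

struct tracked {
        size_t size;
        void *caller;           /* call site that requested the allocation */
};

static __attribute__((noinline)) struct tracked *tracked_alloc(size_t size)
{
        struct tracked *t = malloc(sizeof(*t) + size);

        if (!t)
                return NULL;
        t->size = size;
        /* record the call site, as vmalloc() now does via
         * __builtin_return_address(0) */
        t->caller = __builtin_return_address(0);
        return t;
}

static void tracked_show(const struct tracked *t)
{
        Dl_info info;

        printf("%zu bytes", t->size);
        /* resolve the saved address to a symbol name at report time */
        if (dladdr(t->caller, &info) && info.dli_sname)
                printf(" caller=%s", info.dli_sname);
        putchar('\n');
}

int main(void)
{
        struct tracked *t = tracked_alloc(128);

        if (t) {
                tracked_show(t);        /* e.g. "128 bytes caller=main" */
                free(t);
        }
        return 0;
}

The design point is the same as in the patch: capturing the return address at allocation time costs one pointer store per object, while the expensive symbolization happens only when someone actually reads the report - in the kernel's case, when /proc/vmallocinfo prints the caller between the address range and the pages= field, per s_show() above.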