Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--	mm/vmalloc.c	47
1 files changed, 20 insertions, 27 deletions
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 35f8553f893a..1ac191ce5641 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -24,6 +24,9 @@
 DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 
+static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+			    int node);
+
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
 {
 	pte_t *pte;
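The forward declaration added here is needed because __vmalloc_area_node() calls __vmalloc_node() to allocate the pages[] array when that array is itself bigger than a page, while __vmalloc_node() is only defined later in the file and is made static by this patch. A minimal sketch of the general pattern, with made-up names rather than the kernel functions:

/* A file-local function used before its definition needs a forward
 * declaration once it loses external linkage. */
static long scale(long x);			/* forward declaration */

static long accumulate(long x)
{
	return x + scale(x);			/* call site comes first */
}

static long scale(long x)
{
	return 2 * x;				/* definition comes later */
}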
@@ -238,7 +241,6 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 
 /**
  * get_vm_area - reserve a contingous kernel virtual area
- *
  * @size: size of the area
  * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
  *
@@ -270,7 +272,7 @@ static struct vm_struct *__find_vm_area(void *addr)
 }
 
 /* Caller must hold vmlist_lock */
-struct vm_struct *__remove_vm_area(void *addr)
+static struct vm_struct *__remove_vm_area(void *addr)
 {
 	struct vm_struct **p, *tmp;
 
@@ -293,7 +295,6 @@ found:
 
 /**
  * remove_vm_area - find and remove a contingous kernel virtual area
- *
  * @addr: base address
  *
  * Search for the kernel VM area starting at @addr, and remove it.
@@ -330,6 +331,8 @@ void __vunmap(void *addr, int deallocate_pages)
 		return;
 	}
 
+	debug_check_no_locks_freed(addr, area->size);
+
 	if (deallocate_pages) {
 		int i;
 
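The new debug_check_no_locks_freed() call reports the address range being torn down to the lock debugging code, which can then complain if a lock living inside that vmalloc area is still held at the time of the free. A sketch of the kind of bug this is meant to catch; the structure and function names are hypothetical, not part of this patch:

struct foo_dev {
	spinlock_t lock;
	unsigned char buf[1 << 20];	/* big enough to be vmalloc'd */
};

static void foo_teardown(struct foo_dev *dev)
{
	spin_lock(&dev->lock);
	vfree(dev);			/* BUG: frees the memory that holds a
					 * currently-held lock; the new check in
					 * __vunmap() lets lock debugging flag it */
	spin_unlock(&dev->lock);	/* and this is now a use-after-free */
}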
@@ -338,7 +341,7 @@ void __vunmap(void *addr, int deallocate_pages)
 			__free_page(area->pages[i]);
 		}
 
-		if (area->nr_pages > PAGE_SIZE/sizeof(struct page *))
+		if (area->flags & VM_VPAGES)
 			vfree(area->pages);
 		else
 			kfree(area->pages);
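__vunmap() used to re-derive, from nr_pages, whether the pages[] array had been vmalloc'd or kmalloc'd; it now just tests the VM_VPAGES flag that the allocation path records (see the __vmalloc_area_node() hunk further down), so the free side no longer has to mirror the allocation-site size check. The invariant being preserved, in a rough sketch with illustrative names:

/* Memory must be returned to the allocator it came from, so the free
 * path either recomputes or, as after this patch, records the choice. */
static void release_page_array(struct page **pages, bool vmalloced)
{
	if (vmalloced)
		vfree(pages);		/* array came from __vmalloc_node() */
	else
		kfree(pages);		/* array came from kmalloc_node() */
}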
@@ -350,7 +353,6 @@ void __vunmap(void *addr, int deallocate_pages)
 
 /**
  * vfree - release memory allocated by vmalloc()
- *
  * @addr: memory base address
  *
  * Free the virtually contiguous memory area starting at @addr, as
@@ -368,7 +370,6 @@ EXPORT_SYMBOL(vfree);
 
 /**
  * vunmap - release virtual mapping obtained by vmap()
- *
  * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
@@ -385,7 +386,6 @@ EXPORT_SYMBOL(vunmap);
 
 /**
  * vmap - map an array of pages into virtually contiguous space
- *
  * @pages: array of page pointers
  * @count: number of pages to map
  * @flags: vm_area->flags
@@ -425,9 +425,10 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 
 	area->nr_pages = nr_pages;
 	/* Please note that the recursion is strictly bounded. */
-	if (array_size > PAGE_SIZE)
+	if (array_size > PAGE_SIZE) {
 		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
-	else
+		area->flags |= VM_VPAGES;
+	} else
 		pages = kmalloc_node(array_size, (gfp_mask & ~__GFP_HIGHMEM), node);
 	area->pages = pages;
 	if (!area->pages) {
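Setting VM_VPAGES at the point where the fallback to __vmalloc_node() happens records in area->flags that pages[] is itself a vmalloc'd array. As a rough feel for when that branch is taken (the figures assume 4 KiB pages and 8-byte struct page pointers, which the patch itself does not state): the array outgrows a single page once the mapping covers more than 512 pages, i.e. for allocations larger than about 2 MiB.

/* Back-of-the-envelope helper, assuming PAGE_SIZE = 4096 and 8-byte
 * pointers; returns true when pages[] would itself be vmalloc'd and
 * VM_VPAGES would therefore be set. */
static bool pages_array_needs_vmalloc(unsigned long nr_pages)
{
	unsigned long array_size = nr_pages * sizeof(struct page *);

	return array_size > PAGE_SIZE;	/* > 512 entries with 4K/8B */
}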
@@ -465,7 +466,6 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
 
 /**
  * __vmalloc_node - allocate virtually contiguous memory
- *
  * @size: allocation size
  * @gfp_mask: flags for the page level allocator
  * @prot: protection mask for the allocated pages
@@ -475,8 +475,8 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
  * allocator with @gfp_mask flags.  Map them into contiguous
  * kernel virtual space, using a pagetable protection of @prot.
  */
-void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
 			int node)
 {
 	struct vm_struct *area;
 
@@ -490,7 +490,6 @@ void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
 
 	return __vmalloc_area_node(area, gfp_mask, prot, node);
 }
-EXPORT_SYMBOL(__vmalloc_node);
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
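With the EXPORT_SYMBOL() gone and the definition made static (matching the prototype change above), __vmalloc_node() becomes an internal helper; code outside this file is expected to go through the still-exported wrappers such as __vmalloc(), vmalloc() or vmalloc_node(). An illustrative caller after this change; the function and variable names here are made up:

static int setup_table(unsigned long entries, int node)
{
	u64 *table = vmalloc_node(entries * sizeof(*table), node);

	if (!table)
		return -ENOMEM;
	memset(table, 0, entries * sizeof(*table));
	/* ... populate and use the table ... */
	vfree(table);
	return 0;
}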
@@ -500,9 +499,7 @@ EXPORT_SYMBOL(__vmalloc);
 
 /**
  * vmalloc - allocate virtually contiguous memory
- *
  * @size: allocation size
- *
  * Allocate enough pages to cover @size from the page level
  * allocator and map them into contiguous kernel virtual space.
  *
@@ -516,11 +513,11 @@ void *vmalloc(unsigned long size)
 EXPORT_SYMBOL(vmalloc);
 
 /**
- * vmalloc_user - allocate virtually contiguous memory which has
- *		  been zeroed so it can be mapped to userspace without
- *		  leaking data.
+ * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
+ * @size: allocation size
  *
- * @size: allocation size
+ * The resulting memory area is zeroed so it can be mapped to userspace
+ * without leaking data.
  */
 void *vmalloc_user(unsigned long size)
 {
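This hunk, like the many single '- *' removals throughout the patch, brings the comments into the usual kernel-doc shape: a one-line "name - summary", the @parameter lines immediately after it, then a blank comment line before the longer description. The expected layout is roughly:

/**
 * function_name - one-line summary of what the function does
 * @first_arg: meaning of the first argument
 * @second_arg: meaning of the second argument
 *
 * Longer free-form description, as many lines as needed, separated from
 * the summary and parameter block by a single blank comment line.
 */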
@@ -539,7 +536,6 @@ EXPORT_SYMBOL(vmalloc_user);
 
 /**
  * vmalloc_node - allocate memory on a specific node
- *
  * @size: allocation size
  * @node: numa node
 *
@@ -561,7 +557,6 @@ EXPORT_SYMBOL(vmalloc_node);
 
 /**
  * vmalloc_exec - allocate virtually contiguous, executable memory
- *
  * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
@@ -579,7 +574,6 @@ void *vmalloc_exec(unsigned long size)
 
 /**
  * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
- *
  * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
@@ -592,11 +586,11 @@ void *vmalloc_32(unsigned long size)
 EXPORT_SYMBOL(vmalloc_32);
 
 /**
- * vmalloc_32_user - allocate virtually contiguous memory (32bit
- *		     addressable) which is zeroed so it can be
- *		     mapped to userspace without leaking data.
- *
+ * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
  * @size: allocation size
+ *
+ * The resulting memory area is 32bit addressable and zeroed so it can be
+ * mapped to userspace without leaking data.
  */
 void *vmalloc_32_user(unsigned long size)
 {
@@ -690,7 +684,6 @@ finished:
 
 /**
  * remap_vmalloc_range - map vmalloc pages to userspace
- *
  * @vma: vma to cover (map full range of vma)
  * @addr: vmalloc memory
  * @pgoff: number of pages into addr before first page to map