Diffstat (limited to 'mm/vmalloc.c')

-rw-r--r--	mm/vmalloc.c	38
1 file changed, 14 insertions, 24 deletions
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 266162d2ba28..1ac191ce5641 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -24,6 +24,9 @@
 DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 
+static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+			    int node);
+
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
 {
 	pte_t *pte;
@@ -238,7 +241,6 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 
 /**
  * get_vm_area - reserve a contingous kernel virtual area
- *
  * @size: size of the area
  * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
  *
@@ -270,7 +272,7 @@ static struct vm_struct *__find_vm_area(void *addr)
 }
 
 /* Caller must hold vmlist_lock */
-struct vm_struct *__remove_vm_area(void *addr)
+static struct vm_struct *__remove_vm_area(void *addr)
 {
 	struct vm_struct **p, *tmp;
 
@@ -293,7 +295,6 @@ found:
 
 /**
  * remove_vm_area - find and remove a contingous kernel virtual area
- *
  * @addr: base address
  *
  * Search for the kernel VM area starting at @addr, and remove it.
@@ -352,7 +353,6 @@ void __vunmap(void *addr, int deallocate_pages)
 
 /**
  * vfree - release memory allocated by vmalloc()
- *
  * @addr: memory base address
  *
  * Free the virtually contiguous memory area starting at @addr, as
@@ -370,7 +370,6 @@ EXPORT_SYMBOL(vfree);
 
 /**
  * vunmap - release virtual mapping obtained by vmap()
- *
  * @addr: memory base address
  *
  * Free the virtually contiguous memory area starting at @addr,
@@ -387,7 +386,6 @@ EXPORT_SYMBOL(vunmap);
 
 /**
  * vmap - map an array of pages into virtually contiguous space
- *
  * @pages: array of page pointers
  * @count: number of pages to map
  * @flags: vm_area->flags
@@ -468,7 +466,6 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
 
 /**
  * __vmalloc_node - allocate virtually contiguous memory
- *
  * @size: allocation size
  * @gfp_mask: flags for the page level allocator
  * @prot: protection mask for the allocated pages
@@ -478,8 +475,8 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
  * allocator with @gfp_mask flags. Map them into contiguous
  * kernel virtual space, using a pagetable protection of @prot.
  */
-void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
-			int node)
+static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+				int node)
 {
 	struct vm_struct *area;
 
@@ -493,7 +490,6 @@ void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
 
 	return __vmalloc_area_node(area, gfp_mask, prot, node);
 }
-EXPORT_SYMBOL(__vmalloc_node);
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
@@ -503,9 +499,7 @@ EXPORT_SYMBOL(__vmalloc);
 
 /**
  * vmalloc - allocate virtually contiguous memory
- *
  * @size: allocation size
- *
  * Allocate enough pages to cover @size from the page level
  * allocator and map them into contiguous kernel virtual space.
  *
@@ -519,11 +513,11 @@ void *vmalloc(unsigned long size)
 EXPORT_SYMBOL(vmalloc);
 
 /**
- * vmalloc_user - allocate virtually contiguous memory which has
- *		  been zeroed so it can be mapped to userspace without
- *		  leaking data.
+ * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
+ * @size: allocation size
  *
- * @size: allocation size
+ * The resulting memory area is zeroed so it can be mapped to userspace
+ * without leaking data.
  */
 void *vmalloc_user(unsigned long size)
 {
@@ -542,7 +536,6 @@ EXPORT_SYMBOL(vmalloc_user);
 
 /**
  * vmalloc_node - allocate memory on a specific node
- *
  * @size: allocation size
  * @node: numa node
  *
@@ -564,7 +557,6 @@ EXPORT_SYMBOL(vmalloc_node);
 
 /**
  * vmalloc_exec - allocate virtually contiguous, executable memory
- *
  * @size: allocation size
  *
  * Kernel-internal function to allocate enough pages to cover @size
@@ -582,7 +574,6 @@ void *vmalloc_exec(unsigned long size)
 
 /**
  * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
- *
  * @size: allocation size
  *
  * Allocate enough 32bit PA addressable pages to cover @size from the
@@ -595,11 +586,11 @@ void *vmalloc_32(unsigned long size)
 EXPORT_SYMBOL(vmalloc_32);
 
 /**
- * vmalloc_32_user - allocate virtually contiguous memory (32bit
- *		     addressable) which is zeroed so it can be
- *		     mapped to userspace without leaking data.
- *
+ * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
  * @size: allocation size
+ *
+ * The resulting memory area is 32bit addressable and zeroed so it can be
+ * mapped to userspace without leaking data.
  */
 void *vmalloc_32_user(unsigned long size)
 {
@@ -693,7 +684,6 @@ finished:
 
 /**
  * remap_vmalloc_range - map vmalloc pages to userspace
- *
  * @vma: vma to cover (map full range of vma)
  * @addr: vmalloc memory
  * @pgoff: number of pages into addr before first page to map
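
Note on callers: since this change makes __vmalloc_node() static and drops EXPORT_SYMBOL(__vmalloc_node), code outside mm/vmalloc.c has to go through the exported wrappers instead. A minimal sketch of hypothetical caller code follows (the example_alloc()/example_free() names and the static buffer are illustrative only, not part of this patch); it assumes a node-aware allocation with the default PAGE_KERNEL protections is what is wanted:

#include <linux/vmalloc.h>
#include <linux/errno.h>

static void *example_buf;

static int example_alloc(unsigned long size, int node)
{
	/*
	 * Before this patch an external caller could have used
	 *   __vmalloc_node(size, GFP_KERNEL, PAGE_KERNEL, node);
	 * the exported, node-aware vmalloc_node() wrapper covers
	 * the same case.
	 */
	example_buf = vmalloc_node(size, node);
	if (!example_buf)
		return -ENOMEM;
	return 0;
}

static void example_free(void)
{
	/* vfree() of a NULL pointer is a no-op, so this is always safe. */
	vfree(example_buf);
	example_buf = NULL;
}

For non-default protections or gfp flags, __vmalloc(size, gfp_mask, prot) remains exported (see EXPORT_SYMBOL(__vmalloc) in the context above), only without the node hint.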
