author     Jeff Garzik <jgarzik@pobox.com>   2005-10-30 01:56:31 -0500
committer  Jeff Garzik <jgarzik@pobox.com>   2005-10-30 01:56:31 -0500
commit     81cfb8864c73230eb1c37753aba517db15cf4d8f (patch)
tree       649ff25543834cf9983ea41b93126bea97d75475 /mm/vmalloc.c
parent     0169e284f6b6b263cc7c2ed25986b96cd6fda610 (diff)
parent     9f75e1eff3edb2bb07349b94c28f4f2a6c66ca43 (diff)

Merge branch 'master'
Diffstat (limited to 'mm/vmalloc.c')
 -rw-r--r--  mm/vmalloc.c | 77
 1 file changed, 58 insertions, 19 deletions
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 1150229b6366..54a90e83cb31 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -5,6 +5,7 @@
  * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
  * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
+ * Numa awareness, Christoph Lameter, SGI, June 2005
  */
 
 #include <linux/mm.h>
@@ -88,7 +89,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
 {
         pte_t *pte;
 
-        pte = pte_alloc_kernel(&init_mm, pmd, addr);
+        pte = pte_alloc_kernel(pmd, addr);
         if (!pte)
                 return -ENOMEM;
         do {
@@ -146,20 +147,18 @@ int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
 
         BUG_ON(addr >= end);
         pgd = pgd_offset_k(addr);
-        spin_lock(&init_mm.page_table_lock);
         do {
                 next = pgd_addr_end(addr, end);
                 err = vmap_pud_range(pgd, addr, next, prot, pages);
                 if (err)
                         break;
         } while (pgd++, addr = next, addr != end);
-        spin_unlock(&init_mm.page_table_lock);
         flush_cache_vmap((unsigned long) area->addr, end);
         return err;
 }
 
-struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
-                                unsigned long start, unsigned long end)
+struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
+                                unsigned long start, unsigned long end, int node)
 {
         struct vm_struct **p, *tmp, *area;
         unsigned long align = 1;
@@ -178,7 +177,7 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
         addr = ALIGN(start, align);
         size = PAGE_ALIGN(size);
 
-        area = kmalloc(sizeof(*area), GFP_KERNEL);
+        area = kmalloc_node(sizeof(*area), GFP_KERNEL, node);
         if (unlikely(!area))
                 return NULL;
 
@@ -231,6 +230,12 @@ out:
         return NULL;
 }
 
+struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
+                                unsigned long start, unsigned long end)
+{
+        return __get_vm_area_node(size, flags, start, end, -1);
+}
+
 /**
  * get_vm_area  -  reserve a contingous kernel virtual area
  *
@@ -246,6 +251,11 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
         return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
 }
 
+struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, int node)
+{
+        return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node);
+}
+
 /* Caller must hold vmlist_lock */
 struct vm_struct *__remove_vm_area(void *addr)
 {
@@ -342,7 +352,6 @@ void vfree(void *addr)
         BUG_ON(in_interrupt());
         __vunmap(addr, 1);
 }
-
 EXPORT_SYMBOL(vfree);
 
 /**
@@ -360,7 +369,6 @@ void vunmap(void *addr)
         BUG_ON(in_interrupt());
         __vunmap(addr, 0);
 }
-
 EXPORT_SYMBOL(vunmap);
 
 /**
@@ -392,10 +400,10 @@ void *vmap(struct page **pages, unsigned int count,
 
         return area->addr;
 }
-
 EXPORT_SYMBOL(vmap);
 
-void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
+void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
+                                pgprot_t prot, int node)
 {
         struct page **pages;
         unsigned int nr_pages, array_size, i;
@@ -406,9 +414,9 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
         area->nr_pages = nr_pages;
         /* Please note that the recursion is strictly bounded. */
         if (array_size > PAGE_SIZE)
-                pages = __vmalloc(array_size, gfp_mask, PAGE_KERNEL);
+                pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
         else
-                pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
+                pages = kmalloc_node(array_size, (gfp_mask & ~__GFP_HIGHMEM), node);
         area->pages = pages;
         if (!area->pages) {
                 remove_vm_area(area->addr);
@@ -418,7 +426,10 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
         memset(area->pages, 0, array_size);
 
         for (i = 0; i < area->nr_pages; i++) {
-                area->pages[i] = alloc_page(gfp_mask);
+                if (node < 0)
+                        area->pages[i] = alloc_page(gfp_mask);
+                else
+                        area->pages[i] = alloc_pages_node(node, gfp_mask, 0);
                 if (unlikely(!area->pages[i])) {
                         /* Successfully allocated i pages, free them in __vunmap() */
                         area->nr_pages = i;
@@ -435,18 +446,25 @@ fail:
         return NULL;
 }
 
+void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
+{
+        return __vmalloc_area_node(area, gfp_mask, prot, -1);
+}
+
 /**
- * __vmalloc  -  allocate virtually contiguous memory
+ * __vmalloc_node  -  allocate virtually contiguous memory
  *
  * @size:     allocation size
  * @gfp_mask: flags for the page level allocator
  * @prot:     protection mask for the allocated pages
+ * @node      node to use for allocation or -1
  *
  * Allocate enough pages to cover @size from the page level
  * allocator with @gfp_mask flags.  Map them into contiguous
  * kernel virtual space, using a pagetable protection of @prot.
  */
-void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
+void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+                        int node)
 {
         struct vm_struct *area;
 
@@ -454,13 +472,18 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
         if (!size || (size >> PAGE_SHIFT) > num_physpages)
                 return NULL;
 
-        area = get_vm_area(size, VM_ALLOC);
+        area = get_vm_area_node(size, VM_ALLOC, node);
         if (!area)
                 return NULL;
 
-        return __vmalloc_area(area, gfp_mask, prot);
+        return __vmalloc_area_node(area, gfp_mask, prot, node);
 }
+EXPORT_SYMBOL(__vmalloc_node);
 
+void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
+{
+        return __vmalloc_node(size, gfp_mask, prot, -1);
+}
 EXPORT_SYMBOL(__vmalloc);
 
 /**
@@ -478,9 +501,26 @@ void *vmalloc(unsigned long size)
 {
         return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
 }
-
 EXPORT_SYMBOL(vmalloc);
 
+/**
+ * vmalloc_node  -  allocate memory on a specific node
+ *
+ * @size:  allocation size
+ * @node;  numa node
+ *
+ * Allocate enough pages to cover @size from the page level
+ * allocator and map them into contiguous kernel virtual space.
+ *
+ * For tight cotrol over page level allocator and protection flags
+ * use __vmalloc() instead.
+ */
+void *vmalloc_node(unsigned long size, int node)
+{
+        return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
+}
+EXPORT_SYMBOL(vmalloc_node);
+
 #ifndef PAGE_KERNEL_EXEC
 # define PAGE_KERNEL_EXEC PAGE_KERNEL
 #endif
@@ -515,7 +555,6 @@ void *vmalloc_32(unsigned long size)
 {
         return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
 }
-
 EXPORT_SYMBOL(vmalloc_32);
 
 long vread(char *buf, char *addr, unsigned long count)
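
Taken together, the hunks above add NUMA-aware entry points (__get_vm_area_node(), get_vm_area_node(), __vmalloc_area_node(), __vmalloc_node(), vmalloc_node()) and turn the existing interfaces into thin wrappers that pass node = -1. A minimal usage sketch, not part of this commit: the module name, buffer size, and the choice of numa_node_id() for the target node are illustrative assumptions.

/* Hypothetical example module: allocate node-local scratch space with the
 * vmalloc_node() interface introduced by this merge. */
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/topology.h>     /* numa_node_id() */

static void *example_buf;       /* illustrative name, not from the patch */

static int __init example_init(void)
{
        /* 1 MB of virtually contiguous memory whose backing pages are
         * allocated on the local node; plain vmalloc() would express no
         * node preference (node == -1 internally). */
        example_buf = vmalloc_node(1 << 20, numa_node_id());
        if (!example_buf)
                return -ENOMEM;
        return 0;
}

static void __exit example_exit(void)
{
        vfree(example_buf);     /* freed through the same path as vmalloc() */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");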