author     Christoph Lameter <clameter@engr.sgi.com>  2005-10-29 21:15:41 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>      2005-10-30 00:40:35 -0400
commit     930fc45a49ddebe7555cc5c837d82b9c27e65ff4 (patch)
tree       f0e4b32bd4e2f951c4eb1bc1fcdeefdcbb8e6195
parent     be15cd72d256e5eb3261a781b8507fac83ab33f6 (diff)
[PATCH] vmalloc_node
This patch adds vmalloc_node(size, node), which allocates the necessary memory
on the specified node, plus get_vm_area_node(size, flags, node) and the other
functions they depend on.

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  include/linux/vmalloc.h |  8
-rw-r--r--  mm/vmalloc.c            | 73
2 files changed, 64 insertions(+), 17 deletions(-)
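As an illustration only (not part of the commit), a caller that wants a buffer
backed by pages from a particular NUMA node could now write something like the
sketch below; the helper name, the 64 KiB size and the error handling are
hypothetical:

#include <linux/vmalloc.h>

/* Hypothetical example: a 64 KiB buffer whose pages come from @node. */
static void *example_alloc_on_node(int node)
{
	void *buf = vmalloc_node(64 * 1024, node);	/* node < 0 behaves like vmalloc() */

	if (!buf)
		return NULL;
	/* ... use buf ... */
	return buf;	/* released later with the ordinary vfree(buf) */
}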
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 3701a0673d2c..1d5577b2b752 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -32,10 +32,14 @@ struct vm_struct {
  * Highlevel APIs for driver use
  */
 extern void *vmalloc(unsigned long size);
+extern void *vmalloc_node(unsigned long size, int node);
 extern void *vmalloc_exec(unsigned long size);
 extern void *vmalloc_32(unsigned long size);
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
-extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot);
+extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
+				pgprot_t prot);
+extern void *__vmalloc_node(unsigned long size, gfp_t gfp_mask,
+				pgprot_t prot, int node);
 extern void vfree(void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
@@ -48,6 +52,8 @@ extern void vunmap(void *addr);
 extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
 extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
					unsigned long start, unsigned long end);
+extern struct vm_struct *get_vm_area_node(unsigned long size,
+					unsigned long flags, int node);
 extern struct vm_struct *remove_vm_area(void *addr);
 extern struct vm_struct *__remove_vm_area(void *addr);
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
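A small sketch (not text from the patch) of the convention these new
prototypes follow: a negative node id means "no placement preference", and
__vmalloc() becomes a thin wrapper that passes -1, so the two calls below end
up on the same code path once the patch is applied:

	unsigned long size = 4 * PAGE_SIZE;	/* any size */
	void *a = __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
	void *b = __vmalloc_node(size, GFP_KERNEL, PAGE_KERNEL, -1);	/* same path as a */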
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 1150229b6366..5e9120598799 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -5,6 +5,7 @@
  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
  *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
+ *  Numa awareness, Christoph Lameter, SGI, June 2005
  */
 
 #include <linux/mm.h>
@@ -158,8 +159,8 @@ int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
 	return err;
 }
 
-struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
-				unsigned long start, unsigned long end)
+struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
+				unsigned long start, unsigned long end, int node)
 {
 	struct vm_struct **p, *tmp, *area;
 	unsigned long align = 1;
@@ -178,7 +179,7 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 	addr = ALIGN(start, align);
 	size = PAGE_ALIGN(size);
 
-	area = kmalloc(sizeof(*area), GFP_KERNEL);
+	area = kmalloc_node(sizeof(*area), GFP_KERNEL, node);
 	if (unlikely(!area))
 		return NULL;
 
@@ -231,6 +232,12 @@ out:
 	return NULL;
 }
 
+struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
+				unsigned long start, unsigned long end)
+{
+	return __get_vm_area_node(size, flags, start, end, -1);
+}
+
 /**
  *	get_vm_area  -  reserve a contingous kernel virtual area
  *
@@ -246,6 +253,11 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
 }
 
+struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, int node)
+{
+	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node);
+}
+
 /* Caller must hold vmlist_lock */
 struct vm_struct *__remove_vm_area(void *addr)
 {
@@ -342,7 +354,6 @@ void vfree(void *addr)
 	BUG_ON(in_interrupt());
 	__vunmap(addr, 1);
 }
-
 EXPORT_SYMBOL(vfree);
 
 /**
@@ -360,7 +371,6 @@ void vunmap(void *addr)
 	BUG_ON(in_interrupt());
 	__vunmap(addr, 0);
 }
-
 EXPORT_SYMBOL(vunmap);
 
 /**
@@ -392,10 +402,10 @@ void *vmap(struct page **pages, unsigned int count,
 
 	return area->addr;
 }
-
 EXPORT_SYMBOL(vmap);
 
-void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
+void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
+				pgprot_t prot, int node)
 {
 	struct page **pages;
 	unsigned int nr_pages, array_size, i;
@@ -406,9 +416,9 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
 	area->nr_pages = nr_pages;
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE)
-		pages = __vmalloc(array_size, gfp_mask, PAGE_KERNEL);
+		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
 	else
-		pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
+		pages = kmalloc_node(array_size, (gfp_mask & ~__GFP_HIGHMEM), node);
 	area->pages = pages;
 	if (!area->pages) {
 		remove_vm_area(area->addr);
@@ -418,7 +428,10 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
 	memset(area->pages, 0, array_size);
 
 	for (i = 0; i < area->nr_pages; i++) {
-		area->pages[i] = alloc_page(gfp_mask);
+		if (node < 0)
+			area->pages[i] = alloc_page(gfp_mask);
+		else
+			area->pages[i] = alloc_pages_node(node, gfp_mask, 0);
 		if (unlikely(!area->pages[i])) {
 			/* Successfully allocated i pages, free them in __vunmap() */
 			area->nr_pages = i;
@@ -435,18 +448,25 @@ fail:
 	return NULL;
 }
 
+void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
+{
+	return __vmalloc_area_node(area, gfp_mask, prot, -1);
+}
+
 /**
- *	__vmalloc  -  allocate virtually contiguous memory
+ *	__vmalloc_node  -  allocate virtually contiguous memory
  *
  *	@size:		allocation size
  *	@gfp_mask:	flags for the page level allocator
  *	@prot:		protection mask for the allocated pages
+ *	@node		node to use for allocation or -1
  *
  *	Allocate enough pages to cover @size from the page level
  *	allocator with @gfp_mask flags.  Map them into contiguous
  *	kernel virtual space, using a pagetable protection of @prot.
  */
-void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
+void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+			int node)
 {
 	struct vm_struct *area;
 
@@ -454,13 +474,18 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 	if (!size || (size >> PAGE_SHIFT) > num_physpages)
 		return NULL;
 
-	area = get_vm_area(size, VM_ALLOC);
+	area = get_vm_area_node(size, VM_ALLOC, node);
 	if (!area)
 		return NULL;
 
-	return __vmalloc_area(area, gfp_mask, prot);
+	return __vmalloc_area_node(area, gfp_mask, prot, node);
 }
+EXPORT_SYMBOL(__vmalloc_node);
 
+void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
+{
+	return __vmalloc_node(size, gfp_mask, prot, -1);
+}
 EXPORT_SYMBOL(__vmalloc);
 
 /**
@@ -478,9 +503,26 @@ void *vmalloc(unsigned long size)
 {
 	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
 }
-
 EXPORT_SYMBOL(vmalloc);
 
+/**
+ *	vmalloc_node  -  allocate memory on a specific node
+ *
+ *	@size:		allocation size
+ *	@node;		numa node
+ *
+ *	Allocate enough pages to cover @size from the page level
+ *	allocator and map them into contiguous kernel virtual space.
+ *
+ *	For tight cotrol over page level allocator and protection flags
+ *	use __vmalloc() instead.
+ */
+void *vmalloc_node(unsigned long size, int node)
+{
+	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
+}
+EXPORT_SYMBOL(vmalloc_node);
+
 #ifndef PAGE_KERNEL_EXEC
 # define PAGE_KERNEL_EXEC PAGE_KERNEL
 #endif
@@ -515,7 +557,6 @@ void *vmalloc_32(unsigned long size)
 {
 	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
 }
-
 EXPORT_SYMBOL(vmalloc_32);
 
 long vread(char *buf, char *addr, unsigned long count)