author     Al Viro <viro@ftp.linux.org.uk>        2005-10-07 02:46:04 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2005-10-08 18:00:57 -0400
commit     dd0fc66fb33cd610bc1a5db8a5e232d34879b4d7
tree       51f96a9db96293b352e358f66032e1f4ff79fafb /mm
parent     3b0e77bd144203a507eb191f7117d2c5004ea1de
[PATCH] gfp flags annotations - part 1
- added typedef unsigned int __nocast gfp_t;

- replaced __nocast uses for gfp flags with gfp_t - it gives exactly
  the same warnings as far as sparse is concerned, doesn't change the
  generated code (from gcc's point of view we replaced unsigned int
  with a typedef) and documents what's going on far better.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
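For reference, a minimal sketch of the annotation machinery this series relies on; hypothetical as a stand-alone snippet, since the real definitions live in the include/ headers changed elsewhere in the series, not in this mm/ patch:

	/*
	 * Sketch only: under sparse (__CHECKER__), __nocast expands to an
	 * attribute that makes sparse warn when values of the annotated
	 * type are implicitly mixed with other integer types; under gcc
	 * it expands to nothing, so gfp_t stays an ordinary unsigned int
	 * and the generated code is unchanged.
	 */
	#ifdef __CHECKER__
	# define __nocast	__attribute__((nocast))
	#else
	# define __nocast
	#endif

	typedef unsigned int __nocast gfp_t;

	/* a prototype converted below now documents its intent directly */
	void *kmalloc_node(size_t size, gfp_t flags, int node);

The checking only happens in a sparse run (e.g. "make C=2"); an ordinary build sees nothing but the typedef.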
Diffstat (limited to 'mm')
-rw-r--r--  mm/highmem.c      2
-rw-r--r--  mm/mempolicy.c    8
-rw-r--r--  mm/mempool.c      6
-rw-r--r--  mm/nommu.c        3
-rw-r--r--  mm/oom_kill.c     2
-rw-r--r--  mm/page_alloc.c  12
-rw-r--r--  mm/page_io.c      2
-rw-r--r--  mm/shmem.c        3
-rw-r--r--  mm/slab.c        34
-rw-r--r--  mm/swap_state.c   2
-rw-r--r--  mm/vmalloc.c      4
11 files changed, 37 insertions, 41 deletions
diff --git a/mm/highmem.c b/mm/highmem.c
index 400911599468..90e1861e2da0 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -30,7 +30,7 @@
 
 static mempool_t *page_pool, *isa_page_pool;
 
-static void *page_pool_alloc(unsigned int __nocast gfp_mask, void *data)
+static void *page_pool_alloc(gfp_t gfp_mask, void *data)
 {
 	unsigned int gfp = gfp_mask | (unsigned int) (long) data;
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 9033f0859aa8..37af443eb094 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -687,7 +687,7 @@ get_vma_policy(struct task_struct *task, struct vm_area_struct *vma, unsigned lo
 }
 
 /* Return a zonelist representing a mempolicy */
-static struct zonelist *zonelist_policy(unsigned int __nocast gfp, struct mempolicy *policy)
+static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
 {
 	int nd;
 
@@ -751,7 +751,7 @@ static unsigned offset_il_node(struct mempolicy *pol,
 
 /* Allocate a page in interleaved policy.
    Own path because it needs to do special accounting. */
-static struct page *alloc_page_interleave(unsigned int __nocast gfp, unsigned order, unsigned nid)
+static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, unsigned nid)
 {
 	struct zonelist *zl;
 	struct page *page;
@@ -789,7 +789,7 @@ static struct page *alloc_page_interleave(unsigned int __nocast gfp, unsigned or
  * Should be called with the mm_sem of the vma hold.
  */
 struct page *
-alloc_page_vma(unsigned int __nocast gfp, struct vm_area_struct *vma, unsigned long addr)
+alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
 {
 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
 
@@ -832,7 +832,7 @@ alloc_page_vma(unsigned int __nocast gfp, struct vm_area_struct *vma, unsigned l
  * 1) it's ok to take cpuset_sem (can WAIT), and
  * 2) allocating for current task (not interrupt).
  */
-struct page *alloc_pages_current(unsigned int __nocast gfp, unsigned order)
+struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 {
 	struct mempolicy *pol = current->mempolicy;
 
diff --git a/mm/mempool.c b/mm/mempool.c
index 65f2957b8d51..9e377ea700b2 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -112,7 +112,7 @@ EXPORT_SYMBOL(mempool_create_node);
  * while this function is running. mempool_alloc() & mempool_free()
  * might be called (eg. from IRQ contexts) while this function executes.
  */
-int mempool_resize(mempool_t *pool, int new_min_nr, unsigned int __nocast gfp_mask)
+int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
 {
 	void *element;
 	void **new_elements;
@@ -200,7 +200,7 @@ EXPORT_SYMBOL(mempool_destroy);
  * *never* fails when called from process contexts. (it might
  * fail if called from an IRQ context.)
  */
-void * mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask)
+void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
 {
 	void *element;
 	unsigned long flags;
@@ -276,7 +276,7 @@ EXPORT_SYMBOL(mempool_free);
 /*
  * A commonly used alloc and free fn.
  */
-void *mempool_alloc_slab(unsigned int __nocast gfp_mask, void *pool_data)
+void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
 {
 	kmem_cache_t *mem = (kmem_cache_t *) pool_data;
 	return kmem_cache_alloc(mem, gfp_mask);
diff --git a/mm/nommu.c b/mm/nommu.c
index 064d70442895..0ef241ae3763 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -157,8 +157,7 @@ void vfree(void *addr)
 	kfree(addr);
 }
 
-void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask,
-		pgprot_t prot)
+void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
 	/*
 	 * kmalloc doesn't like __GFP_HIGHMEM for some reason
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index ac3bf33e5370..d348b9035955 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -263,7 +263,7 @@ static struct mm_struct *oom_kill_process(struct task_struct *p)
  * OR try to be smart about which process to kill. Note that we
  * don't have to be perfect here, we just have to be good.
  */
-void out_of_memory(unsigned int __nocast gfp_mask, int order)
+void out_of_memory(gfp_t gfp_mask, int order)
 {
 	struct mm_struct *mm = NULL;
 	task_t * p;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ae2903339e71..cc1fe2672a31 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -671,7 +671,7 @@ void fastcall free_cold_page(struct page *page)
 	free_hot_cold_page(page, 1);
 }
 
-static inline void prep_zero_page(struct page *page, int order, unsigned int __nocast gfp_flags)
+static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
 {
 	int i;
 
@@ -686,7 +686,7 @@ static inline void prep_zero_page(struct page *page, int order, unsigned int __n
  * or two.
  */
 static struct page *
-buffered_rmqueue(struct zone *zone, int order, unsigned int __nocast gfp_flags)
+buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
 {
 	unsigned long flags;
 	struct page *page = NULL;
@@ -761,7 +761,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 }
 
 static inline int
-should_reclaim_zone(struct zone *z, unsigned int gfp_mask)
+should_reclaim_zone(struct zone *z, gfp_t gfp_mask)
 {
 	if (!z->reclaim_pages)
 		return 0;
@@ -774,7 +774,7 @@ should_reclaim_zone(struct zone *z, unsigned int gfp_mask)
  * This is the 'heart' of the zoned buddy allocator.
  */
 struct page * fastcall
-__alloc_pages(unsigned int __nocast gfp_mask, unsigned int order,
+__alloc_pages(gfp_t gfp_mask, unsigned int order,
 		struct zonelist *zonelist)
 {
 	const int wait = gfp_mask & __GFP_WAIT;
@@ -977,7 +977,7 @@ EXPORT_SYMBOL(__alloc_pages);
 /*
  * Common helper functions.
  */
-fastcall unsigned long __get_free_pages(unsigned int __nocast gfp_mask, unsigned int order)
+fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
 {
 	struct page * page;
 	page = alloc_pages(gfp_mask, order);
@@ -988,7 +988,7 @@ fastcall unsigned long __get_free_pages(unsigned int __nocast gfp_mask, unsigned
 
 EXPORT_SYMBOL(__get_free_pages);
 
-fastcall unsigned long get_zeroed_page(unsigned int __nocast gfp_mask)
+fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
 {
 	struct page * page;
 
diff --git a/mm/page_io.c b/mm/page_io.c
index 2e605a19ce57..330e00d6db00 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -19,7 +19,7 @@
 #include <linux/writeback.h>
 #include <asm/pgtable.h>
 
-static struct bio *get_swap_bio(unsigned int __nocast gfp_flags, pgoff_t index,
+static struct bio *get_swap_bio(gfp_t gfp_flags, pgoff_t index,
 		struct page *page, bio_end_io_t end_io)
 {
 	struct bio *bio;
diff --git a/mm/shmem.c b/mm/shmem.c
index 1f7aeb210c7b..ea064d89cda9 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -921,8 +921,7 @@ shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
 }
 
 static inline struct page *
-shmem_alloc_page(unsigned int __nocast gfp,struct shmem_inode_info *info,
-		 unsigned long idx)
+shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
 {
 	return alloc_page(gfp | __GFP_ZERO);
 }
diff --git a/mm/slab.c b/mm/slab.c
index 5cbbdfa6dd0e..d05c678bceb3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -650,8 +650,7 @@ static inline struct array_cache *ac_data(kmem_cache_t *cachep)
 	return cachep->array[smp_processor_id()];
 }
 
-static inline kmem_cache_t *__find_general_cachep(size_t size,
-		unsigned int __nocast gfpflags)
+static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags)
 {
 	struct cache_sizes *csizep = malloc_sizes;
 
@@ -675,8 +674,7 @@ static inline kmem_cache_t *__find_general_cachep(size_t size,
 	return csizep->cs_cachep;
 }
 
-kmem_cache_t *kmem_find_general_cachep(size_t size,
-		unsigned int __nocast gfpflags)
+kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
 {
 	return __find_general_cachep(size, gfpflags);
 }
@@ -1185,7 +1183,7 @@ __initcall(cpucache_init);
  * did not request dmaable memory, we might get it, but that
  * would be relatively rare and ignorable.
  */
-static void *kmem_getpages(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
+static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 {
 	struct page *page;
 	void *addr;
@@ -2048,7 +2046,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 
 /* Get the memory for a slab management obj. */
 static struct slab* alloc_slabmgmt(kmem_cache_t *cachep, void *objp,
-			int colour_off, unsigned int __nocast local_flags)
+			int colour_off, gfp_t local_flags)
 {
 	struct slab *slabp;
 
@@ -2149,7 +2147,7 @@ static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
  * Grow (by 1) the number of slabs within a cache. This is called by
  * kmem_cache_alloc() when there are no active objs left in a cache.
  */
-static int cache_grow(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
+static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 {
 	struct slab *slabp;
 	void *objp;
@@ -2356,7 +2354,7 @@ bad:
 #define check_slabp(x,y) do { } while(0)
 #endif
 
-static void *cache_alloc_refill(kmem_cache_t *cachep, unsigned int __nocast flags)
+static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
 {
 	int batchcount;
 	struct kmem_list3 *l3;
@@ -2456,7 +2454,7 @@ alloc_done:
 }
 
 static inline void
-cache_alloc_debugcheck_before(kmem_cache_t *cachep, unsigned int __nocast flags)
+cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags)
 {
 	might_sleep_if(flags & __GFP_WAIT);
 #if DEBUG
@@ -2467,7 +2465,7 @@ cache_alloc_debugcheck_before(kmem_cache_t *cachep, unsigned int __nocast flags)
 #if DEBUG
 static void *
 cache_alloc_debugcheck_after(kmem_cache_t *cachep,
-			unsigned int __nocast flags, void *objp, void *caller)
+			gfp_t flags, void *objp, void *caller)
 {
 	if (!objp)
 		return objp;
@@ -2510,7 +2508,7 @@ cache_alloc_debugcheck_after(kmem_cache_t *cachep,
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif
 
-static inline void *____cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
+static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 {
 	void* objp;
 	struct array_cache *ac;
@@ -2528,7 +2526,7 @@ static inline void *____cache_alloc(kmem_cache_t *cachep, unsigned int __nocast
 	return objp;
 }
 
-static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
+static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 {
 	unsigned long save_flags;
 	void* objp;
@@ -2787,7 +2785,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
  * Allocate an object from this cache. The flags are only relevant
  * if the cache has no available objects.
  */
-void *kmem_cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
+void *kmem_cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 {
 	return __cache_alloc(cachep, flags);
 }
@@ -2848,7 +2846,7 @@ out:
  * New and improved: it will now make sure that the object gets
  * put on the correct node list so that there is no false sharing.
  */
-void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
+void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 {
 	unsigned long save_flags;
 	void *ptr;
@@ -2875,7 +2873,7 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, i
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
-void *kmalloc_node(size_t size, unsigned int __nocast flags, int node)
+void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	kmem_cache_t *cachep;
 
@@ -2908,7 +2906,7 @@ EXPORT_SYMBOL(kmalloc_node);
  * platforms. For example, on i386, it means that the memory must come
  * from the first 16MB.
  */
-void *__kmalloc(size_t size, unsigned int __nocast flags)
+void *__kmalloc(size_t size, gfp_t flags)
 {
 	kmem_cache_t *cachep;
 
@@ -2997,7 +2995,7 @@ EXPORT_SYMBOL(kmem_cache_free);
  * @size: how many bytes of memory are required.
  * @flags: the type of memory to allocate.
  */
-void *kzalloc(size_t size, unsigned int __nocast flags)
+void *kzalloc(size_t size, gfp_t flags)
 {
 	void *ret = kmalloc(size, flags);
 	if (ret)
@@ -3603,7 +3601,7 @@ unsigned int ksize(const void *objp)
  * @s: the string to duplicate
  * @gfp: the GFP mask used in the kmalloc() call when allocating memory
  */
-char *kstrdup(const char *s, unsigned int __nocast gfp)
+char *kstrdup(const char *s, gfp_t gfp)
 {
 	size_t len;
 	char *buf;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index adbc2b426c2f..132164f7d0a7 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -68,7 +68,7 @@ void show_swap_cache_info(void)
  * but sets SwapCache flag and private instead of mapping and index.
  */
 static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
-		unsigned int __nocast gfp_mask)
+		gfp_t gfp_mask)
 {
 	int error;
 
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 13c3d82968ae..1150229b6366 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -395,7 +395,7 @@ void *vmap(struct page **pages, unsigned int count,
 
 EXPORT_SYMBOL(vmap);
 
-void *__vmalloc_area(struct vm_struct *area, unsigned int __nocast gfp_mask, pgprot_t prot)
+void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
 {
 	struct page **pages;
 	unsigned int nr_pages, array_size, i;
@@ -446,7 +446,7 @@ fail:
  * allocator with @gfp_mask flags. Map them into contiguous
  * kernel virtual space, using a pagetable protection of @prot.
  */
-void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask, pgprot_t prot)
+void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
 	struct vm_struct *area;
 