author	Christoph Lameter <clameter@sgi.com>	2007-02-10 04:43:10 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-02-11 13:51:18 -0500
commit	4b51d66989218aad731a721b5b28c79bf5388c09 (patch)
tree	8ff7acbd219f699c20c2f1fd201ffb3db5a64062 /include/linux/slab_def.h
parent	66701b1499a3ff11882c8c4aef36e8eac86e17b1 (diff)
[PATCH] optional ZONE_DMA: optional ZONE_DMA in the VM
Make ZONE_DMA optional in core code.

- ifdef all code for ZONE_DMA and related definitions following the example
  for ZONE_DMA32 and ZONE_HIGHMEM.
- Without ZONE_DMA, ZONE_HIGHMEM and ZONE_DMA32 we get to a ZONES_SHIFT of 0.
- Modify the VM statistics to work correctly without a DMA zone.
- Modify slab to not create DMA slabs if there is no ZONE_DMA.

[akpm@osdl.org: cleanup]
[jdike@addtoit.com: build fix]
[apw@shadowen.org: Simplify calculation of the number of bits we need for ZONES_SHIFT]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Andi Kleen <ak@suse.de>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Matthew Wilcox <willy@debian.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
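The ZONES_SHIFT simplification folded in above amounts to deriving, at preprocess time, how many page->flags bits are needed to encode a zone index from MAX_NR_ZONES. A minimal sketch of that ladder (illustrative only; the exact header location and limits depend on the tree):

	#if MAX_NR_ZONES < 2
	#define ZONES_SHIFT 0	/* a single zone: no bits needed for the index */
	#elif MAX_NR_ZONES <= 2
	#define ZONES_SHIFT 1
	#elif MAX_NR_ZONES <= 4
	#define ZONES_SHIFT 2	/* e.g. DMA, DMA32, NORMAL and HIGHMEM */
	#else
	#error ZONES_SHIFT -- too many zones configured
	#endif

With ZONE_DMA, ZONE_DMA32 and ZONE_HIGHMEM all compiled out, MAX_NR_ZONES drops to 1 and ZONES_SHIFT collapses to 0, as the log notes.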
Diffstat (limited to 'include/linux/slab_def.h')
-rw-r--r--	include/linux/slab_def.h	30
1 file changed, 21 insertions(+), 9 deletions(-)
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 4b463e66ddea..5e4364644ed1 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -19,7 +19,9 @@
 struct cache_sizes {
 	size_t			cs_size;
 	struct kmem_cache	*cs_cachep;
+#ifdef CONFIG_ZONE_DMA
 	struct kmem_cache	*cs_dmacachep;
+#endif
 };
 extern struct cache_sizes malloc_sizes[];
 
@@ -39,9 +41,12 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 			__you_cannot_kmalloc_that_much();
 	}
 found:
-	return kmem_cache_alloc((flags & GFP_DMA) ?
-		malloc_sizes[i].cs_dmacachep :
-		malloc_sizes[i].cs_cachep, flags);
+#ifdef CONFIG_ZONE_DMA
+	if (flags & GFP_DMA)
+		return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
+						flags);
+#endif
+	return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags);
 	}
 	return __kmalloc(size, flags);
 }
@@ -62,9 +67,12 @@ static inline void *kzalloc(size_t size, gfp_t flags)
 			__you_cannot_kzalloc_that_much();
 	}
 found:
-	return kmem_cache_zalloc((flags & GFP_DMA) ?
-		malloc_sizes[i].cs_dmacachep :
-		malloc_sizes[i].cs_cachep, flags);
+#ifdef CONFIG_ZONE_DMA
+	if (flags & GFP_DMA)
+		return kmem_cache_zalloc(malloc_sizes[i].cs_dmacachep,
+						flags);
+#endif
+	return kmem_cache_zalloc(malloc_sizes[i].cs_cachep, flags);
 	}
 	return __kzalloc(size, flags);
 }
@@ -88,9 +96,13 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 			__you_cannot_kmalloc_that_much();
 	}
 found:
-	return kmem_cache_alloc_node((flags & GFP_DMA) ?
-		malloc_sizes[i].cs_dmacachep :
-		malloc_sizes[i].cs_cachep, flags, node);
+#ifdef CONFIG_ZONE_DMA
+	if (flags & GFP_DMA)
+		return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
+						flags, node);
+#endif
+	return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep,
+					flags, node);
 	}
 	return __kmalloc_node(size, flags, node);
 }