author     Christoph Lameter <clameter@sgi.com>                   2007-02-10 04:43:10 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-02-11 13:51:18 -0500
commit     4b51d66989218aad731a721b5b28c79bf5388c09 (patch)
tree       8ff7acbd219f699c20c2f1fd201ffb3db5a64062
parent     66701b1499a3ff11882c8c4aef36e8eac86e17b1 (diff)
[PATCH] optional ZONE_DMA: optional ZONE_DMA in the VM
Make ZONE_DMA optional in core code.

 - ifdef all code for ZONE_DMA and related definitions following the example
   for ZONE_DMA32 and ZONE_HIGHMEM.

 - Without ZONE_DMA, ZONE_HIGHMEM and ZONE_DMA32 we get to a ZONES_SHIFT of 0.

 - Modify the VM statistics to work correctly without a DMA zone.

 - Modify slab to not create DMA slabs if there is no ZONE_DMA.

[akpm@osdl.org: cleanup]
[jdike@addtoit.com: build fix]
[apw@shadowen.org: Simplify calculation of the number of bits we need for ZONES_SHIFT]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Andi Kleen <ak@suse.de>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Matthew Wilcox <willy@debian.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
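To make the ZONES_SHIFT claim above concrete, here is a worked table derived
from the __ZONE_COUNT calculation the patch introduces in mmzone.h (an
illustration, not part of the original changelog):

    ZONE_DMA  ZONE_DMA32  HIGHMEM    __ZONE_COUNT          ZONES_SHIFT
    n         n           n          0 + 0 + 1 + 0 = 1     0
    y         n           n          1 + 0 + 1 + 0 = 2     1
    y         n           y          1 + 0 + 1 + 1 = 3     2
    y         y           y          1 + 1 + 1 + 1 = 4     2

ZONE_NORMAL always contributes the constant 1, so a build with all optional
zones disabled needs no page->flags bits at all to encode the zone.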
-rw-r--r--  include/linux/gfp.h       |  2
-rw-r--r--  include/linux/mmzone.h    | 26
-rw-r--r--  include/linux/slab_def.h  | 30
-rw-r--r--  include/linux/vmstat.h    | 17
-rw-r--r--  mm/Kconfig                |  6
-rw-r--r--  mm/page_alloc.c           |  4
-rw-r--r--  mm/slab.c                 | 20
-rw-r--r--  mm/vmstat.c               |  8
8 files changed, 90 insertions, 23 deletions
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 063799ea6be0..2a7d15bcde46 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -85,8 +85,10 @@ struct vm_area_struct;
 
 static inline enum zone_type gfp_zone(gfp_t flags)
 {
+#ifdef CONFIG_ZONE_DMA
         if (flags & __GFP_DMA)
                 return ZONE_DMA;
+#endif
 #ifdef CONFIG_ZONE_DMA32
         if (flags & __GFP_DMA32)
                 return ZONE_DMA32;
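A behavioural note on the gfp.h hunk above (a sketch, assuming the unchanged
tail of gfp_zone() falls back to ZONE_NORMAL, as it did in this era): with
ZONE_DMA compiled out, a __GFP_DMA request is no longer trapped here and simply
falls through to the remaining checks.

        gfp_zone(GFP_KERNEL);   /* ZONE_NORMAL either way */
        gfp_zone(GFP_DMA);      /* CONFIG_ZONE_DMA: ZONE_DMA; otherwise the
                                 * __GFP_DMA test is compiled out and the
                                 * call ends up returning ZONE_NORMAL */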
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 398f2ec55f54..ee9e3143df4f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -96,6 +96,7 @@ struct per_cpu_pageset {
 #endif
 
 enum zone_type {
+#ifdef CONFIG_ZONE_DMA
         /*
          * ZONE_DMA is used when there are devices that are not able
          * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
@@ -116,6 +117,7 @@ enum zone_type {
          * <16M.
          */
         ZONE_DMA,
+#endif
 #ifdef CONFIG_ZONE_DMA32
         /*
          * x86_64 needs two ZONE_DMAs because it supports devices that are
@@ -152,11 +154,27 @@ enum zone_type {
          * match the requested limits. See gfp_zone() in include/linux/gfp.h
          */
 
-#if !defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_HIGHMEM)
+/*
+ * Count the active zones.  Note that the use of defined(X) outside
+ * #if and family is not necessarily defined so ensure we cannot use
+ * it later.  Use __ZONE_COUNT to work out how many shift bits we need.
+ */
+#define __ZONE_COUNT (                  \
+          defined(CONFIG_ZONE_DMA)     \
+        + defined(CONFIG_ZONE_DMA32)   \
+        + 1                            \
+        + defined(CONFIG_HIGHMEM)      \
+)
+#if __ZONE_COUNT < 2
+#define ZONES_SHIFT 0
+#elif __ZONE_COUNT <= 2
 #define ZONES_SHIFT 1
-#else
+#elif __ZONE_COUNT <= 4
 #define ZONES_SHIFT 2
+#else
+#error ZONES_SHIFT -- too many zones configured adjust calculation
 #endif
+#undef __ZONE_COUNT
 
 struct zone {
         /* Fields commonly accessed by the page allocator */
@@ -523,7 +541,11 @@ static inline int is_dma32(struct zone *zone)
 
 static inline int is_dma(struct zone *zone)
 {
+#ifdef CONFIG_ZONE_DMA
         return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
+#else
+        return 0;
+#endif
 }
 
 /* These two functions are used to setup the per zone pages min values */
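One subtlety in the __ZONE_COUNT hunk is worth spelling out: the defined()
operator is only specified while a #if/#elif line is being evaluated, so the
macro is safe here only because every use of __ZONE_COUNT sits on such a line,
and the trailing #undef guarantees it cannot leak into ordinary C, where its
expansion would have no defined meaning. A hypothetical misuse the #undef
forestalls:

        int zones = __ZONE_COUNT;       /* hypothetical: after the #undef this
                                         * fails to compile immediately instead
                                         * of expanding to something confusing */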
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 4b463e66ddea..5e4364644ed1 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -19,7 +19,9 @@
 struct cache_sizes {
         size_t                  cs_size;
         struct kmem_cache       *cs_cachep;
+#ifdef CONFIG_ZONE_DMA
         struct kmem_cache       *cs_dmacachep;
+#endif
 };
 extern struct cache_sizes malloc_sizes[];
 
@@ -39,9 +41,12 @@ static inline void *kmalloc(size_t size, gfp_t flags)
                         __you_cannot_kmalloc_that_much();
                 }
 found:
-                return kmem_cache_alloc((flags & GFP_DMA) ?
-                        malloc_sizes[i].cs_dmacachep :
-                        malloc_sizes[i].cs_cachep, flags);
+#ifdef CONFIG_ZONE_DMA
+                if (flags & GFP_DMA)
+                        return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
+                                                flags);
+#endif
+                return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags);
         }
         return __kmalloc(size, flags);
 }
@@ -62,9 +67,12 @@ static inline void *kzalloc(size_t size, gfp_t flags)
                         __you_cannot_kzalloc_that_much();
                 }
 found:
-                return kmem_cache_zalloc((flags & GFP_DMA) ?
-                        malloc_sizes[i].cs_dmacachep :
-                        malloc_sizes[i].cs_cachep, flags);
+#ifdef CONFIG_ZONE_DMA
+                if (flags & GFP_DMA)
+                        return kmem_cache_zalloc(malloc_sizes[i].cs_dmacachep,
+                                                flags);
+#endif
+                return kmem_cache_zalloc(malloc_sizes[i].cs_cachep, flags);
         }
         return __kzalloc(size, flags);
 }
@@ -88,9 +96,13 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
                         __you_cannot_kmalloc_that_much();
                 }
 found:
-                return kmem_cache_alloc_node((flags & GFP_DMA) ?
-                        malloc_sizes[i].cs_dmacachep :
-                        malloc_sizes[i].cs_cachep, flags, node);
+#ifdef CONFIG_ZONE_DMA
+                if (flags & GFP_DMA)
+                        return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
+                                                flags, node);
+#endif
+                return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep,
+                                                flags, node);
         }
         return __kmalloc_node(size, flags, node);
 }
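Caller-visible effect of the three slab_def.h hunks above, as a sketch (the
size is illustrative; the behaviour follows directly from the #ifdefs):

        void *a = kmalloc(64, GFP_KERNEL);  /* always malloc_sizes[i].cs_cachep */
        void *b = kmalloc(64, GFP_DMA);     /* CONFIG_ZONE_DMA: cs_dmacachep;
                                             * otherwise the GFP_DMA test is
                                             * compiled out and cs_cachep is
                                             * used for this request too */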
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 77caf911969c..7ba91f2839fa 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -19,6 +19,12 @@
  * generated will simply be the increment of a global address.
  */
 
+#ifdef CONFIG_ZONE_DMA
+#define DMA_ZONE(xx) xx##_DMA,
+#else
+#define DMA_ZONE(xx)
+#endif
+
 #ifdef CONFIG_ZONE_DMA32
 #define DMA32_ZONE(xx) xx##_DMA32,
 #else
@@ -31,7 +37,7 @@
 #define HIGHMEM_ZONE(xx)
 #endif
 
-#define FOR_ALL_ZONES(xx) xx##_DMA, DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx)
+#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx)
 
 enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
                 FOR_ALL_ZONES(PGALLOC),
@@ -96,7 +102,8 @@ static inline void vm_events_fold_cpu(int cpu)
 #endif /* CONFIG_VM_EVENT_COUNTERS */
 
 #define __count_zone_vm_events(item, zone, delta) \
-                __count_vm_events(item##_DMA + zone_idx(zone), delta)
+                __count_vm_events(item##_NORMAL - ZONE_NORMAL + \
+                zone_idx(zone), delta)
 
 /*
  * Zone based page accounting with per cpu differentials.
@@ -143,14 +150,16 @@ static inline unsigned long node_page_state(int node,
         struct zone *zones = NODE_DATA(node)->node_zones;
 
         return
+#ifdef CONFIG_ZONE_DMA
+                zone_page_state(&zones[ZONE_DMA], item) +
+#endif
 #ifdef CONFIG_ZONE_DMA32
                 zone_page_state(&zones[ZONE_DMA32], item) +
 #endif
-                zone_page_state(&zones[ZONE_NORMAL], item) +
 #ifdef CONFIG_HIGHMEM
                 zone_page_state(&zones[ZONE_HIGHMEM], item) +
 #endif
-                zone_page_state(&zones[ZONE_DMA], item);
+                zone_page_state(&zones[ZONE_NORMAL], item);
 }
 
 extern void zone_statistics(struct zonelist *, struct zone *);
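Why the new __count_zone_vm_events() index arithmetic works (a worked example,
assuming of the optional zones only CONFIG_ZONE_DMA is enabled): FOR_ALL_ZONES
lays the per-zone event counters out in the same order as enum zone_type, so
the two sequences stay parallel.

        /* enum zone_type:   ZONE_DMA == 0, ZONE_NORMAL == 1               */
        /* vm_event_item:    ..., PGALLOC_DMA, PGALLOC_NORMAL, ...         */

        /* DMA zone:    PGALLOC_NORMAL - ZONE_NORMAL + 0 == PGALLOC_DMA    */
        /* normal zone: PGALLOC_NORMAL - ZONE_NORMAL + 1 == PGALLOC_NORMAL */

Anchoring on _NORMAL is safe because ZONE_NORMAL always exists, whereas the old
item##_DMA anchor breaks as soon as ZONE_DMA is configured out.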
diff --git a/mm/Kconfig b/mm/Kconfig
index 50f7cfc15b78..79360cf519bf 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -161,3 +161,9 @@ config RESOURCES_64BIT
         default 64BIT
         help
           This option allows memory and IO resources to be 64 bit.
+
+config ZONE_DMA_FLAG
+        int
+        default "0" if !ZONE_DMA
+        default "1"
+
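For readers unfamiliar with valued Kconfig symbols: an int symbol is always
emitted into the generated configuration header, so C code can test it directly
rather than via #ifdef. Roughly (a sketch; in this era the generated file is
include/linux/autoconf.h):

        #define CONFIG_ZONE_DMA_FLAG 0  /* generated for a !ZONE_DMA config */
        #define CONFIG_ZONE_DMA_FLAG 1  /* generated when ZONE_DMA is selected */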
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6cff13840c6d..d461b23a27a1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -73,7 +73,9 @@ static void __free_pages_ok(struct page *page, unsigned int order);
  * don't need any ZONE_NORMAL reservation
  */
 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
+#ifdef CONFIG_ZONE_DMA
          256,
+#endif
 #ifdef CONFIG_ZONE_DMA32
          256,
 #endif
@@ -85,7 +87,9 @@ int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
 EXPORT_SYMBOL(totalram_pages);
 
 static char * const zone_names[MAX_NR_ZONES] = {
+#ifdef CONFIG_ZONE_DMA
          "DMA",
+#endif
 #ifdef CONFIG_ZONE_DMA32
          "DMA32",
 #endif
diff --git a/mm/slab.c b/mm/slab.c
index caa8f87e04eb..348396d691a1 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -793,8 +793,10 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
          * has cs_{dma,}cachep==NULL. Thus no special case
          * for large kmalloc calls required.
          */
+#ifdef CONFIG_ZONE_DMA
         if (unlikely(gfpflags & GFP_DMA))
                 return csizep->cs_dmacachep;
+#endif
         return csizep->cs_cachep;
 }
 
800 802
@@ -1493,13 +1495,15 @@ void __init kmem_cache_init(void)
                                         ARCH_KMALLOC_FLAGS|SLAB_PANIC,
                                         NULL, NULL);
                 }
-
-                sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
+#ifdef CONFIG_ZONE_DMA
+                sizes->cs_dmacachep = kmem_cache_create(
+                                        names->name_dma,
                                         sizes->cs_size,
                                         ARCH_KMALLOC_MINALIGN,
                                         ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
                                                 SLAB_PANIC,
                                         NULL, NULL);
+#endif
                 sizes++;
                 names++;
         }
@@ -2321,7 +2325,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
         cachep->slab_size = slab_size;
         cachep->flags = flags;
         cachep->gfpflags = 0;
-        if (flags & SLAB_CACHE_DMA)
+        if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
                 cachep->gfpflags |= GFP_DMA;
         cachep->buffer_size = size;
         cachep->reciprocal_buffer_size = reciprocal_value(size);
@@ -2643,10 +2647,12 @@ static void cache_init_objs(struct kmem_cache *cachep,
 
 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
 {
-        if (flags & GFP_DMA)
-                BUG_ON(!(cachep->gfpflags & GFP_DMA));
-        else
-                BUG_ON(cachep->gfpflags & GFP_DMA);
+        if (CONFIG_ZONE_DMA_FLAG) {
+                if (flags & GFP_DMA)
+                        BUG_ON(!(cachep->gfpflags & GFP_DMA));
+                else
+                        BUG_ON(cachep->gfpflags & GFP_DMA);
+        }
 }
 
 static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
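The kmem_flagcheck() rewrite shows the point of CONFIG_ZONE_DMA_FLAG: unlike an
#ifdef'ed-out region, the branch stays visible to the compiler (so it is still
parsed and type-checked) while constant-folding removes it from the object
code. A self-contained userspace sketch of the same pattern (the GFP_DMA value
and function names here are illustrative, not the kernel's):

        #include <assert.h>

        #define CONFIG_ZONE_DMA_FLAG 0          /* as on a !ZONE_DMA build */
        #define GFP_DMA 0x01u                   /* illustrative value only */

        static void flagcheck(unsigned cache_flags, unsigned alloc_flags)
        {
                /* Constant-folds to "if (0)": the optimizer drops the body,
                 * yet it cannot silently bit-rot the way dead #ifdef code
                 * can. */
                if (CONFIG_ZONE_DMA_FLAG) {
                        if (alloc_flags & GFP_DMA)
                                assert(cache_flags & GFP_DMA);
                        else
                                assert(!(cache_flags & GFP_DMA));
                }
        }

        int main(void)
        {
                flagcheck(0, GFP_DMA);  /* no-op here; checks on a DMA build */
                return 0;
        }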
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 5ffa8c27ead8..6c488d6ac425 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -408,6 +408,12 @@ const struct seq_operations fragmentation_op = {
         .show   = frag_show,
 };
 
+#ifdef CONFIG_ZONE_DMA
+#define TEXT_FOR_DMA(xx) xx "_dma",
+#else
+#define TEXT_FOR_DMA(xx)
+#endif
+
 #ifdef CONFIG_ZONE_DMA32
 #define TEXT_FOR_DMA32(xx) xx "_dma32",
 #else
@@ -420,7 +426,7 @@ const struct seq_operations fragmentation_op = {
 #define TEXT_FOR_HIGHMEM(xx)
 #endif
 
-#define TEXTS_FOR_ZONES(xx) xx "_dma", TEXT_FOR_DMA32(xx) xx "_normal", \
+#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
                                         TEXT_FOR_HIGHMEM(xx)
 
 static const char * const vmstat_text[] = {
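To make the vmstat.c macro change concrete, an expansion sketch (assuming
CONFIG_ZONE_DMA=y, CONFIG_ZONE_DMA32=n, CONFIG_HIGHMEM=y, and that
TEXT_FOR_HIGHMEM expands to xx "_high", in the part of the file not shown
here):

        TEXTS_FOR_ZONES("pgalloc")
                /* => "pgalloc_dma", "pgalloc_normal", "pgalloc_high", */

so /proc/vmstat only carries counter names for zones that actually exist in the
build.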