author     David S. Miller <davem@davemloft.net>  2010-05-31 08:46:45 -0400
committer  David S. Miller <davem@davemloft.net>  2010-05-31 08:46:45 -0400
commit     64960848abd18d0bcde3f53ffa7ed0b631e6b25d (patch)
tree       8424a1c550a98ce09f127425fde9b7b5f2f5027a /include/linux/slub_def.h
parent     2903037400a26e7c0cc93ab75a7d62abfacdf485 (diff)
parent     67a3e12b05e055c0415c556a315a3d3eb637e29e (diff)
Merge branch 'master' of /home/davem/src/GIT/linux-2.6/
Diffstat (limited to 'include/linux/slub_def.h')
-rw-r--r--  include/linux/slub_def.h | 19
1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 0249d4175bac..4ba59cfc1f75 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -75,12 +75,6 @@ struct kmem_cache {
 	int offset;		/* Free pointer offset. */
 	struct kmem_cache_order_objects oo;
 
-	/*
-	 * Avoid an extra cache line for UP, SMP and for the node local to
-	 * struct kmem_cache.
-	 */
-	struct kmem_cache_node local_node;
-
 	/* Allocation and freeing of slabs */
 	struct kmem_cache_order_objects max;
 	struct kmem_cache_order_objects min;
@@ -102,6 +96,9 @@ struct kmem_cache {
 	 */
 	int remote_node_defrag_ratio;
 	struct kmem_cache_node *node[MAX_NUMNODES];
+#else
+	/* Avoid an extra cache line for UP */
+	struct kmem_cache_node local_node;
 #endif
 };
 
@@ -116,6 +113,14 @@ struct kmem_cache {
 
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
+#ifndef ARCH_KMALLOC_MINALIGN
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#endif
+
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
 /*
  * Maximum kmalloc object size handled by SLUB. Larger object allocations
  * are passed through to the page allocator. The page allocator "fastpath"
@@ -132,7 +137,7 @@ struct kmem_cache {
 #ifdef CONFIG_ZONE_DMA
 #define SLUB_DMA __GFP_DMA
 /* Reserve extra caches for potential DMA use */
-#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT - 6)
+#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT)
 #else
 /* Disable DMA functionality */
 #define SLUB_DMA (__force gfp_t)0
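
The first two hunks relocate local_node: it stops being an unconditional member of struct kmem_cache and instead becomes the !CONFIG_NUMA alternative to the node[MAX_NUMNODES] pointer array, so only UP/non-NUMA builds pay for the embedded node. Below is a standalone userspace sketch of the resulting dual layout and an accessor in the style of SLUB's get_node() helper; the mock types, MAX_NUMNODES value, and main() are illustrative assumptions, only the member names come from the diff.

/*
 * Build:  cc demo.c                  -> UP layout (embedded local_node)
 *         cc -DCONFIG_NUMA demo.c    -> NUMA layout (per-node pointer array)
 */
#include <stdio.h>

#define MAX_NUMNODES 4			/* the kernel derives this from config */

struct kmem_cache_node { int nr_partial; };	/* mock: real struct is larger */

struct kmem_cache {
#ifdef CONFIG_NUMA
	struct kmem_cache_node *node[MAX_NUMNODES];
#else
	/* Avoid an extra cache line for UP */
	struct kmem_cache_node local_node;
#endif
};

/* Accessor hiding the config difference, in the style of SLUB's get_node() */
static struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
#ifdef CONFIG_NUMA
	return s->node[node];		/* may be NULL until the node is set up */
#else
	(void)node;			/* single embedded node on UP */
	return &s->local_node;
#endif
}

int main(void)
{
	struct kmem_cache s = {0};

	printf("sizeof(struct kmem_cache) = %zu\n", sizeof s);
	printf("node 0 at %p\n", (void *)get_node(&s, 0));
	return 0;
}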
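The third hunk adds #ifndef fallbacks so an architecture can pre-define ARCH_KMALLOC_MINALIGN or ARCH_SLAB_MINALIGN in its own headers and have that value win, with __alignof__(unsigned long long) as the default for everyone else. A minimal userspace demo of the same override pattern follows; the demo program itself is an assumption, only the two #define blocks are taken from the diff.

/*
 * Build:  cc align.c                            -> defaults apply
 *         cc -DARCH_KMALLOC_MINALIGN=16 align.c -> the override wins
 * (__alignof__ is a GNU extension supported by gcc and clang.)
 */
#include <stdio.h>

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

int main(void)
{
	printf("ARCH_KMALLOC_MINALIGN = %zu\n", (size_t)ARCH_KMALLOC_MINALIGN);
	printf("ARCH_SLAB_MINALIGN    = %zu\n", (size_t)ARCH_SLAB_MINALIGN);
	return 0;
}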