path: root/include
author	Linus Torvalds <torvalds@linux-foundation.org>	2010-03-05 17:35:40 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-03-05 17:35:40 -0500
commit	64096c17417380d8a472d096645f4cbc9406c987 (patch)
tree	e01d22012de7694206143b5606f7d51f225eb249 /include
parent	cc7889ff5ee7a1c1a2b5073c53db5ad9b76f14e2 (diff)
parent	1154fab73ccbab010cfaa272b6987c624cfd63c6 (diff)
Merge branch 'slab-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'slab-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  SLUB: Fix per-cpu merge conflict
  failslab: add ability to filter slab caches
  slab: fix regression in touched logic
  dma kmalloc handling fixes
  slub: remove impossible condition
  slab: initialize unused alien cache entry as NULL at alloc_alien_cache().
  SLUB: Make slub statistics use this_cpu_inc
  SLUB: this_cpu: Remove slub kmem_cache fields
  SLUB: Get rid of dynamic DMA kmalloc cache allocation
  SLUB: Use this_cpu operations in slub
Diffstat (limited to 'include')
-rw-r--r--  include/linux/fault-inject.h |  5 +++--
-rw-r--r--  include/linux/slab.h         |  5 +++++
-rw-r--r--  include/linux/slub_def.h     | 27 ++++++++++++---------------
3 files changed, 20 insertions(+), 17 deletions(-)
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index 06ca9b21dad2..7b64ad40e4ce 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -82,9 +82,10 @@ static inline void cleanup_fault_attr_dentries(struct fault_attr *attr)
 #endif /* CONFIG_FAULT_INJECTION */
 
 #ifdef CONFIG_FAILSLAB
-extern bool should_failslab(size_t size, gfp_t gfpflags);
+extern bool should_failslab(size_t size, gfp_t gfpflags, unsigned long flags);
 #else
-static inline bool should_failslab(size_t size, gfp_t gfpflags)
+static inline bool should_failslab(size_t size, gfp_t gfpflags,
+				unsigned long flags)
 {
 	return false;
 }
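The extra argument lets the failslab fault injector see the cache's flags at allocation time. A minimal sketch of how the hook could use it, assuming a cache_filter toggle like the one the "failslab: add ability to filter slab caches" commit introduces; this is illustrative, not the verbatim mm/failslab.c:

static bool cache_filter;		/* assumed debugfs-style toggle */
static struct fault_attr failslab_attr;	/* generic fault-injection state */

bool should_failslab(size_t size, gfp_t gfpflags, unsigned long cache_flags)
{
	/* __GFP_NOFAIL allocations must never be failed */
	if (gfpflags & __GFP_NOFAIL)
		return false;

	/* With the filter on, only caches marked SLAB_FAILSLAB may fail */
	if (cache_filter && !(cache_flags & SLAB_FAILSLAB))
		return false;

	return should_fail(&failslab_attr, size);
}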
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 2da8372519f5..488446289cab 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -70,6 +70,11 @@
 #else
 # define SLAB_NOTRACK		0x00000000UL
 #endif
+#ifdef CONFIG_FAILSLAB
+# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
+#else
+# define SLAB_FAILSLAB		0x00000000UL
+#endif
 
 /* The following flags affect the page allocator grouping pages by mobility */
 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
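With the flag defined, a cache can opt into filtered fault injection at creation time. An illustrative module snippet; my_cache and struct my_obj are made-up names, and the kmem_cache_create() signature is the one current at this merge:

struct my_obj {
	int id;
	char payload[120];
};

static struct kmem_cache *my_cache;

static int __init my_init(void)
{
	/* SLAB_FAILSLAB marks this cache as a fault-injection target */
	my_cache = kmem_cache_create("my_cache", sizeof(struct my_obj),
				     0, SLAB_FAILSLAB, NULL);
	return my_cache ? 0 : -ENOMEM;
}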
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 1e14beb23f9b..0249d4175bac 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -38,8 +38,6 @@ struct kmem_cache_cpu {
 	void **freelist;	/* Pointer to first free per cpu object */
 	struct page *page;	/* The slab from which we are allocating */
 	int node;		/* The node of the page (or -1 for debug) */
-	unsigned int offset;	/* Freepointer offset (in word units) */
-	unsigned int objsize;	/* Size of an object (from kmem_cache) */
 #ifdef CONFIG_SLUB_STATS
 	unsigned stat[NR_SLUB_STAT_ITEMS];
 #endif
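With the per-cpu duplicates gone, the free-pointer offset and object size are read from the shared struct kmem_cache on the fast path instead. A simplified sketch of the free-pointer chase through s->offset, close to the helper of the same name in mm/slub.c but trimmed here:

static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	/* The pointer to the next free object lives s->offset bytes in */
	return *(void **)(object + s->offset);
}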
@@ -69,6 +67,7 @@ struct kmem_cache_order_objects {
  * Slab cache management.
  */
 struct kmem_cache {
+	struct kmem_cache_cpu *cpu_slab;
 	/* Used for retriving partial slabs etc */
 	unsigned long flags;
 	int size;		/* The size of an object including meta data */
@@ -104,11 +103,6 @@ struct kmem_cache {
 	int remote_node_defrag_ratio;
 	struct kmem_cache_node *node[MAX_NUMNODES];
 #endif
-#ifdef CONFIG_SMP
-	struct kmem_cache_cpu *cpu_slab[NR_CPUS];
-#else
-	struct kmem_cache_cpu cpu_slab;
-#endif
 };
 
 /*
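The two hunks above swap the NR_CPUS-sized pointer array (and its UP special case) for a single percpu allocation reached with this_cpu operations. A rough sketch of the resulting pattern; stat() is close to what the "Make slub statistics use this_cpu_inc" commit does, while current_cpu_slab() is a purely illustrative helper:

static int alloc_kmem_cache_cpus(struct kmem_cache *s)
{
	/* One dynamically sized percpu area instead of NR_CPUS pointers */
	s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
	return s->cpu_slab != NULL;
}

static inline struct kmem_cache_cpu *current_cpu_slab(struct kmem_cache *s)
{
	/* Resolved via the percpu segment, no smp_processor_id() indexing */
	return __this_cpu_ptr(s->cpu_slab);
}

static inline void stat(struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	__this_cpu_inc(s->cpu_slab->stat[si]);	/* single instruction on x86 */
#endif
}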
@@ -135,11 +129,21 @@ struct kmem_cache {
 
 #define SLUB_PAGE_SHIFT	(PAGE_SHIFT + 2)
 
+#ifdef CONFIG_ZONE_DMA
+#define SLUB_DMA __GFP_DMA
+/* Reserve extra caches for potential DMA use */
+#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT - 6)
+#else
+/* Disable DMA functionality */
+#define SLUB_DMA	(__force gfp_t)0
+#define KMALLOC_CACHES SLUB_PAGE_SHIFT
+#endif
+
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
+extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
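The sizing macros are self-contained enough to check in isolation. A quick userspace arithmetic check, assuming the common 4 KiB page size (PAGE_SHIFT = 12); the actual placement of the DMA caches inside the array happens in mm/slub.c and is outside this diff:

#include <stdio.h>

/* Reproducing the new macros for a 4 KiB page size */
#define PAGE_SHIFT	12
#define SLUB_PAGE_SHIFT	(PAGE_SHIFT + 2)		/* 14 */
#define KMALLOC_CACHES	(2 * SLUB_PAGE_SHIFT - 6)	/* 22 */

int main(void)
{
	printf("regular kmalloc caches: %d\n", SLUB_PAGE_SHIFT);
	printf("total slots with DMA reserve: %d\n", KMALLOC_CACHES);
	printf("slots reserved for DMA: %d\n",
	       KMALLOC_CACHES - SLUB_PAGE_SHIFT);	/* 8 */
	return 0;
}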
@@ -207,13 +211,6 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 	return &kmalloc_caches[index];
 }
 
-#ifdef CONFIG_ZONE_DMA
-#define SLUB_DMA __GFP_DMA
-#else
-/* Disable DMA functionality */
-#define SLUB_DMA	(__force gfp_t)0
-#endif
-
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
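For completeness, illustrative callers of the two prototypes above; my_cache and struct my_obj are the hypothetical names from the earlier SLAB_FAILSLAB sketch, and error handling is trimmed to the minimum:

static int example_alloc(void)
{
	struct my_obj *obj = kmem_cache_alloc(my_cache, GFP_KERNEL);
	/* GFP_DMA allocations are served from the reserved DMA slots
	 * under CONFIG_ZONE_DMA */
	void *dma_buf = kmalloc(256, GFP_DMA);

	if (!obj || !dma_buf) {
		kfree(dma_buf);		/* kfree(NULL) is a no-op */
		if (obj)
			kmem_cache_free(my_cache, obj);
		return -ENOMEM;
	}

	/* ... use the allocations ... */

	kfree(dma_buf);
	kmem_cache_free(my_cache, obj);
	return 0;
}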