 include/linux/slab.h     | 126
 include/linux/slab_def.h |   4
 include/linux/slob_def.h |  46
 include/linux/slub_def.h |   6
 mm/slob.c                |  72
 5 files changed, 172 insertions(+), 82 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index cd6ab658553f..27402fea9b79 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -42,7 +42,6 @@ struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
 			void (*)(void *, struct kmem_cache *, unsigned long));
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
 void kmem_cache_free(struct kmem_cache *, void *);
 unsigned int kmem_cache_size(struct kmem_cache *);
@@ -61,16 +60,6 @@ int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
 		sizeof(struct __struct), __alignof__(struct __struct),\
 		(__flags), NULL, NULL)
 
-#ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-#else
-static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
-					gfp_t flags, int node)
-{
-	return kmem_cache_alloc(cachep, flags);
-}
-#endif
-
 /*
  * The largest kmalloc size supported by the slab allocators is
  * 32 megabyte (2^25) or the maximum allocatable page order if that is
@@ -89,7 +78,6 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
 /*
  * Common kmalloc functions provided by all allocators
  */
-void *__kmalloc(size_t, gfp_t);
 void *__kzalloc(size_t, gfp_t);
 void * __must_check krealloc(const void *, size_t, gfp_t);
 void kfree(const void *);
@@ -100,40 +88,6 @@ size_t ksize(const void *);
  * @n: number of elements.
  * @size: element size.
  * @flags: the type of memory to allocate.
- */
-static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
-{
-	if (n != 0 && size > ULONG_MAX / n)
-		return NULL;
-	return __kzalloc(n * size, flags);
-}
-
-/*
- * Allocator specific definitions. These are mainly used to establish optimized
- * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by selecting
- * the appropriate general cache at compile time.
- */
-
-#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB)
-#ifdef CONFIG_SLUB
-#include <linux/slub_def.h>
-#else
-#include <linux/slab_def.h>
-#endif /* !CONFIG_SLUB */
-#else
-
-/*
- * Fallback definitions for an allocator not wanting to provide
- * its own optimized kmalloc definitions (like SLOB).
- */
-
-/**
- * kmalloc - allocate memory
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
- *
- * kmalloc is the normal method of allocating memory
- * in the kernel.
  *
  * The @flags argument may be one of:
  *
@@ -141,7 +95,7 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
  *
  * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
  *
- * %GFP_ATOMIC - Allocation will not sleep.
+ * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
  *   For example, use this inside interrupt handlers.
  *
  * %GFP_HIGHUSER - Allocate pages from high memory.
@@ -150,18 +104,22 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
  *
  * %GFP_NOFS - Do not make any fs calls while trying to get memory.
  *
+ * %GFP_NOWAIT - Allocation will not sleep.
+ *
+ * %GFP_THISNODE - Allocate node-local memory only.
+ *
+ * %GFP_DMA - Allocation suitable for DMA.
+ *   Should only be used for kmalloc() caches. Otherwise, use a
+ *   slab created with SLAB_DMA.
+ *
  * Also it is possible to set different flags by OR'ing
  * in one or more of the following additional @flags:
  *
  * %__GFP_COLD - Request cache-cold pages instead of
  *   trying to return cache-warm pages.
  *
- * %__GFP_DMA - Request memory from the DMA-capable zone.
- *
  * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
  *
- * %__GFP_HIGHMEM - Allocated memory may be from highmem.
- *
  * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
  *   (think twice before using).
  *
@@ -171,24 +129,57 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
  * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
  *
  * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
+ *
+ * There are other flags available as well, but these are not intended
+ * for general use, and so are not documented here. For a full list of
+ * potential flags, always refer to linux/gfp.h.
  */
-static inline void *kmalloc(size_t size, gfp_t flags)
+static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
 {
-	return __kmalloc(size, flags);
+	if (n != 0 && size > ULONG_MAX / n)
+		return NULL;
+	return __kzalloc(n * size, flags);
 }
 
-/**
- * kzalloc - allocate memory. The memory is set to zero.
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kmalloc).
+/*
+ * Allocator specific definitions. These are mainly used to establish optimized
+ * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by
+ * selecting the appropriate general cache at compile time.
+ *
+ * Allocators must define at least:
+ *
+ *	kmem_cache_alloc()
+ *	__kmalloc()
+ *	kmalloc()
+ *	kzalloc()
+ *
+ * Those wishing to support NUMA must also define:
+ *
+ *	kmem_cache_alloc_node()
+ *	kmalloc_node()
+ *
+ * See each allocator definition file for additional comments and
+ * implementation notes.
  */
-static inline void *kzalloc(size_t size, gfp_t flags)
-{
-	return __kzalloc(size, flags);
-}
+#ifdef CONFIG_SLUB
+#include <linux/slub_def.h>
+#elif defined(CONFIG_SLOB)
+#include <linux/slob_def.h>
+#else
+#include <linux/slab_def.h>
 #endif
 
-#ifndef CONFIG_NUMA
+#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
+/**
+ * kmalloc_node - allocate memory from a specific node
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate (see kcalloc).
+ * @node: node to allocate from.
+ *
+ * kmalloc() for non-local nodes, used to allocate from a specific node
+ * if available. Equivalent to kmalloc() in the non-NUMA single-node
+ * case.
+ */
 static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return kmalloc(size, flags);
@@ -198,7 +189,15 @@ static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return __kmalloc(size, flags);
 }
-#endif /* !CONFIG_NUMA */
+
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
+
+static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
+					gfp_t flags, int node)
+{
+	return kmem_cache_alloc(cachep, flags);
+}
+#endif /* !CONFIG_NUMA && !CONFIG_SLOB */
 
 /*
  * kmalloc_track_caller is a special version of kmalloc that records the
@@ -245,4 +244,3 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SLAB_H */
-
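
[Illustration, not part of the patch.] The kcalloc() body that the hunks above relocate keeps its overflow guard: any n * size that would wrap is rejected before __kzalloc() is called. A minimal user-space sketch of the same check; demo_kcalloc and the calloc() fallback are stand-ins, not kernel API:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Same guard as kcalloc() in slab.h: refuse any n * size that would
 * wrap around, then hand off to a zeroing allocator. */
static void *demo_kcalloc(size_t n, size_t size)
{
	if (n != 0 && size > ULONG_MAX / n)
		return NULL;		/* n * size would overflow */
	return calloc(n, size);		/* stand-in for __kzalloc() */
}

int main(void)
{
	void *ok = demo_kcalloc(16, 8);			/* fits easily */
	void *bad = demo_kcalloc(SIZE_MAX / 2, 4);	/* would overflow */

	printf("ok=%p bad=%p\n", ok, bad);
	free(ok);
	return 0;
}

Compiled and run, the first call returns a pointer and the second returns NULL without ever computing the wrapped product.
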
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 8d81a60518e4..365d036c454a 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -25,6 +25,9 @@ struct cache_sizes {
 };
 extern struct cache_sizes malloc_sizes[];
 
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
+void *__kmalloc(size_t size, gfp_t flags);
+
 static inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
@@ -79,6 +82,7 @@ found:
 
 #ifdef CONFIG_NUMA
 extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
+extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
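
[Illustration, not part of the patch.] slab_def.h now declares kmem_cache_alloc() and __kmalloc() itself, which is SLAB's side of the contract the new slab.h comment spells out (kmem_cache_alloc, __kmalloc, kmalloc, kzalloc, plus the *_node variants for NUMA). A toy user-space sketch of that shape, with gfp_t reduced to a plain int and malloc() standing in for the real backend:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy "allocator definition": just enough to show the shape of the
 * contract. Purely illustrative, not the kernel API. */
struct kmem_cache { size_t size; };

static void *__kmalloc(size_t size, int flags)
{
	(void)flags;			/* gfp flags ignored in this toy */
	return malloc(size);
}

static void *kmalloc(size_t size, int flags)
{
	return __kmalloc(size, flags);
}

static void *kzalloc(size_t size, int flags)
{
	void *p = kmalloc(size, flags);

	if (p)
		memset(p, 0, size);
	return p;
}

static void *kmem_cache_alloc(struct kmem_cache *c, int flags)
{
	return kmalloc(c->size, flags);
}

int main(void)
{
	struct kmem_cache cache = { .size = 64 };
	void *a = kzalloc(32, 0);
	void *b = kmem_cache_alloc(&cache, 0);

	printf("a=%p b=%p\n", a, b);
	free(a);
	free(b);
	return 0;
}
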
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
new file mode 100644
index 000000000000..a2daf2d418a9
--- /dev/null
+++ b/include/linux/slob_def.h
@@ -0,0 +1,46 @@
+#ifndef __LINUX_SLOB_DEF_H
+#define __LINUX_SLOB_DEF_H
+
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+
+static inline void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+{
+	return kmem_cache_alloc_node(cachep, flags, -1);
+}
+
+void *__kmalloc_node(size_t size, gfp_t flags, int node);
+
+static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+	return __kmalloc_node(size, flags, node);
+}
+
+/**
+ * kmalloc - allocate memory
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate (see kcalloc).
+ *
+ * kmalloc is the normal method of allocating memory
+ * in the kernel.
+ */
+static inline void *kmalloc(size_t size, gfp_t flags)
+{
+	return __kmalloc_node(size, flags, -1);
+}
+
+static inline void *__kmalloc(size_t size, gfp_t flags)
+{
+	return kmalloc(size, flags);
+}
+
+/**
+ * kzalloc - allocate memory. The memory is set to zero.
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate (see kcalloc).
+ */
+static inline void *kzalloc(size_t size, gfp_t flags)
+{
+	return __kzalloc(size, flags);
+}
+
+#endif /* __LINUX_SLOB_DEF_H */
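
[Illustration, not part of the patch.] The new slob_def.h funnels every allocation through a node-taking entry point, passing -1 for "no node preference" so the common path stays on the local node. A small user-space sketch of that convention; alloc_on_node, alloc_anywhere, and NUMA_NO_PREFERENCE are invented names for illustration:

#include <stdio.h>
#include <stdlib.h>

#define NUMA_NO_PREFERENCE (-1)	/* SLOB itself simply uses -1 */

/* Node-aware entry point: the single function everything funnels into,
 * mirroring how slob_def.h sends kmalloc() to __kmalloc_node(..., -1). */
static void *alloc_on_node(size_t size, int node)
{
	if (node == NUMA_NO_PREFERENCE)
		printf("allocating %zu bytes on the current node\n", size);
	else
		printf("allocating %zu bytes on node %d\n", size, node);
	return malloc(size);
}

/* Node-agnostic wrapper, like SLOB's kmalloc(). */
static void *alloc_anywhere(size_t size)
{
	return alloc_on_node(size, NUMA_NO_PREFERENCE);
}

int main(void)
{
	void *a = alloc_anywhere(128);
	void *b = alloc_on_node(128, 2);

	free(a);
	free(b);
	return 0;
}
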
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 6207a3d8da71..a582f6771525 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -171,6 +171,9 @@ static inline struct kmem_cache *kmalloc_slab(size_t size)
 #define ZERO_SIZE_PTR ((void *)16)
 
 
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
+void *__kmalloc(size_t size, gfp_t flags);
+
 static inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
@@ -198,7 +201,8 @@ static inline void *kzalloc(size_t size, gfp_t flags)
 }
 
 #ifdef CONFIG_NUMA
-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
+void *__kmalloc_node(size_t size, gfp_t flags, int node);
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
diff --git a/mm/slob.c b/mm/slob.c
index 06e5e725fab3..b99b0ef2347e 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -3,6 +3,8 @@
  *
  * Matt Mackall <mpm@selenic.com> 12/30/03
  *
+ * NUMA support by Paul Mundt, 2007.
+ *
  * How SLOB works:
  *
  * The core of SLOB is a traditional K&R style heap allocator, with
@@ -10,7 +12,7 @@
  * allocator is as little as 2 bytes, however typically most architectures
  * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
  *
- * The slob heap is a linked list of pages from __get_free_page, and
+ * The slob heap is a linked list of pages from alloc_pages(), and
  * within each page, there is a singly-linked list of free blocks (slob_t).
  * The heap is grown on demand and allocation from the heap is currently
  * first-fit.
@@ -18,7 +20,7 @@
  * Above this is an implementation of kmalloc/kfree. Blocks returned
  * from kmalloc are prepended with a 4-byte header with the kmalloc size.
  * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
- * __get_free_pages directly, allocating compound pages so the page order
+ * alloc_pages() directly, allocating compound pages so the page order
  * does not have to be separately tracked, and also stores the exact
  * allocation size in page->private so that it can be used to accurately
  * provide ksize(). These objects are detected in kfree() because slob_page()
@@ -29,10 +31,23 @@
  * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
  * case the low-level allocator will fragment blocks to create the proper
  * alignment. Again, objects of page-size or greater are allocated by
- * calling __get_free_pages. As SLAB objects know their size, no separate
+ * calling alloc_pages(). As SLAB objects know their size, no separate
  * size bookkeeping is necessary and there is essentially no allocation
  * space overhead, and compound pages aren't needed for multi-page
  * allocations.
+ *
+ * NUMA support in SLOB is fairly simplistic, pushing most of the real
+ * logic down to the page allocator, and simply doing the node accounting
+ * on the upper levels. In the event that a node id is explicitly
+ * provided, alloc_pages_node() with the specified node id is used
+ * instead. The common case (or when the node id isn't explicitly provided)
+ * will default to the current node, as per numa_node_id().
+ *
+ * Node aware pages are still inserted in to the global freelist, and
+ * these are scanned for by matching against the node id encoded in the
+ * page flags. As a result, block allocations that can be satisfied from
+ * the freelist will only be done so on pages residing on the same node,
+ * in order to prevent random node placement.
  */
 
 #include <linux/kernel.h>
@@ -204,6 +219,23 @@ static int slob_last(slob_t *s)
 	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
 }
 
+static void *slob_new_page(gfp_t gfp, int order, int node)
+{
+	void *page;
+
+#ifdef CONFIG_NUMA
+	if (node != -1)
+		page = alloc_pages_node(node, gfp, order);
+	else
+#endif
+		page = alloc_pages(gfp, order);
+
+	if (!page)
+		return NULL;
+
+	return page_address(page);
+}
+
 /*
  * Allocate a slob block within a given slob_page sp.
  */
@@ -258,7 +290,7 @@ static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
 /*
  * slob_alloc: entry point into the slob allocator.
  */
-static void *slob_alloc(size_t size, gfp_t gfp, int align)
+static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 {
 	struct slob_page *sp;
 	slob_t *b = NULL;
@@ -267,6 +299,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align)
 	spin_lock_irqsave(&slob_lock, flags);
 	/* Iterate through each partially free page, try to find room */
 	list_for_each_entry(sp, &free_slob_pages, list) {
+#ifdef CONFIG_NUMA
+		/*
+		 * If there's a node specification, search for a partial
+		 * page with a matching node id in the freelist.
+		 */
+		if (node != -1 && page_to_nid(&sp->page) != node)
+			continue;
+#endif
+
 		if (sp->units >= SLOB_UNITS(size)) {
 			b = slob_page_alloc(sp, size, align);
 			if (b)
@@ -277,7 +318,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align)
 
 	/* Not enough space: must allocate a new page */
 	if (!b) {
-		b = (slob_t *)__get_free_page(gfp);
+		b = slob_new_page(gfp, 0, node);
 		if (!b)
 			return 0;
 		sp = (struct slob_page *)virt_to_page(b);
@@ -381,22 +422,20 @@ out:
 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
 #endif
 
-
-void *__kmalloc(size_t size, gfp_t gfp)
+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 
 	if (size < PAGE_SIZE - align) {
 		unsigned int *m;
-		m = slob_alloc(size + align, gfp, align);
+		m = slob_alloc(size + align, gfp, align, node);
 		if (m)
 			*m = size;
 		return (void *)m + align;
 	} else {
 		void *ret;
 
-		ret = (void *) __get_free_pages(gfp | __GFP_COMP,
-						get_order(size));
+		ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
 		if (ret) {
 			struct page *page;
 			page = virt_to_page(ret);
@@ -405,7 +444,7 @@ void *__kmalloc(size_t size, gfp_t gfp)
 		return ret;
 	}
 }
-EXPORT_SYMBOL(__kmalloc);
+EXPORT_SYMBOL(__kmalloc_node);
 
 /**
  * krealloc - reallocate memory. The contents will remain unchanged.
@@ -455,7 +494,6 @@ void kfree(const void *block)
 	} else
 		put_page(&sp->page);
 }
-
 EXPORT_SYMBOL(kfree);
 
 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
@@ -487,7 +525,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 {
 	struct kmem_cache *c;
 
-	c = slob_alloc(sizeof(struct kmem_cache), flags, 0);
+	c = slob_alloc(sizeof(struct kmem_cache), flags, 0, -1);
 
 	if (c) {
 		c->name = name;
@@ -517,21 +555,21 @@ void kmem_cache_destroy(struct kmem_cache *c)
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
-void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
+void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
 	void *b;
 
 	if (c->size < PAGE_SIZE)
-		b = slob_alloc(c->size, flags, c->align);
+		b = slob_alloc(c->size, flags, c->align, node);
 	else
-		b = (void *)__get_free_pages(flags, get_order(c->size));
+		b = slob_new_page(flags, get_order(c->size), node);
 
 	if (c->ctor)
 		c->ctor(b, c, 0);
 
 	return b;
 }
-EXPORT_SYMBOL(kmem_cache_alloc);
+EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
 {
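
[Illustration, not part of the patch.] With the changes above, slob_alloc() only takes blocks from freelist pages that already live on the requested node and otherwise falls back to slob_new_page() on that node. A toy user-space model of that scan policy; toy_page and toy_scan are invented names, not the kernel's slob_page:

#include <stdio.h>

/* A toy model of SLOB's node-aware first-fit scan: each partially free
 * "page" records which node it lives on and how many free units remain. */
struct toy_page {
	int node;
	int free_units;
};

static struct toy_page *toy_scan(struct toy_page *pages, int npages,
				 int units_needed, int node)
{
	for (int i = 0; i < npages; i++) {
		/* With an explicit node, only consider matching pages,
		 * as slob_alloc() does via page_to_nid(). */
		if (node != -1 && pages[i].node != node)
			continue;
		if (pages[i].free_units >= units_needed)
			return &pages[i];
	}
	return NULL;	/* caller would fall back to slob_new_page() */
}

int main(void)
{
	struct toy_page freelist[] = {
		{ .node = 0, .free_units = 10 },
		{ .node = 1, .free_units = 50 },
	};

	struct toy_page *any = toy_scan(freelist, 2, 20, -1);
	struct toy_page *node0 = toy_scan(freelist, 2, 20, 0);

	printf("any-node fit on node %d, node-0 fit %s\n",
	       any ? any->node : -1, node0 ? "found" : "missed (new page)");
	return 0;
}

Run as-is, the unconstrained lookup lands on node 1, while the node-0 lookup misses and would trigger a fresh page allocation on node 0.
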