-rw-r--r-- | mm/slob.c | 422 |
1 file changed, 325 insertions, 97 deletions
@@ -7,53 +7,148 @@
7 | * | 7 | * |
8 | * The core of SLOB is a traditional K&R style heap allocator, with | 8 | * The core of SLOB is a traditional K&R style heap allocator, with |
9 | * support for returning aligned objects. The granularity of this | 9 | * support for returning aligned objects. The granularity of this |
10 | * allocator is 8 bytes on x86, though it's perhaps possible to reduce | 10 | * allocator is 4 bytes on 32-bit and 8 bytes on 64-bit, though it |
11 | * this to 4 if it's deemed worth the effort. The slob heap is a | 11 | * could be as low as 2 if the compiler alignment requirements allow. |
12 | * singly-linked list of pages from __get_free_page, grown on demand | 12 | * |
13 | * and allocation from the heap is currently first-fit. | 13 | * The slob heap is a linked list of pages from __get_free_page, and |
14 | * within each page, there is a singly-linked list of free blocks (slob_t). | ||
15 | * The heap is grown on demand and allocation from the heap is currently | ||
16 | * first-fit. | ||
14 | * | 17 | * |
15 | * Above this is an implementation of kmalloc/kfree. Blocks returned | 18 | * Above this is an implementation of kmalloc/kfree. Blocks returned |
16 | * from kmalloc are 8-byte aligned and prepended with a 8-byte header. | 19 | * from kmalloc are aligned to SLOB_MIN_ALIGN and prepended with a one-unit header. |
17 | * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls | 20 | * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls |
18 | * __get_free_pages directly so that it can return page-aligned blocks | 21 | * __get_free_pages directly so that it can return page-aligned blocks |
19 | * and keeps a linked list of such pages and their orders. These | 22 | * and keeps a linked list of such pages and their orders. These |
20 | * objects are detected in kfree() by their page alignment. | 23 | * objects are detected in kfree() by their page alignment. |
21 | * | 24 | * |
22 | * SLAB is emulated on top of SLOB by simply calling constructors and | 25 | * SLAB is emulated on top of SLOB by simply calling constructors and |
23 | * destructors for every SLAB allocation. Objects are returned with | 26 | * destructors for every SLAB allocation. Objects are returned with the |
24 | * the 8-byte alignment unless the SLAB_HWCACHE_ALIGN flag is | 27 | * minimum alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which |
25 | * set, in which case the low-level allocator will fragment blocks to | 28 | * case the low-level allocator will fragment blocks to create the proper |
26 | * create the proper alignment. Again, objects of page-size or greater | 29 | * alignment. Again, objects of page-size or greater are allocated by |
27 | * are allocated by calling __get_free_pages. As SLAB objects know | 30 | * calling __get_free_pages. As SLAB objects know their size, no separate |
28 | * their size, no separate size bookkeeping is necessary and there is | 31 | * size bookkeeping is necessary and there is essentially no allocation |
29 | * essentially no allocation space overhead. | 32 | * space overhead. |
30 | */ | 33 | */ |
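[Editor's note] To make the kmalloc layering described above concrete, here is a minimal userspace sketch of the "size header prepended to each block" idea. It is not kernel code: malloc stands in for the slob heap, the header is a plain size_t, the big-block path is omitted, and all toy_* names are inventions for illustration.

    #include <stdlib.h>

    /* Toy analogue of the kmalloc header scheme: stash the size in a small
     * header in front of the returned block so "kfree"/"ksize" can recover it. */
    struct toy_header { size_t size; };

    static void *toy_kmalloc(size_t size)
    {
            struct toy_header *h = malloc(sizeof(*h) + size);
            if (!h)
                    return NULL;
            h->size = size;            /* bookkeeping lives just before the object */
            return h + 1;              /* caller sees only the payload */
    }

    static size_t toy_ksize(const void *p)
    {
            return ((const struct toy_header *)p - 1)->size;
    }

    static void toy_kfree(void *p)
    {
            if (p)
                    free((struct toy_header *)p - 1);
    }

    int main(void)
    {
            char *p = toy_kmalloc(100);
            size_t sz = p ? toy_ksize(p) : 0;    /* 100 */
            toy_kfree(p);
            return sz == 100 ? 0 : 1;
    }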
31 | 34 | ||
35 | #include <linux/kernel.h> | ||
32 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
33 | #include <linux/mm.h> | 37 | #include <linux/mm.h> |
34 | #include <linux/cache.h> | 38 | #include <linux/cache.h> |
35 | #include <linux/init.h> | 39 | #include <linux/init.h> |
36 | #include <linux/module.h> | 40 | #include <linux/module.h> |
37 | #include <linux/timer.h> | ||
38 | #include <linux/rcupdate.h> | 41 | #include <linux/rcupdate.h> |
42 | #include <linux/list.h> | ||
43 | #include <asm/atomic.h> | ||
44 | |||
45 | /* SLOB_MIN_ALIGN == sizeof(long) */ | ||
46 | #if BITS_PER_LONG == 32 | ||
47 | #define SLOB_MIN_ALIGN 4 | ||
48 | #else | ||
49 | #define SLOB_MIN_ALIGN 8 | ||
50 | #endif | ||
39 | 51 | ||
52 | /* | ||
53 | * slob_block has a field 'units', which indicates size of block if +ve, | ||
54 | * or offset of next block if -ve (in SLOB_UNITs). | ||
55 | * | ||
56 | * Free blocks of size 1 unit simply contain the offset of the next block. | ||
57 | * Those with larger size contain their size in the first SLOB_UNIT of | ||
58 | * memory, and the offset of the next free block in the second SLOB_UNIT. | ||
59 | */ | ||
60 | #if PAGE_SIZE <= (32767 * SLOB_MIN_ALIGN) | ||
61 | typedef s16 slobidx_t; | ||
62 | #else | ||
63 | typedef s32 slobidx_t; | ||
64 | #endif | ||
65 | |||
66 | /* | ||
67 | * Align struct slob_block to long for now, but can some embedded | ||
68 | * architectures get away with less? | ||
69 | */ | ||
40 | struct slob_block { | 70 | struct slob_block { |
41 | int units; | 71 | slobidx_t units; |
42 | struct slob_block *next; | 72 | } __attribute__((aligned(SLOB_MIN_ALIGN))); |
43 | }; | ||
44 | typedef struct slob_block slob_t; | 73 | typedef struct slob_block slob_t; |
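[Editor's note] A worked example of the units encoding described above, with all offsets counted in SLOB_UNITs from the start of the page: a 12-unit free block whose next free neighbour begins at unit offset 200 stores 12 in its first unit and 200 in its second; a 1-unit free block whose successor begins at offset 310 has room for only one field, so it stores -310, and the negative sign itself implies a size of one unit. The set_slob(), slob_units() and slob_next() helpers further down encode and decode exactly this.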
45 | 74 | ||
75 | /* | ||
76 | * We use struct page fields to manage some slob allocation aspects, | ||
77 | * however to avoid the horrible mess in include/linux/mm_types.h, we'll | ||
78 | * just define our own struct page type variant here. | ||
79 | */ | ||
80 | struct slob_page { | ||
81 | union { | ||
82 | struct { | ||
83 | unsigned long flags; /* mandatory */ | ||
84 | atomic_t _count; /* mandatory */ | ||
85 | slobidx_t units; /* free units left in page */ | ||
86 | unsigned long pad[2]; | ||
87 | slob_t *free; /* first free slob_t in page */ | ||
88 | struct list_head list; /* linked list of free pages */ | ||
89 | }; | ||
90 | struct page page; | ||
91 | }; | ||
92 | }; | ||
93 | static inline void struct_slob_page_wrong_size(void) | ||
94 | { BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); } | ||
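[Editor's note] The union-over-struct-page layout guarded by a BUILD_BUG_ON size check is a generic "overlay view" pattern. A self-contained C11 illustration of the same trick, using made-up names (generic_rec and my_view are not kernel types, and _Static_assert stands in for BUILD_BUG_ON):

    /* A private view struct shares layout with a generic record via a union;
     * a compile-time check keeps the two sizes in lockstep. */
    struct generic_rec { unsigned long a, b, c, d; };

    struct my_view {
            union {
                    struct {
                            unsigned long flags;
                            unsigned long count;
                            unsigned long spare[2];
                    };
                    struct generic_rec rec;
            };
    };

    _Static_assert(sizeof(struct my_view) == sizeof(struct generic_rec),
                   "my_view must mirror generic_rec");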
95 | |||
96 | /* | ||
97 | * free_slob_page: call before a slob_page is returned to the page allocator. | ||
98 | */ | ||
99 | static inline void free_slob_page(struct slob_page *sp) | ||
100 | { | ||
101 | reset_page_mapcount(&sp->page); | ||
102 | sp->page.mapping = NULL; | ||
103 | } | ||
104 | |||
105 | /* | ||
106 | * All (partially) free slob pages go on this list. | ||
107 | */ | ||
108 | static LIST_HEAD(free_slob_pages); | ||
109 | |||
110 | /* | ||
111 | * slob_page: True for all slob pages (false for bigblock pages) | ||
112 | */ | ||
113 | static inline int slob_page(struct slob_page *sp) | ||
114 | { | ||
115 | return test_bit(PG_active, &sp->flags); | ||
116 | } | ||
117 | |||
118 | static inline void set_slob_page(struct slob_page *sp) | ||
119 | { | ||
120 | __set_bit(PG_active, &sp->flags); | ||
121 | } | ||
122 | |||
123 | static inline void clear_slob_page(struct slob_page *sp) | ||
124 | { | ||
125 | __clear_bit(PG_active, &sp->flags); | ||
126 | } | ||
127 | |||
128 | /* | ||
129 | * slob_page_free: true for pages on free_slob_pages list. | ||
130 | */ | ||
131 | static inline int slob_page_free(struct slob_page *sp) | ||
132 | { | ||
133 | return test_bit(PG_private, &sp->flags); | ||
134 | } | ||
135 | |||
136 | static inline void set_slob_page_free(struct slob_page *sp) | ||
137 | { | ||
138 | list_add(&sp->list, &free_slob_pages); | ||
139 | __set_bit(PG_private, &sp->flags); | ||
140 | } | ||
141 | |||
142 | static inline void clear_slob_page_free(struct slob_page *sp) | ||
143 | { | ||
144 | list_del(&sp->list); | ||
145 | __clear_bit(PG_private, &sp->flags); | ||
146 | } | ||
147 | |||
46 | #define SLOB_UNIT sizeof(slob_t) | 148 | #define SLOB_UNIT sizeof(slob_t) |
47 | #define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT) | 149 | #define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT) |
48 | #define SLOB_ALIGN L1_CACHE_BYTES | 150 | #define SLOB_ALIGN L1_CACHE_BYTES |
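[Editor's note] SLOB_UNITS is plain round-up division. A minimal check of the arithmetic, assuming a 4-byte slob_t as on a 32-bit build (the TOY_* names are stand-ins, not kernel macros):

    #include <assert.h>

    #define TOY_UNIT 4      /* sizeof(slob_t) on a 32-bit build */
    #define TOY_UNITS(size) (((size) + TOY_UNIT - 1) / TOY_UNIT)

    int main(void)
    {
            assert(TOY_UNITS(1) == 1);      /* anything up to one unit costs one unit */
            assert(TOY_UNITS(4) == 1);
            assert(TOY_UNITS(5) == 2);      /* round up, never down */
            assert(TOY_UNITS(100) == 25);
            assert(TOY_UNITS(4096) == 1024);        /* a whole 4K page */
            return 0;
    }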
49 | 151 | ||
50 | struct bigblock { | ||
51 | int order; | ||
52 | void *pages; | ||
53 | struct bigblock *next; | ||
54 | }; | ||
55 | typedef struct bigblock bigblock_t; | ||
56 | |||
57 | /* | 152 | /* |
58 | * struct slob_rcu is inserted at the tail of allocated slob blocks, which | 153 | * struct slob_rcu is inserted at the tail of allocated slob blocks, which |
59 | * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free | 154 | * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free |
@@ -64,103 +159,240 @@ struct slob_rcu {
64 | int size; | 159 | int size; |
65 | }; | 160 | }; |
66 | 161 | ||
67 | static slob_t arena = { .next = &arena, .units = 1 }; | 162 | /* |
68 | static slob_t *slobfree = &arena; | 163 | * slob_lock protects all slob allocator structures. |
69 | static bigblock_t *bigblocks; | 164 | */ |
70 | static DEFINE_SPINLOCK(slob_lock); | 165 | static DEFINE_SPINLOCK(slob_lock); |
71 | static DEFINE_SPINLOCK(block_lock); | ||
72 | 166 | ||
73 | static void slob_free(void *b, int size); | 167 | /* |
74 | static void slob_timer_cbk(void); | 168 | * Encode the given size and next info into a free slob block s. |
169 | */ | ||
170 | static void set_slob(slob_t *s, slobidx_t size, slob_t *next) | ||
171 | { | ||
172 | slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK); | ||
173 | slobidx_t offset = next - base; | ||
75 | 174 | ||
175 | if (size > 1) { | ||
176 | s[0].units = size; | ||
177 | s[1].units = offset; | ||
178 | } else | ||
179 | s[0].units = -offset; | ||
180 | } | ||
76 | 181 | ||
77 | static void *slob_alloc(size_t size, gfp_t gfp, int align) | 182 | /* |
183 | * Return the size of a slob block. | ||
184 | */ | ||
185 | static slobidx_t slob_units(slob_t *s) | ||
186 | { | ||
187 | if (s->units > 0) | ||
188 | return s->units; | ||
189 | return 1; | ||
190 | } | ||
191 | |||
192 | /* | ||
193 | * Return the next free slob block pointer after this one. | ||
194 | */ | ||
195 | static slob_t *slob_next(slob_t *s) | ||
196 | { | ||
197 | slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK); | ||
198 | slobidx_t next; | ||
199 | |||
200 | if (s[0].units < 0) | ||
201 | next = -s[0].units; | ||
202 | else | ||
203 | next = s[1].units; | ||
204 | return base+next; | ||
205 | } | ||
206 | |||
207 | /* | ||
208 | * Returns true if s is the last free block in its page. | ||
209 | */ | ||
210 | static int slob_last(slob_t *s) | ||
211 | { | ||
212 | return !((unsigned long)slob_next(s) & ~PAGE_MASK); | ||
213 | } | ||
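[Editor's note] The encoding written by set_slob() and read back by slob_units()/slob_next() round-trips cleanly. Below is a userspace sketch of the same logic, reusing the numbers from the worked example earlier; a static array stands in for the page, the page base is a global instead of being derived with PAGE_MASK, and all toy_* names are inventions.

    #include <assert.h>
    #include <stdint.h>

    #define TOY_PAGE_UNITS 2048             /* a 4K page of 2-byte toy units */

    typedef int16_t toyidx_t;
    typedef struct { toyidx_t units; } toy_slob_t;

    static toy_slob_t page[TOY_PAGE_UNITS]; /* stands in for one slob page */

    /* Mirror of set_slob(): big blocks store size + next offset in their first
     * two units, 1-unit blocks store only the negated next offset. */
    static void toy_set(toy_slob_t *s, toyidx_t size, toy_slob_t *next)
    {
            toyidx_t offset = next - page;

            if (size > 1) {
                    s[0].units = size;
                    s[1].units = offset;
            } else
                    s[0].units = -offset;
    }

    /* Mirrors of slob_units() and slob_next() */
    static toyidx_t toy_units(toy_slob_t *s)
    {
            return s->units > 0 ? s->units : 1;
    }

    static toy_slob_t *toy_next(toy_slob_t *s)
    {
            return page + (s[0].units < 0 ? -s[0].units : s[1].units);
    }

    int main(void)
    {
            /* 12-unit free block at offset 40, next free block at offset 200 */
            toy_set(page + 40, 12, page + 200);
            assert(toy_units(page + 40) == 12);
            assert(toy_next(page + 40) == page + 200);

            /* 1-unit free block at offset 300, next free block at offset 310 */
            toy_set(page + 300, 1, page + 310);
            assert(page[300].units == -310);        /* negative: only the offset fits */
            assert(toy_units(page + 300) == 1);
            assert(toy_next(page + 300) == page + 310);

            return 0;
    }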
214 | |||
215 | /* | ||
216 | * Allocate a slob block within a given slob_page sp. | ||
217 | */ | ||
218 | static void *slob_page_alloc(struct slob_page *sp, size_t size, int align) | ||
78 | { | 219 | { |
79 | slob_t *prev, *cur, *aligned = 0; | 220 | slob_t *prev, *cur, *aligned = 0; |
80 | int delta = 0, units = SLOB_UNITS(size); | 221 | int delta = 0, units = SLOB_UNITS(size); |
81 | unsigned long flags; | ||
82 | 222 | ||
83 | spin_lock_irqsave(&slob_lock, flags); | 223 | for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) { |
84 | prev = slobfree; | 224 | slobidx_t avail = slob_units(cur); |
85 | for (cur = prev->next; ; prev = cur, cur = cur->next) { | 225 | |
86 | if (align) { | 226 | if (align) { |
87 | aligned = (slob_t *)ALIGN((unsigned long)cur, align); | 227 | aligned = (slob_t *)ALIGN((unsigned long)cur, align); |
88 | delta = aligned - cur; | 228 | delta = aligned - cur; |
89 | } | 229 | } |
90 | if (cur->units >= units + delta) { /* room enough? */ | 230 | if (avail >= units + delta) { /* room enough? */ |
231 | slob_t *next; | ||
232 | |||
91 | if (delta) { /* need to fragment head to align? */ | 233 | if (delta) { /* need to fragment head to align? */ |
92 | aligned->units = cur->units - delta; | 234 | next = slob_next(cur); |
93 | aligned->next = cur->next; | 235 | set_slob(aligned, avail - delta, next); |
94 | cur->next = aligned; | 236 | set_slob(cur, delta, aligned); |
95 | cur->units = delta; | ||
96 | prev = cur; | 237 | prev = cur; |
97 | cur = aligned; | 238 | cur = aligned; |
239 | avail = slob_units(cur); | ||
98 | } | 240 | } |
99 | 241 | ||
100 | if (cur->units == units) /* exact fit? */ | 242 | next = slob_next(cur); |
101 | prev->next = cur->next; /* unlink */ | 243 | if (avail == units) { /* exact fit? unlink. */ |
102 | else { /* fragment */ | 244 | if (prev) |
103 | prev->next = cur + units; | 245 | set_slob(prev, slob_units(prev), next); |
104 | prev->next->units = cur->units - units; | 246 | else |
105 | prev->next->next = cur->next; | 247 | sp->free = next; |
106 | cur->units = units; | 248 | } else { /* fragment */ |
249 | if (prev) | ||
250 | set_slob(prev, slob_units(prev), cur + units); | ||
251 | else | ||
252 | sp->free = cur + units; | ||
253 | set_slob(cur + units, avail - units, next); | ||
107 | } | 254 | } |
108 | 255 | ||
109 | slobfree = prev; | 256 | sp->units -= units; |
110 | spin_unlock_irqrestore(&slob_lock, flags); | 257 | if (!sp->units) |
258 | clear_slob_page_free(sp); | ||
111 | return cur; | 259 | return cur; |
112 | } | 260 | } |
113 | if (cur == slobfree) { | 261 | if (slob_last(cur)) |
114 | spin_unlock_irqrestore(&slob_lock, flags); | 262 | return NULL; |
115 | 263 | } | |
116 | if (size == PAGE_SIZE) /* trying to shrink arena? */ | 264 | } |
117 | return 0; | ||
118 | 265 | ||
119 | cur = (slob_t *)__get_free_page(gfp); | 266 | /* |
120 | if (!cur) | 267 | * slob_alloc: entry point into the slob allocator. |
121 | return 0; | 268 | */ |
269 | static void *slob_alloc(size_t size, gfp_t gfp, int align) | ||
270 | { | ||
271 | struct slob_page *sp; | ||
272 | slob_t *b = NULL; | ||
273 | unsigned long flags; | ||
122 | 274 | ||
123 | slob_free(cur, PAGE_SIZE); | 275 | spin_lock_irqsave(&slob_lock, flags); |
124 | spin_lock_irqsave(&slob_lock, flags); | 276 | /* Iterate through each partially free page, try to find room */ |
125 | cur = slobfree; | 277 | list_for_each_entry(sp, &free_slob_pages, list) { |
278 | if (sp->units >= SLOB_UNITS(size)) { | ||
279 | b = slob_page_alloc(sp, size, align); | ||
280 | if (b) | ||
281 | break; | ||
126 | } | 282 | } |
127 | } | 283 | } |
284 | spin_unlock_irqrestore(&slob_lock, flags); | ||
285 | |||
286 | /* Not enough space: must allocate a new page */ | ||
287 | if (!b) { | ||
288 | b = (slob_t *)__get_free_page(gfp); | ||
289 | if (!b) | ||
290 | return 0; | ||
291 | sp = (struct slob_page *)virt_to_page(b); | ||
292 | set_slob_page(sp); | ||
293 | |||
294 | spin_lock_irqsave(&slob_lock, flags); | ||
295 | sp->units = SLOB_UNITS(PAGE_SIZE); | ||
296 | sp->free = b; | ||
297 | INIT_LIST_HEAD(&sp->list); | ||
298 | set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE)); | ||
299 | set_slob_page_free(sp); | ||
300 | b = slob_page_alloc(sp, size, align); | ||
301 | BUG_ON(!b); | ||
302 | spin_unlock_irqrestore(&slob_lock, flags); | ||
303 | } | ||
304 | return b; | ||
128 | } | 305 | } |
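[Editor's note] The shape of slob_alloc() above — scan the existing partially free pages first-fit, and only fall back to the page allocator when nothing fits — is a common pool-growing pattern. A minimal sketch of that control flow, with calloc standing in for __get_free_page, a trivial bump pointer standing in for slob_page_alloc(), and no locking; all names here are inventions:

    #include <stddef.h>
    #include <stdlib.h>

    #define POOL_SIZE 4096

    struct pool {
            struct pool *next;              /* analogue of the free_slob_pages list */
            size_t used;                    /* bump pointer instead of a real free list */
            unsigned char mem[POOL_SIZE];
    };

    static struct pool *pools;

    /* Analogue of slob_page_alloc(): try to carve the request out of one pool. */
    static void *pool_alloc(struct pool *p, size_t size)
    {
            if (POOL_SIZE - p->used < size)
                    return NULL;
            p->used += size;
            return p->mem + p->used - size;
    }

    /* Analogue of slob_alloc(): first fit over existing pools, grow on demand. */
    static void *grow_alloc(size_t size)
    {
            struct pool *p;
            void *b;

            for (p = pools; p; p = p->next) {
                    b = pool_alloc(p, size);
                    if (b)
                            return b;       /* first pool with room wins */
            }

            p = calloc(1, sizeof(*p));      /* nothing fits: grab a fresh pool */
            if (!p)
                    return NULL;
            p->next = pools;
            pools = p;
            return pool_alloc(p, size);
    }

    int main(void)
    {
            /* second call does not fit in the first pool, so a new one is grown */
            return grow_alloc(100) && grow_alloc(4000) ? 0 : 1;
    }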
129 | 306 | ||
307 | /* | ||
308 | * slob_free: entry point into the slob allocator. | ||
309 | */ | ||
130 | static void slob_free(void *block, int size) | 310 | static void slob_free(void *block, int size) |
131 | { | 311 | { |
132 | slob_t *cur, *b = (slob_t *)block; | 312 | struct slob_page *sp; |
313 | slob_t *prev, *next, *b = (slob_t *)block; | ||
314 | slobidx_t units; | ||
133 | unsigned long flags; | 315 | unsigned long flags; |
134 | 316 | ||
135 | if (!block) | 317 | if (!block) |
136 | return; | 318 | return; |
319 | BUG_ON(!size); | ||
137 | 320 | ||
138 | if (size) | 321 | sp = (struct slob_page *)virt_to_page(block); |
139 | b->units = SLOB_UNITS(size); | 322 | units = SLOB_UNITS(size); |
140 | 323 | ||
141 | /* Find reinsertion point */ | ||
142 | spin_lock_irqsave(&slob_lock, flags); | 324 | spin_lock_irqsave(&slob_lock, flags); |
143 | for (cur = slobfree; !(b > cur && b < cur->next); cur = cur->next) | ||
144 | if (cur >= cur->next && (b > cur || b < cur->next)) | ||
145 | break; | ||
146 | 325 | ||
147 | if (b + b->units == cur->next) { | 326 | if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) { |
148 | b->units += cur->next->units; | 327 | /* Go directly to page allocator. Do not pass slob allocator */ |
149 | b->next = cur->next->next; | 328 | if (slob_page_free(sp)) |
150 | } else | 329 | clear_slob_page_free(sp); |
151 | b->next = cur->next; | 330 | clear_slob_page(sp); |
331 | free_slob_page(sp); | ||
332 | free_page((unsigned long)b); | ||
333 | goto out; | ||
334 | } | ||
152 | 335 | ||
153 | if (cur + cur->units == b) { | 336 | if (!slob_page_free(sp)) { |
154 | cur->units += b->units; | 337 | /* This slob page is about to become partially free. Easy! */ |
155 | cur->next = b->next; | 338 | sp->units = units; |
156 | } else | 339 | sp->free = b; |
157 | cur->next = b; | 340 | set_slob(b, units, |
341 | (void *)((unsigned long)(b + | ||
342 | SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK)); | ||
343 | set_slob_page_free(sp); | ||
344 | goto out; | ||
345 | } | ||
346 | |||
347 | /* | ||
348 | * Otherwise the page is already partially free, so find reinsertion | ||
349 | * point. | ||
350 | */ | ||
351 | sp->units += units; | ||
158 | 352 | ||
159 | slobfree = cur; | 353 | if (b < sp->free) { |
354 | set_slob(b, units, sp->free); | ||
355 | sp->free = b; | ||
356 | } else { | ||
357 | prev = sp->free; | ||
358 | next = slob_next(prev); | ||
359 | while (b > next) { | ||
360 | prev = next; | ||
361 | next = slob_next(prev); | ||
362 | } | ||
160 | 363 | ||
364 | if (!slob_last(prev) && b + units == next) { | ||
365 | units += slob_units(next); | ||
366 | set_slob(b, units, slob_next(next)); | ||
367 | } else | ||
368 | set_slob(b, units, next); | ||
369 | |||
370 | if (prev + slob_units(prev) == b) { | ||
371 | units = slob_units(b) + slob_units(prev); | ||
372 | set_slob(prev, units, slob_next(b)); | ||
373 | } else | ||
374 | set_slob(prev, slob_units(prev), b); | ||
375 | } | ||
376 | out: | ||
161 | spin_unlock_irqrestore(&slob_lock, flags); | 377 | spin_unlock_irqrestore(&slob_lock, flags); |
162 | } | 378 | } |
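[Editor's note] Within a page, slob_free() keeps the free list sorted by address and merges a freed block with whichever neighbours it touches (and hands the whole page back to the page allocator once every unit is free). The merging policy in isolation, as a small userspace sketch with invented names and a plain extent array instead of in-place headers:

    #include <assert.h>

    /* Address-ordered free list with neighbour merging. Extents are (start,
     * size) pairs measured in units, kept sorted by start. */
    #define MAX_EXTENTS 16

    struct extent { int start, size; };

    static struct extent freelist[MAX_EXTENTS];
    static int nfree;

    static void free_units(int start, int size)
    {
            int i, j;

            /* find the insertion point that keeps the list address-ordered */
            for (i = 0; i < nfree && freelist[i].start < start; i++)
                    ;

            /* merge with the following extent if they touch */
            if (i < nfree && start + size == freelist[i].start) {
                    freelist[i].start = start;
                    freelist[i].size += size;
            } else {
                    assert(nfree < MAX_EXTENTS);
                    for (j = nfree; j > i; j--)
                            freelist[j] = freelist[j - 1];
                    freelist[i].start = start;
                    freelist[i].size = size;
                    nfree++;
            }

            /* merge with the preceding extent if they touch */
            if (i > 0 && freelist[i - 1].start + freelist[i - 1].size == freelist[i].start) {
                    freelist[i - 1].size += freelist[i].size;
                    for (j = i; j < nfree - 1; j++)
                            freelist[j] = freelist[j + 1];
                    nfree--;
            }
    }

    int main(void)
    {
            free_units(100, 20);
            free_units(200, 40);
            free_units(120, 80);    /* touches both neighbours, all three coalesce */
            assert(nfree == 1 && freelist[0].start == 100 && freelist[0].size == 140);
            return 0;
    }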
163 | 379 | ||
380 | /* | ||
381 | * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend. | ||
382 | */ | ||
383 | |||
384 | struct bigblock { | ||
385 | int order; | ||
386 | void *pages; | ||
387 | struct bigblock *next; | ||
388 | }; | ||
389 | typedef struct bigblock bigblock_t; | ||
390 | |||
391 | static bigblock_t *bigblocks; | ||
392 | |||
393 | static DEFINE_SPINLOCK(block_lock); | ||
394 | |||
395 | |||
164 | void *__kmalloc(size_t size, gfp_t gfp) | 396 | void *__kmalloc(size_t size, gfp_t gfp) |
165 | { | 397 | { |
166 | slob_t *m; | 398 | slob_t *m; |
@@ -169,7 +401,9 @@ void *__kmalloc(size_t size, gfp_t gfp)
169 | 401 | ||
170 | if (size < PAGE_SIZE - SLOB_UNIT) { | 402 | if (size < PAGE_SIZE - SLOB_UNIT) { |
171 | m = slob_alloc(size + SLOB_UNIT, gfp, 0); | 403 | m = slob_alloc(size + SLOB_UNIT, gfp, 0); |
172 | return m ? (void *)(m + 1) : 0; | 404 | if (m) |
405 | m->units = size; | ||
406 | return m+1; | ||
173 | } | 407 | } |
174 | 408 | ||
175 | bb = slob_alloc(sizeof(bigblock_t), gfp, 0); | 409 | bb = slob_alloc(sizeof(bigblock_t), gfp, 0); |
@@ -227,14 +461,17 @@ EXPORT_SYMBOL(krealloc);
227 | 461 | ||
228 | void kfree(const void *block) | 462 | void kfree(const void *block) |
229 | { | 463 | { |
464 | struct slob_page *sp; | ||
465 | slob_t *m; | ||
230 | bigblock_t *bb, **last = &bigblocks; | 466 | bigblock_t *bb, **last = &bigblocks; |
231 | unsigned long flags; | 467 | unsigned long flags; |
232 | 468 | ||
233 | if (!block) | 469 | if (!block) |
234 | return; | 470 | return; |
235 | 471 | ||
236 | if (!((unsigned long)block & (PAGE_SIZE-1))) { | 472 | sp = (struct slob_page *)virt_to_page(block); |
237 | /* might be on the big block list */ | 473 | if (!slob_page(sp)) { |
474 | /* on the big block list */ | ||
238 | spin_lock_irqsave(&block_lock, flags); | 475 | spin_lock_irqsave(&block_lock, flags); |
239 | for (bb = bigblocks; bb; last = &bb->next, bb = bb->next) { | 476 | for (bb = bigblocks; bb; last = &bb->next, bb = bb->next) { |
240 | if (bb->pages == block) { | 477 | if (bb->pages == block) { |
@@ -246,9 +483,12 @@ void kfree(const void *block)
246 | } | 483 | } |
247 | } | 484 | } |
248 | spin_unlock_irqrestore(&block_lock, flags); | 485 | spin_unlock_irqrestore(&block_lock, flags); |
486 | WARN_ON(1); | ||
487 | return; | ||
249 | } | 488 | } |
250 | 489 | ||
251 | slob_free((slob_t *)block - 1, 0); | 490 | m = (slob_t *)block - 1; |
491 | slob_free(m, m->units + SLOB_UNIT); | ||
252 | return; | 492 | return; |
253 | } | 493 | } |
254 | 494 | ||
@@ -256,13 +496,15 @@ EXPORT_SYMBOL(kfree);
256 | 496 | ||
257 | size_t ksize(const void *block) | 497 | size_t ksize(const void *block) |
258 | { | 498 | { |
499 | struct slob_page *sp; | ||
259 | bigblock_t *bb; | 500 | bigblock_t *bb; |
260 | unsigned long flags; | 501 | unsigned long flags; |
261 | 502 | ||
262 | if (!block) | 503 | if (!block) |
263 | return 0; | 504 | return 0; |
264 | 505 | ||
265 | if (!((unsigned long)block & (PAGE_SIZE-1))) { | 506 | sp = (struct slob_page *)virt_to_page(block); |
507 | if (!slob_page(sp)) { | ||
266 | spin_lock_irqsave(&block_lock, flags); | 508 | spin_lock_irqsave(&block_lock, flags); |
267 | for (bb = bigblocks; bb; bb = bb->next) | 509 | for (bb = bigblocks; bb; bb = bb->next) |
268 | if (bb->pages == block) { | 510 | if (bb->pages == block) { |
@@ -272,7 +514,7 @@ size_t ksize(const void *block)
272 | spin_unlock_irqrestore(&block_lock, flags); | 514 | spin_unlock_irqrestore(&block_lock, flags); |
273 | } | 515 | } |
274 | 516 | ||
275 | return ((slob_t *)block - 1)->units * SLOB_UNIT; | 517 | return ((slob_t *)block - 1)->units + SLOB_UNIT; |
276 | } | 518 | } |
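[Editor's note] Tracing the new header convention through a small request: __kmalloc(100) asks slob_alloc() for 100 + SLOB_UNIT bytes, stamps m->units = 100 into the one-unit header, and hands back m + 1. kfree() recovers m by stepping back one unit and frees m->units + SLOB_UNIT bytes, and ksize() likewise reports m->units + SLOB_UNIT, i.e. the requested 100 bytes plus the header unit.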
277 | 519 | ||
278 | struct kmem_cache { | 520 | struct kmem_cache { |
@@ -385,9 +627,6 @@ const char *kmem_cache_name(struct kmem_cache *c)
385 | } | 627 | } |
386 | EXPORT_SYMBOL(kmem_cache_name); | 628 | EXPORT_SYMBOL(kmem_cache_name); |
387 | 629 | ||
388 | static struct timer_list slob_timer = TIMER_INITIALIZER( | ||
389 | (void (*)(unsigned long))slob_timer_cbk, 0, 0); | ||
390 | |||
391 | int kmem_cache_shrink(struct kmem_cache *d) | 630 | int kmem_cache_shrink(struct kmem_cache *d) |
392 | { | 631 | { |
393 | return 0; | 632 | return 0; |
@@ -401,15 +640,4 @@ int kmem_ptr_validate(struct kmem_cache *a, const void *b)
401 | 640 | ||
402 | void __init kmem_cache_init(void) | 641 | void __init kmem_cache_init(void) |
403 | { | 642 | { |
404 | slob_timer_cbk(); | ||
405 | } | ||
406 | |||
407 | static void slob_timer_cbk(void) | ||
408 | { | ||
409 | void *p = slob_alloc(PAGE_SIZE, 0, PAGE_SIZE-1); | ||
410 | |||
411 | if (p) | ||
412 | free_page((unsigned long)p); | ||
413 | |||
414 | mod_timer(&slob_timer, jiffies + HZ); | ||
415 | } | 643 | } |