Diffstat (limited to 'mm/slob.c'):

 mm/slob.c | 152 +++++++++++++----------------------------------
 1 file changed, 43 insertions(+), 109 deletions(-)
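In short: this patch deletes SLOB's private, type-punned view of struct page (struct slob_page) and operates on struct page directly, since the fields SLOB needs were folded into struct page itself by this series. A hedged, abridged sketch of the fields involved (the authoritative layout is in include/linux/mm_types.h; its union nesting and most members are elided here):

/* Approximate view of the struct page fields slob.c touches below.
 * Field names are real; surrounding unions and other members omitted. */
struct page {
	unsigned long flags;	/* PG_slab / PG_slob_free are kept here */
	void *freelist;		/* first free slob_t in the page */
	int units;		/* free SLOB_UNITs left in the page */
	struct list_head list;	/* links the page into a free_slob_* list */
	unsigned long private;	/* used by big kmalloc pages (see ksize) */
	/* ... */
};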
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -59,6 +59,8 @@
 
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include "slab.h"
+
 #include <linux/mm.h>
 #include <linux/swap.h> /* struct reclaim_state */
 #include <linux/cache.h>
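The new include pulls in mm/slab.h, the private header this series introduces so SLAB, SLUB and SLOB can share boot state and the cache-creation entry point. Roughly what slob.c consumes from it (a sketch from memory of the series, not a verbatim copy):

/* mm/slab.h, abridged: shared allocator boot state and the
 * allocator-specific constructor that common code calls. */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node works */
	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache works */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
	size_t align, unsigned long flags, void (*ctor)(void *));

The UP and FULL states are what the kmem_cache_init() hunks at the bottom of this patch set.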
@@ -92,36 +94,6 @@ struct slob_block {
 typedef struct slob_block slob_t;
 
 /*
- * We use struct page fields to manage some slob allocation aspects,
- * however to avoid the horrible mess in include/linux/mm_types.h, we'll
- * just define our own struct page type variant here.
- */
-struct slob_page {
-	union {
-		struct {
-			unsigned long flags;	/* mandatory */
-			atomic_t _count;	/* mandatory */
-			slobidx_t units;	/* free units left in page */
-			unsigned long pad[2];
-			slob_t *free;		/* first free slob_t in page */
-			struct list_head list;	/* linked list of free pages */
-		};
-		struct page page;
-	};
-};
-static inline void struct_slob_page_wrong_size(void)
-{ BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }
-
-/*
- * free_slob_page: call before a slob_page is returned to the page allocator.
- */
-static inline void free_slob_page(struct slob_page *sp)
-{
-	reset_page_mapcount(&sp->page);
-	sp->page.mapping = NULL;
-}
-
-/*
  * All partially free slob pages go on these lists.
  */
 #define SLOB_BREAK1 256
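The block deleted above was a maintenance trap: struct slob_page had to mirror struct page field-for-field through a union, with struct_slob_page_wrong_size() as a compile-time size tripwire. A self-contained userspace toy of that now-retired pattern, with C11 _Static_assert standing in for BUILD_BUG_ON() (all names here are illustrative, not kernel code):

#include <stdio.h>

struct real_page {			/* stand-in for struct page */
	unsigned long flags;
	int units;
	void *freelist;
};

struct shadow_page {			/* stand-in for struct slob_page */
	union {
		struct {		/* reinterpreted field names */
			unsigned long flags;
			int units;
			void *freelist;
		};
		struct real_page page;	/* the "real" view */
	};
};

/* Compile-time tripwire, as struct_slob_page_wrong_size() did with
 * BUILD_BUG_ON(): the build breaks if the two layouts drift apart. */
_Static_assert(sizeof(struct shadow_page) == sizeof(struct real_page),
	       "shadow_page must mirror real_page");

int main(void)
{
	struct shadow_page sp = { .units = 3 };
	printf("units via overlay: %d\n", sp.units);	/* prints 3 */
	return 0;
}

Every struct page layout change risked silently skewing the overlay; once the needed fields live in struct page itself, the shadow type and its tripwire can simply go.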
@@ -131,46 +103,23 @@ static LIST_HEAD(free_slob_medium);
 static LIST_HEAD(free_slob_large);
 
 /*
- * is_slob_page: True for all slob pages (false for bigblock pages)
- */
-static inline int is_slob_page(struct slob_page *sp)
-{
-	return PageSlab((struct page *)sp);
-}
-
-static inline void set_slob_page(struct slob_page *sp)
-{
-	__SetPageSlab((struct page *)sp);
-}
-
-static inline void clear_slob_page(struct slob_page *sp)
-{
-	__ClearPageSlab((struct page *)sp);
-}
-
-static inline struct slob_page *slob_page(const void *addr)
-{
-	return (struct slob_page *)virt_to_page(addr);
-}
-
-/*
  * slob_page_free: true for pages on free_slob_pages list.
  */
-static inline int slob_page_free(struct slob_page *sp)
+static inline int slob_page_free(struct page *sp)
 {
-	return PageSlobFree((struct page *)sp);
+	return PageSlobFree(sp);
 }
 
-static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
+static void set_slob_page_free(struct page *sp, struct list_head *list)
 {
 	list_add(&sp->list, list);
-	__SetPageSlobFree((struct page *)sp);
+	__SetPageSlobFree(sp);
 }
 
-static inline void clear_slob_page_free(struct slob_page *sp)
+static inline void clear_slob_page_free(struct page *sp)
 {
 	list_del(&sp->list);
-	__ClearPageSlobFree((struct page *)sp);
+	__ClearPageSlobFree(sp);
 }
 
 #define SLOB_UNIT sizeof(slob_t)
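For orientation: SLOB_UNIT, the context line above, is the allocation granularity (one slob_t — typically 2 bytes, since slobidx_t defaults to s16 on common page sizes), and its sibling macro just after this hunk rounds byte counts up to whole units. Quoted from the unchanged file for context:

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)

/* e.g. with 2-byte units, SLOB_UNITS(13) == 7: a 13-byte request
 * consumes seven units. */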
@@ -267,12 +216,12 @@ static void slob_free_pages(void *b, int order)
 /*
  * Allocate a slob block within a given slob_page sp.
  */
-static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
+static void *slob_page_alloc(struct page *sp, size_t size, int align)
 {
 	slob_t *prev, *cur, *aligned = NULL;
 	int delta = 0, units = SLOB_UNITS(size);
 
-	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
+	for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
 		slobidx_t avail = slob_units(cur);
 
 		if (align) {
@@ -296,12 +245,12 @@ static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
 			if (prev)
 				set_slob(prev, slob_units(prev), next);
 			else
-				sp->free = next;
+				sp->freelist = next;
 		} else { /* fragment */
 			if (prev)
 				set_slob(prev, slob_units(prev), cur + units);
 			else
-				sp->free = cur + units;
+				sp->freelist = cur + units;
 			set_slob(cur + units, avail - units, next);
 		}
 
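The walk above (and the freeing code further down) leans on SLOB's in-band freelist encoding, which this patch leaves alone: a free block of two or more units stores its size in its first slob_t and the intra-page offset of the next free block in its second; a one-unit block stores the negated offset. The unchanged helpers, reproduced here from memory for context (comments added; see slob.c for the authoritative text):

static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;	/* word 0: size of this free block */
		s[1].units = offset;	/* word 1: offset of next free block */
	} else
		s[0].units = -offset;	/* 1-unit block: negated next offset */
}

static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)		/* negative size: 1-unit block */
		next = -s[0].units;
	else
		next = s[1].units;
	return base+next;
}

This is why sp->freelist can be a bare void * in struct page: all list structure lives inside the free blocks themselves.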
@@ -320,7 +269,7 @@ static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
  */
 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 {
-	struct slob_page *sp;
+	struct page *sp;
 	struct list_head *prev;
 	struct list_head *slob_list;
 	slob_t *b = NULL;
@@ -341,7 +290,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		 * If there's a node specification, search for a partial
 		 * page with a matching node id in the freelist.
 		 */
-		if (node != -1 && page_to_nid(&sp->page) != node)
+		if (node != -1 && page_to_nid(sp) != node)
 			continue;
 #endif
 		/* Enough room on this page? */
@@ -369,12 +318,12 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
 		if (!b)
 			return NULL;
-		sp = slob_page(b);
-		set_slob_page(sp);
+		sp = virt_to_page(b);
+		__SetPageSlab(sp);
 
 		spin_lock_irqsave(&slob_lock, flags);
 		sp->units = SLOB_UNITS(PAGE_SIZE);
-		sp->free = b;
+		sp->freelist = b;
 		INIT_LIST_HEAD(&sp->list);
 		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
 		set_slob_page_free(sp, slob_list);
@@ -392,7 +341,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
  */
 static void slob_free(void *block, int size)
 {
-	struct slob_page *sp;
+	struct page *sp;
 	slob_t *prev, *next, *b = (slob_t *)block;
 	slobidx_t units;
 	unsigned long flags;
@@ -402,7 +351,7 @@ static void slob_free(void *block, int size)
 		return;
 	BUG_ON(!size);
 
-	sp = slob_page(block);
+	sp = virt_to_page(block);
 	units = SLOB_UNITS(size);
 
 	spin_lock_irqsave(&slob_lock, flags);
@@ -412,8 +361,8 @@ static void slob_free(void *block, int size)
 		if (slob_page_free(sp))
 			clear_slob_page_free(sp);
 		spin_unlock_irqrestore(&slob_lock, flags);
-		clear_slob_page(sp);
-		free_slob_page(sp);
+		__ClearPageSlab(sp);
+		reset_page_mapcount(sp);
 		slob_free_pages(b, 0);
 		return;
 	}
@@ -421,7 +370,7 @@ static void slob_free(void *block, int size)
 	if (!slob_page_free(sp)) {
 		/* This slob page is about to become partially free. Easy! */
 		sp->units = units;
-		sp->free = b;
+		sp->freelist = b;
 		set_slob(b, units,
 			(void *)((unsigned long)(b +
 					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
@@ -441,15 +390,15 @@ static void slob_free(void *block, int size)
 	 */
 	sp->units += units;
 
-	if (b < sp->free) {
-		if (b + units == sp->free) {
-			units += slob_units(sp->free);
-			sp->free = slob_next(sp->free);
+	if (b < (slob_t *)sp->freelist) {
+		if (b + units == sp->freelist) {
+			units += slob_units(sp->freelist);
+			sp->freelist = slob_next(sp->freelist);
 		}
-		set_slob(b, units, sp->free);
-		sp->free = b;
+		set_slob(b, units, sp->freelist);
+		sp->freelist = b;
 	} else {
-		prev = sp->free;
+		prev = sp->freelist;
 		next = slob_next(prev);
 		while (b > next) {
 			prev = next;
@@ -522,7 +471,7 @@ EXPORT_SYMBOL(__kmalloc_node);
 
 void kfree(const void *block)
 {
-	struct slob_page *sp;
+	struct page *sp;
 
 	trace_kfree(_RET_IP_, block);
 
@@ -530,43 +479,36 @@ void kfree(const void *block)
 		return;
 	kmemleak_free(block);
 
-	sp = slob_page(block);
-	if (is_slob_page(sp)) {
+	sp = virt_to_page(block);
+	if (PageSlab(sp)) {
 		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		slob_free(m, *m + align);
 	} else
-		put_page(&sp->page);
+		put_page(sp);
 }
 EXPORT_SYMBOL(kfree);
 
 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
 size_t ksize(const void *block)
 {
-	struct slob_page *sp;
+	struct page *sp;
 
 	BUG_ON(!block);
 	if (unlikely(block == ZERO_SIZE_PTR))
 		return 0;
 
-	sp = slob_page(block);
-	if (is_slob_page(sp)) {
+	sp = virt_to_page(block);
+	if (PageSlab(sp)) {
 		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		return SLOB_UNITS(*m) * SLOB_UNIT;
 	} else
-		return sp->page.private;
+		return sp->private;
 }
 EXPORT_SYMBOL(ksize);
 
-struct kmem_cache {
-	unsigned int size, align;
-	unsigned long flags;
-	const char *name;
-	void (*ctor)(void *);
-};
-
-struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 	size_t align, unsigned long flags, void (*ctor)(void *))
 {
 	struct kmem_cache *c;
@@ -589,13 +531,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		c->align = ARCH_SLAB_MINALIGN;
 		if (c->align < align)
 			c->align = align;
-	} else if (flags & SLAB_PANIC)
-		panic("Cannot create slab cache %s\n", name);
 
-	kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
+		kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
+		c->refcount = 1;
+	}
 	return c;
 }
-EXPORT_SYMBOL(kmem_cache_create);
 
 void kmem_cache_destroy(struct kmem_cache *c)
 {
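The dropped SLAB_PANIC handling and EXPORT_SYMBOL are not lost: in this series the allocator-independent kmem_cache_create() moves to mm/slab_common.c and calls the per-allocator __kmem_cache_create() defined above. A hedged, abridged sketch of the common wrapper (the real one also takes slab_mutex and sanity-checks its arguments):

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
		size_t align, unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s;

	s = __kmem_cache_create(name, size, align, flags, ctor);

	if (!s && (flags & SLAB_PANIC))
		panic("kmem_cache_create: Failed to create slab '%s'\n", name);

	return s;
}
EXPORT_SYMBOL(kmem_cache_create);

This is also why the new slob code sets c->refcount = 1: the common kmem_cache_destroy() path refcounts caches uniformly across the three allocators.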
@@ -678,19 +619,12 @@ int kmem_cache_shrink(struct kmem_cache *d)
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
-static unsigned int slob_ready __read_mostly;
-
-int slab_is_available(void)
-{
-	return slob_ready;
-}
-
 void __init kmem_cache_init(void)
 {
-	slob_ready = 1;
+	slab_state = UP;
 }
 
 void __init kmem_cache_init_late(void)
 {
-	/* Nothing to do */
+	slab_state = FULL;
 }
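With the init hooks now driving the shared slab_state, SLOB's private slob_ready flag and its copy of slab_is_available() (deleted above) become redundant; one common implementation answers for all three allocators. Roughly, in mm/slab_common.c of this series (a sketch, not a verbatim quote):

/* Common availability check replacing slob_ready -- approximate. */
int slab_is_available(void)
{
	return slab_state >= UP;
}

Net effect of the whole patch: 109 lines of private scaffolding replaced by 43 lines that lean on struct page and the new common slab core.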