diff options
Diffstat (limited to 'mm/slob.c')
| -rw-r--r-- | mm/slob.c | 45 |
1 file changed, 29 insertions, 16 deletions
| @@ -126,9 +126,9 @@ static LIST_HEAD(free_slob_medium); | |||
| 126 | static LIST_HEAD(free_slob_large); | 126 | static LIST_HEAD(free_slob_large); |
| 127 | 127 | ||
| 128 | /* | 128 | /* |
| 129 | * slob_page: True for all slob pages (false for bigblock pages) | 129 | * is_slob_page: True for all slob pages (false for bigblock pages) |
| 130 | */ | 130 | */ |
| 131 | static inline int slob_page(struct slob_page *sp) | 131 | static inline int is_slob_page(struct slob_page *sp) |
| 132 | { | 132 | { |
| 133 | return PageSlobPage((struct page *)sp); | 133 | return PageSlobPage((struct page *)sp); |
| 134 | } | 134 | } |
| @@ -143,6 +143,11 @@ static inline void clear_slob_page(struct slob_page *sp) | |||
| 143 | __ClearPageSlobPage((struct page *)sp); | 143 | __ClearPageSlobPage((struct page *)sp); |
| 144 | } | 144 | } |
| 145 | 145 | ||
| 146 | static inline struct slob_page *slob_page(const void *addr) | ||
| 147 | { | ||
| 148 | return (struct slob_page *)virt_to_page(addr); | ||
| 149 | } | ||
| 150 | |||
| 146 | /* | 151 | /* |
| 147 | * slob_page_free: true for pages on free_slob_pages list. | 152 | * slob_page_free: true for pages on free_slob_pages list. |
| 148 | */ | 153 | */ |
| @@ -230,7 +235,7 @@ static int slob_last(slob_t *s) | |||
| 230 | return !((unsigned long)slob_next(s) & ~PAGE_MASK); | 235 | return !((unsigned long)slob_next(s) & ~PAGE_MASK); |
| 231 | } | 236 | } |
| 232 | 237 | ||
| 233 | static void *slob_new_page(gfp_t gfp, int order, int node) | 238 | static void *slob_new_pages(gfp_t gfp, int order, int node) |
| 234 | { | 239 | { |
| 235 | void *page; | 240 | void *page; |
| 236 | 241 | ||
| @@ -247,12 +252,17 @@ static void *slob_new_page(gfp_t gfp, int order, int node) | |||
| 247 | return page_address(page); | 252 | return page_address(page); |
| 248 | } | 253 | } |
| 249 | 254 | ||
| 255 | static void slob_free_pages(void *b, int order) | ||
| 256 | { | ||
| 257 | free_pages((unsigned long)b, order); | ||
| 258 | } | ||
| 259 | |||
| 250 | /* | 260 | /* |
| 251 | * Allocate a slob block within a given slob_page sp. | 261 | * Allocate a slob block within a given slob_page sp. |
| 252 | */ | 262 | */ |
| 253 | static void *slob_page_alloc(struct slob_page *sp, size_t size, int align) | 263 | static void *slob_page_alloc(struct slob_page *sp, size_t size, int align) |
| 254 | { | 264 | { |
| 255 | slob_t *prev, *cur, *aligned = 0; | 265 | slob_t *prev, *cur, *aligned = NULL; |
| 256 | int delta = 0, units = SLOB_UNITS(size); | 266 | int delta = 0, units = SLOB_UNITS(size); |
| 257 | 267 | ||
| 258 | for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) { | 268 | for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) { |
| @@ -349,10 +359,10 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node) | |||
| 349 | 359 | ||
| 350 | /* Not enough space: must allocate a new page */ | 360 | /* Not enough space: must allocate a new page */ |
| 351 | if (!b) { | 361 | if (!b) { |
| 352 | b = slob_new_page(gfp & ~__GFP_ZERO, 0, node); | 362 | b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node); |
| 353 | if (!b) | 363 | if (!b) |
| 354 | return 0; | 364 | return NULL; |
| 355 | sp = (struct slob_page *)virt_to_page(b); | 365 | sp = slob_page(b); |
| 356 | set_slob_page(sp); | 366 | set_slob_page(sp); |
| 357 | 367 | ||
| 358 | spin_lock_irqsave(&slob_lock, flags); | 368 | spin_lock_irqsave(&slob_lock, flags); |
| @@ -384,7 +394,7 @@ static void slob_free(void *block, int size) | |||
| 384 | return; | 394 | return; |
| 385 | BUG_ON(!size); | 395 | BUG_ON(!size); |
| 386 | 396 | ||
| 387 | sp = (struct slob_page *)virt_to_page(block); | 397 | sp = slob_page(block); |
| 388 | units = SLOB_UNITS(size); | 398 | units = SLOB_UNITS(size); |
| 389 | 399 | ||
| 390 | spin_lock_irqsave(&slob_lock, flags); | 400 | spin_lock_irqsave(&slob_lock, flags); |
| @@ -393,10 +403,11 @@ static void slob_free(void *block, int size) | |||
| 393 | /* Go directly to page allocator. Do not pass slob allocator */ | 403 | /* Go directly to page allocator. Do not pass slob allocator */ |
| 394 | if (slob_page_free(sp)) | 404 | if (slob_page_free(sp)) |
| 395 | clear_slob_page_free(sp); | 405 | clear_slob_page_free(sp); |
| 406 | spin_unlock_irqrestore(&slob_lock, flags); | ||
| 396 | clear_slob_page(sp); | 407 | clear_slob_page(sp); |
| 397 | free_slob_page(sp); | 408 | free_slob_page(sp); |
| 398 | free_page((unsigned long)b); | 409 | free_page((unsigned long)b); |
| 399 | goto out; | 410 | return; |
| 400 | } | 411 | } |
| 401 | 412 | ||
| 402 | if (!slob_page_free(sp)) { | 413 | if (!slob_page_free(sp)) { |
| @@ -464,6 +475,8 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node) | |||
| 464 | unsigned int *m; | 475 | unsigned int *m; |
| 465 | int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); | 476 | int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); |
| 466 | 477 | ||
| 478 | lockdep_trace_alloc(gfp); | ||
| 479 | |||
| 467 | if (size < PAGE_SIZE - align) { | 480 | if (size < PAGE_SIZE - align) { |
| 468 | if (!size) | 481 | if (!size) |
| 469 | return ZERO_SIZE_PTR; | 482 | return ZERO_SIZE_PTR; |
| @@ -476,7 +489,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node) | |||
| 476 | } else { | 489 | } else { |
| 477 | void *ret; | 490 | void *ret; |
| 478 | 491 | ||
| 479 | ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node); | 492 | ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node); |
| 480 | if (ret) { | 493 | if (ret) { |
| 481 | struct page *page; | 494 | struct page *page; |
| 482 | page = virt_to_page(ret); | 495 | page = virt_to_page(ret); |
| @@ -494,8 +507,8 @@ void kfree(const void *block) | |||
| 494 | if (unlikely(ZERO_OR_NULL_PTR(block))) | 507 | if (unlikely(ZERO_OR_NULL_PTR(block))) |
| 495 | return; | 508 | return; |
| 496 | 509 | ||
| 497 | sp = (struct slob_page *)virt_to_page(block); | 510 | sp = slob_page(block); |
| 498 | if (slob_page(sp)) { | 511 | if (is_slob_page(sp)) { |
| 499 | int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); | 512 | int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); |
| 500 | unsigned int *m = (unsigned int *)(block - align); | 513 | unsigned int *m = (unsigned int *)(block - align); |
| 501 | slob_free(m, *m + align); | 514 | slob_free(m, *m + align); |
| @@ -513,8 +526,8 @@ size_t ksize(const void *block) | |||
| 513 | if (unlikely(block == ZERO_SIZE_PTR)) | 526 | if (unlikely(block == ZERO_SIZE_PTR)) |
| 514 | return 0; | 527 | return 0; |
| 515 | 528 | ||
| 516 | sp = (struct slob_page *)virt_to_page(block); | 529 | sp = slob_page(block); |
| 517 | if (slob_page(sp)) { | 530 | if (is_slob_page(sp)) { |
| 518 | int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); | 531 | int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); |
| 519 | unsigned int *m = (unsigned int *)(block - align); | 532 | unsigned int *m = (unsigned int *)(block - align); |
| 520 | return SLOB_UNITS(*m) * SLOB_UNIT; | 533 | return SLOB_UNITS(*m) * SLOB_UNIT; |
| @@ -573,7 +586,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node) | |||
| 573 | if (c->size < PAGE_SIZE) | 586 | if (c->size < PAGE_SIZE) |
| 574 | b = slob_alloc(c->size, flags, c->align, node); | 587 | b = slob_alloc(c->size, flags, c->align, node); |
| 575 | else | 588 | else |
| 576 | b = slob_new_page(flags, get_order(c->size), node); | 589 | b = slob_new_pages(flags, get_order(c->size), node); |
| 577 | 590 | ||
| 578 | if (c->ctor) | 591 | if (c->ctor) |
| 579 | c->ctor(b); | 592 | c->ctor(b); |
| @@ -587,7 +600,7 @@ static void __kmem_cache_free(void *b, int size) | |||
| 587 | if (size < PAGE_SIZE) | 600 | if (size < PAGE_SIZE) |
| 588 | slob_free(b, size); | 601 | slob_free(b, size); |
| 589 | else | 602 | else |
| 590 | free_pages((unsigned long)b, get_order(size)); | 603 | slob_free_pages(b, get_order(size)); |
| 591 | } | 604 | } |
| 592 | 605 | ||
| 593 | static void kmem_rcu_free(struct rcu_head *head) | 606 | static void kmem_rcu_free(struct rcu_head *head) |
