author		Américo Wang <xiyou.wangcong@gmail.com>	2009-01-18 13:00:38 -0500
committer	Pekka Enberg <penberg@cs.helsinki.fi>		2009-01-19 03:10:22 -0500
commit		6e9ed0cc4b963fde66ab47d9fb19147631e44555
tree		e758725d3d6202f96268770c89f7d348c7d23763	/mm/slob.c
parent		a6525042bfdfcab128bd91fad264de10fd24a55e
slob: clean up the code
- Use NULL instead of plain 0;
- Rename slob_page() to is_slob_page();
- Define slob_page() to convert void* to struct slob_page*;
- Rename slob_new_page() to slob_new_pages();
- Define slob_free_pages() accordingly.
Compile-tested only.
Signed-off-by: WANG Cong <wangcong@zeuux.org>
Signed-off-by: Matt Mackall <mpm@selenic.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
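
The rename is a two-step swap: the old predicate slob_page() becomes is_slob_page(), which frees the slob_page name for a new casting helper. The net effect at a call site such as kfree() (abridged from the hunks below) is:

	/* before: open-coded cast, predicate with a misleading name */
	sp = (struct slob_page *)virt_to_page(block);
	if (slob_page(sp))
		...

	/* after: the cast has a name, and the predicate reads as a question */
	sp = slob_page(block);
	if (is_slob_page(sp))
		...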
Diffstat (limited to 'mm/slob.c')
-rw-r--r--	mm/slob.c | 40 +++++++++++++++++++++++++---------------
1 file changed, 25 insertions(+), 15 deletions(-)
@@ -126,9 +126,9 @@ static LIST_HEAD(free_slob_medium);
 static LIST_HEAD(free_slob_large);
 
 /*
- * slob_page: True for all slob pages (false for bigblock pages)
+ * is_slob_page: True for all slob pages (false for bigblock pages)
  */
-static inline int slob_page(struct slob_page *sp)
+static inline int is_slob_page(struct slob_page *sp)
 {
 	return PageSlobPage((struct page *)sp);
 }
@@ -143,6 +143,11 @@ static inline void clear_slob_page(struct slob_page *sp)
 	__ClearPageSlobPage((struct page *)sp);
 }
 
+static inline struct slob_page *slob_page(const void *addr)
+{
+	return (struct slob_page *)virt_to_page(addr);
+}
+
 /*
  * slob_page_free: true for pages on free_slob_pages list.
  */
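
The cast inside the new helper is only well-defined because SLOB keeps its per-page metadata in the page descriptor itself: struct slob_page is declared elsewhere in mm/slob.c as a union overlay of struct page, so the pointer virt_to_page() returns can be viewed through either type. A sketch of that overlay, with the field list assumed from the surrounding file rather than quoted from this patch:

	struct slob_page {
		union {
			struct {
				unsigned long flags;	/* must line up with page->flags */
				atomic_t _count;	/* must line up with page->_count */
				slobidx_t units;	/* free units left in this page */
				unsigned long pad[2];
				slob_t *free;		/* first free block in this page */
				struct list_head list;	/* entry on a free_slob_* list */
			};
			struct page page;
		};
	};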
@@ -230,7 +235,7 @@ static int slob_last(slob_t *s)
 	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
 }
 
-static void *slob_new_page(gfp_t gfp, int order, int node)
+static void *slob_new_pages(gfp_t gfp, int order, int node)
 {
 	void *page;
 
@@ -247,12 +252,17 @@ static void *slob_new_page(gfp_t gfp, int order, int node)
 	return page_address(page);
 }
 
+static void slob_free_pages(void *b, int order)
+{
+	free_pages((unsigned long)b, order);
+}
+
 /*
  * Allocate a slob block within a given slob_page sp.
  */
 static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
 {
-	slob_t *prev, *cur, *aligned = 0;
+	slob_t *prev, *cur, *aligned = NULL;
 	int delta = 0, units = SLOB_UNITS(size);
 
 	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
@@ -349,10 +359,10 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 
 	/* Not enough space: must allocate a new page */
 	if (!b) {
-		b = slob_new_page(gfp & ~__GFP_ZERO, 0, node);
+		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
 		if (!b)
-			return 0;
-		sp = (struct slob_page *)virt_to_page(b);
+			return NULL;
+		sp = slob_page(b);
 		set_slob_page(sp);
 
 		spin_lock_irqsave(&slob_lock, flags);
@@ -384,7 +394,7 @@ static void slob_free(void *block, int size)
 		return;
 	BUG_ON(!size);
 
-	sp = (struct slob_page *)virt_to_page(block);
+	sp = slob_page(block);
 	units = SLOB_UNITS(size);
 
 	spin_lock_irqsave(&slob_lock, flags);
@@ -476,7 +486,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 	} else {
 		void *ret;
 
-		ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
+		ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
 		if (ret) {
 			struct page *page;
 			page = virt_to_page(ret);
@@ -494,8 +504,8 @@ void kfree(const void *block)
 	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
 
-	sp = (struct slob_page *)virt_to_page(block);
-	if (slob_page(sp)) {
+	sp = slob_page(block);
+	if (is_slob_page(sp)) {
 		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		slob_free(m, *m + align);
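
The block - align arithmetic works because small allocations carry a size header: __kmalloc_node() (earlier in the file) reserves one aligned word in front of the payload and records the requested size there. A simplified sketch of that allocation side, under the same assumptions:

	/* simplified from the small-size branch of __kmalloc_node() */
	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	unsigned int *m = slob_alloc(size + align, gfp, align, node);

	if (!m)
		return NULL;
	*m = size;			/* the size kfree()/ksize() read back */
	return (void *)m + align;	/* payload starts past the header */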
@@ -513,8 +523,8 @@ size_t ksize(const void *block)
 	if (unlikely(block == ZERO_SIZE_PTR))
 		return 0;
 
-	sp = (struct slob_page *)virt_to_page(block);
-	if (slob_page(sp)) {
+	sp = slob_page(block);
+	if (is_slob_page(sp)) {
 		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		return SLOB_UNITS(*m) * SLOB_UNIT;
@@ -572,7 +582,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 	if (c->size < PAGE_SIZE)
 		b = slob_alloc(c->size, flags, c->align, node);
 	else
-		b = slob_new_page(flags, get_order(c->size), node);
+		b = slob_new_pages(flags, get_order(c->size), node);
 
 	if (c->ctor)
 		c->ctor(b);
@@ -586,7 +596,7 @@ static void __kmem_cache_free(void *b, int size)
 	if (size < PAGE_SIZE)
 		slob_free(b, size);
 	else
-		free_pages((unsigned long)b, get_order(size));
+		slob_free_pages(b, get_order(size));
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
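
With slob_free_pages() defined, the two dispatch points above become symmetric: sub-page sizes pair slob_alloc() with slob_free(), and page-order sizes pair slob_new_pages() with slob_free_pages(). A hypothetical round trip showing the pairing (the function name is illustrative, not from the patch):

	static void pairing_sketch(size_t size, gfp_t flags, int node)
	{
		void *b;

		if (size < PAGE_SIZE) {
			b = slob_alloc(size, flags, 0, node);
			slob_free(b, size);
		} else {
			b = slob_new_pages(flags, get_order(size), node);
			slob_free_pages(b, get_order(size));
		}
	}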