author     Christoph Lameter <cl@linux.com>       2012-06-13 11:24:54 -0400
committer  Pekka Enberg <penberg@kernel.org>      2012-06-14 02:19:52 -0400
commit     b5568280c9b9162b384be9d447013b74d682d4b3
tree       d3b32a2279d30a7a373349a821a1949abf7bb59c /mm/slob.c
parent     690d5777392180fdc05a82c0c7979e50e8d93de8
slob: Remove various small accessors
Those have become so simple that they are no longer needed.
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
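
Each helper removed here was a one-line wrapper around a single page operation, so every call site can simply open-code the underlying call. The pattern, taken directly from the hunks below (not new code), looks like this:

Before, call sites went through a trivial accessor:

    static inline struct page *slob_page(const void *addr)
    {
            return virt_to_page(addr);
    }

    sp = slob_page(block);

After, the accessor is gone and the call site uses the primitive directly:

    sp = virt_to_page(block);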
Diffstat (limited to 'mm/slob.c')
-rw-r--r--  mm/slob.c  49
1 file changed, 9 insertions(+), 40 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -92,14 +92,6 @@ struct slob_block {
 typedef struct slob_block slob_t;
 
 /*
- * free_slob_page: call before a slob_page is returned to the page allocator.
- */
-static inline void free_slob_page(struct page *sp)
-{
-        reset_page_mapcount(sp);
-}
-
-/*
  * All partially free slob pages go on these lists.
  */
 #define SLOB_BREAK1 256
@@ -109,29 +101,6 @@ static LIST_HEAD(free_slob_medium);
 static LIST_HEAD(free_slob_large);
 
 /*
- * is_slob_page: True for all slob pages (false for bigblock pages)
- */
-static inline int is_slob_page(struct page *sp)
-{
-        return PageSlab(sp);
-}
-
-static inline void set_slob_page(struct page *sp)
-{
-        __SetPageSlab(sp);
-}
-
-static inline void clear_slob_page(struct page *sp)
-{
-        __ClearPageSlab(sp);
-}
-
-static inline struct page *slob_page(const void *addr)
-{
-        return virt_to_page(addr);
-}
-
-/*
  * slob_page_free: true for pages on free_slob_pages list.
  */
 static inline int slob_page_free(struct page *sp)
@@ -347,8 +316,8 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
                 b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
                 if (!b)
                         return NULL;
-                sp = slob_page(b);
-                set_slob_page(sp);
+                sp = virt_to_page(b);
+                __SetPageSlab(sp);
 
                 spin_lock_irqsave(&slob_lock, flags);
                 sp->units = SLOB_UNITS(PAGE_SIZE);
@@ -380,7 +349,7 @@ static void slob_free(void *block, int size)
                 return;
         BUG_ON(!size);
 
-        sp = slob_page(block);
+        sp = virt_to_page(block);
         units = SLOB_UNITS(size);
 
         spin_lock_irqsave(&slob_lock, flags);
@@ -390,8 +359,8 @@ static void slob_free(void *block, int size)
                 if (slob_page_free(sp))
                         clear_slob_page_free(sp);
                 spin_unlock_irqrestore(&slob_lock, flags);
-                clear_slob_page(sp);
-                free_slob_page(sp);
+                __ClearPageSlab(sp);
+                reset_page_mapcount(sp);
                 slob_free_pages(b, 0);
                 return;
         }
@@ -508,8 +477,8 @@ void kfree(const void *block)
                 return;
         kmemleak_free(block);
 
-        sp = slob_page(block);
-        if (is_slob_page(sp)) {
+        sp = virt_to_page(block);
+        if (PageSlab(sp)) {
                 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
                 unsigned int *m = (unsigned int *)(block - align);
                 slob_free(m, *m + align);
@@ -527,8 +496,8 @@ size_t ksize(const void *block)
         if (unlikely(block == ZERO_SIZE_PTR))
                 return 0;
 
-        sp = slob_page(block);
-        if (is_slob_page(sp)) {
+        sp = virt_to_page(block);
+        if (PageSlab(sp)) {
                 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
                 unsigned int *m = (unsigned int *)(block - align);
                 return SLOB_UNITS(*m) * SLOB_UNIT;
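
As background for the kfree()/ksize() hunks: for sub-page allocations, slob stores the requested size in an unsigned int header placed align bytes before the pointer it hands out, which is why both functions step back by align to recover it. A minimal user-space sketch of that layout, assuming a fixed alignment of sizeof(unsigned int) in place of the kernel's max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); toy_kmalloc/toy_ksize are hypothetical names, illustrative only:

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN). */
    #define MIN_ALIGN sizeof(unsigned int)

    /* Store the requested size just in front of the returned block,
     * as slob does for sub-page kmalloc() objects. */
    static void *toy_kmalloc(size_t size)
    {
            unsigned char *p = malloc(size + MIN_ALIGN);

            if (!p)
                    return NULL;
            *(unsigned int *)p = (unsigned int)size;  /* size header */
            return p + MIN_ALIGN;                     /* caller sees the payload */
    }

    /* Step back over the header to recover the stored size,
     * mirroring the ksize() hunk above. */
    static size_t toy_ksize(const void *block)
    {
            const unsigned int *m =
                    (const unsigned int *)((const char *)block - MIN_ALIGN);
            return *m;
    }

    int main(void)
    {
            void *b = toy_kmalloc(100);

            if (!b)
                    return 1;
            printf("stored size: %zu\n", toy_ksize(b));  /* prints 100 */
            free((char *)b - MIN_ALIGN);
            return 0;
    }

Note that the real ksize() returns SLOB_UNITS(*m) * SLOB_UNIT, i.e. the stored size rounded up to whole slob units, not the raw request as in this sketch.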