author     Christoph Lameter <cl@linux.com>    2012-06-13 11:24:56 -0400
committer  Pekka Enberg <penberg@kernel.org>   2012-06-14 02:20:05 -0400
commit     350260889b251821e770573dfd65cd851b4ef781 (patch)
tree       298a87d2d54f2f3e99138f1798d242d4e7149657 /mm
parent     e571b0ad3495be5793e54e21cd244c4545c49d88 (diff)
slab: Remove some accessors
Those are rather trivial now and it's better to see inline what is really going on.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm')
-rw-r--r--   mm/slab.c   35
1 file changed, 8 insertions(+), 27 deletions(-)
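
The removed helpers were one-line wrappers around the slab_cache and slab_page fields of struct page, so the patch simply open-codes those field accesses at each call site. A condensed before/after sketch of the pattern, taken from the hunks below (not a complete listing):

    /* Before: trivial accessors hid the struct page fields. */
    static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
    {
            page->slab_cache = cache;
    }

    static inline struct slab *page_get_slab(struct page *page)
    {
            BUG_ON(!PageSlab(page));
            return page->slab_page;
    }

    /* After: callers such as slab_map_pages() touch the fields directly. */
    page->slab_cache = cache;
    page->slab_page = slab;
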
diff --git a/mm/slab.c b/mm/slab.c
index af05147d7abd..28a8f7d29d4a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -489,16 +489,6 @@ EXPORT_SYMBOL(slab_buffer_size);
 static int slab_max_order = SLAB_MAX_ORDER_LO;
 static bool slab_max_order_set __initdata;
 
-/*
- * Functions for storing/retrieving the cachep and or slab from the page
- * allocator. These are used to find the slab an obj belongs to. With kfree(),
- * these are used to find the cache which an obj belongs to.
- */
-static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
-{
-        page->slab_cache = cache;
-}
-
 static inline struct kmem_cache *page_get_cache(struct page *page)
 {
         page = compound_head(page);
@@ -506,27 +496,18 @@ static inline struct kmem_cache *page_get_cache(struct page *page)
         return page->slab_cache;
 }
 
-static inline void page_set_slab(struct page *page, struct slab *slab)
-{
-        page->slab_page = slab;
-}
-
-static inline struct slab *page_get_slab(struct page *page)
-{
-        BUG_ON(!PageSlab(page));
-        return page->slab_page;
-}
-
 static inline struct kmem_cache *virt_to_cache(const void *obj)
 {
         struct page *page = virt_to_head_page(obj);
-        return page_get_cache(page);
+        return page->slab_cache;
 }
 
 static inline struct slab *virt_to_slab(const void *obj)
 {
         struct page *page = virt_to_head_page(obj);
-        return page_get_slab(page);
+
+        VM_BUG_ON(!PageSlab(page));
+        return page->slab_page;
 }
 
 static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
@@ -2918,8 +2899,8 @@ static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
         nr_pages <<= cache->gfporder;
 
         do {
-                page_set_cache(page, cache);
-                page_set_slab(page, slab);
+                page->slab_cache = cache;
+                page->slab_page = slab;
                 page++;
         } while (--nr_pages);
 }
@@ -3057,7 +3038,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
         kfree_debugcheck(objp);
         page = virt_to_head_page(objp);
 
-        slabp = page_get_slab(page);
+        slabp = page->slab_page;
 
         if (cachep->flags & SLAB_RED_ZONE) {
                 verify_redzone_free(cachep, objp);
@@ -3261,7 +3242,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
                 struct slab *slabp;
                 unsigned objnr;
 
-                slabp = page_get_slab(virt_to_head_page(objp));
+                slabp = virt_to_head_page(objp)->slab_page;
                 objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
                 slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
         }
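
One behavioural nuance in the conversion: page_get_slab() used an unconditional BUG_ON(!PageSlab(page)), while the inlined check in virt_to_slab() uses VM_BUG_ON(), which is only an active assertion when CONFIG_DEBUG_VM is enabled. A minimal sketch of that distinction, assuming the usual mmdebug.h-style definition (the exact no-op fallback has varied between kernel versions):

    #ifdef CONFIG_DEBUG_VM
    #define VM_BUG_ON(cond) BUG_ON(cond)        /* debug builds: trap if the condition holds */
    #else
    #define VM_BUG_ON(cond) do { } while (0)    /* non-debug builds: check compiled away */
    #endif

So the slab-page sanity check is kept for debug configurations but no longer costs anything in production builds.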