Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c	22
1 file changed, 17 insertions(+), 5 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index ba288b3877d1..c2f9e0a330ff 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -596,6 +596,18 @@ static inline struct slab *page_get_slab(struct page *page)
 	return (struct slab *)page->lru.prev;
 }
 
+static inline struct kmem_cache *virt_to_cache(const void *obj)
+{
+	struct page *page = virt_to_page(obj);
+	return page_get_cache(page);
+}
+
+static inline struct slab *virt_to_slab(const void *obj)
+{
+	struct page *page = virt_to_page(obj);
+	return page_get_slab(page);
+}
+
 /* These are the default caches for kmalloc. Custom caches can have other sizes. */
 struct cache_sizes malloc_sizes[] = {
 #define CACHE(x) { .cs_size = (x) },
@@ -1437,7 +1449,7 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
 		/* Print some data about the neighboring objects, if they
 		 * exist:
 		 */
-		struct slab *slabp = page_get_slab(virt_to_page(objp));
+		struct slab *slabp = virt_to_slab(objp);
 		int objnr;
 
 		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
@@ -2767,7 +2779,7 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects,
 		void *objp = objpp[i];
 		struct slab *slabp;
 
-		slabp = page_get_slab(virt_to_page(objp));
+		slabp = virt_to_slab(objp);
 		l3 = cachep->nodelists[node];
 		list_del(&slabp->list);
 		check_spinlock_acquired_node(cachep, node);
@@ -2867,7 +2879,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
 #ifdef CONFIG_NUMA
 	{
 		struct slab *slabp;
-		slabp = page_get_slab(virt_to_page(objp));
+		slabp = virt_to_slab(objp);
 		if (unlikely(slabp->nodeid != numa_node_id())) {
 			struct array_cache *alien = NULL;
 			int nodeid = slabp->nodeid;
@@ -3130,7 +3142,7 @@ void kfree(const void *objp)
 		return;
 	local_irq_save(flags);
 	kfree_debugcheck(objp);
-	c = page_get_cache(virt_to_page(objp));
+	c = virt_to_cache(objp);
 	mutex_debug_check_no_locks_freed(objp, obj_size(c));
 	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
@@ -3704,5 +3716,5 @@ unsigned int ksize(const void *objp)
 	if (unlikely(objp == NULL))
 		return 0;
 
-	return obj_size(page_get_cache(virt_to_page(objp)));
+	return obj_size(virt_to_cache(objp));
 }
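
For readers outside the kernel tree, here is a minimal user-space sketch of the pattern this diff applies: the open-coded two-step lookup page_get_cache(virt_to_page(obj)) is wrapped in a static inline helper so each call site reads as a single operation. Everything below is a simplified stand-in, not the kernel's real implementation; in particular, the single static the_page replaces the kernel's mem_map lookup keyed on the object's physical address.

/*
 * Stand-alone sketch of the helper pattern introduced by this diff.
 * Types and the page lookup are simplified stand-ins for the kernel's
 * virt_to_page()/page_get_cache().
 */
#include <assert.h>
#include <stddef.h>

struct kmem_cache { size_t obj_size; };
struct page { struct kmem_cache *cache; };

/* Stand-in for the kernel's virt_to_page(): one static page instead
 * of deriving the struct page from the object's address. */
static struct page the_page;

static struct page *virt_to_page(const void *obj)
{
	(void)obj;
	return &the_page;
}

static struct kmem_cache *page_get_cache(struct page *page)
{
	return page->cache;
}

/* The helper the diff adds: hide the two-step lookup behind one name. */
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	return page_get_cache(virt_to_page(obj));
}

int main(void)
{
	struct kmem_cache cache = { .obj_size = 32 };
	char obj[32];

	the_page.cache = &cache;

	/* Before: c = page_get_cache(virt_to_page(obj));
	 * After:  c = virt_to_cache(obj);                */
	assert(virt_to_cache(obj)->obj_size == sizeof(obj));
	return 0;
}

The kernel version keeps an intermediate struct page *page variable, and check_poison_obj(), free_block(), __cache_free(), kfree(), and ksize() all pick up the shorter virt_to_cache()/virt_to_slab() form in this diff.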