Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c | 49 ++++++++++++++++++++++++++++++-----------------
1 file changed, 32 insertions(+), 17 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 8a73dcfc6a27..15d25ae5b686 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -565,14 +565,29 @@ static void **dbg_userword(kmem_cache_t *cachep, void *objp)
 #define BREAK_GFP_ORDER_LO 0
 static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
 
-/* Macros for storing/retrieving the cachep and or slab from the
+/* Functions for storing/retrieving the cachep and or slab from the
  * global 'mem_map'. These are used to find the slab an obj belongs to.
  * With kfree(), these are used to find the cache which an obj belongs to.
  */
-#define SET_PAGE_CACHE(pg,x)  ((pg)->lru.next = (struct list_head *)(x))
-#define GET_PAGE_CACHE(pg)    ((kmem_cache_t *)(pg)->lru.next)
-#define SET_PAGE_SLAB(pg,x)   ((pg)->lru.prev = (struct list_head *)(x))
-#define GET_PAGE_SLAB(pg)     ((struct slab *)(pg)->lru.prev)
+static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
+{
+        page->lru.next = (struct list_head *)cache;
+}
+
+static inline struct kmem_cache *page_get_cache(struct page *page)
+{
+        return (struct kmem_cache *)page->lru.next;
+}
+
+static inline void page_set_slab(struct page *page, struct slab *slab)
+{
+        page->lru.prev = (struct list_head *)slab;
+}
+
+static inline struct slab *page_get_slab(struct page *page)
+{
+        return (struct slab *)page->lru.prev;
+}
 
 /* These are the default caches for kmalloc. Custom caches can have other sizes. */
 struct cache_sizes malloc_sizes[] = {
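
Compared with the old macros, the inline functions buy compile-time type checking: page_set_cache() now only accepts a struct kmem_cache *, and page_set_slab() a struct slab *, where the macros' casts would silently swallow any pointer. Below is a standalone sketch of the same pointer-stashing trick; the struct definitions are pared-down stand-ins for illustration, not the kernel's.

#include <assert.h>
#include <stdio.h>

/* Pared-down stand-ins for the kernel types; illustration only. */
struct list_head { struct list_head *next, *prev; };
struct page { struct list_head lru; };
struct kmem_cache { const char *name; };
struct slab { void *s_mem; };

/* Same trick as the patch: a page owned by the slab allocator is never
 * on an LRU list, so its lru.next/lru.prev fields are free to carry
 * back-pointers to the owning cache and slab. */
static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
{
        page->lru.next = (struct list_head *)cache;
}

static inline struct kmem_cache *page_get_cache(struct page *page)
{
        return (struct kmem_cache *)page->lru.next;
}

static inline void page_set_slab(struct page *page, struct slab *slab)
{
        page->lru.prev = (struct list_head *)slab;
}

static inline struct slab *page_get_slab(struct page *page)
{
        return (struct slab *)page->lru.prev;
}

int main(void)
{
        struct kmem_cache cache = { "demo-cache" };
        struct slab slab = { 0 };
        struct page page;

        page_set_cache(&page, &cache);
        page_set_slab(&page, &slab);

        /* Round trip: the stashed pointers come back intact and typed. */
        assert(page_get_cache(&page) == &cache);
        assert(page_get_slab(&page) == &slab);
        printf("page belongs to %s\n", page_get_cache(&page)->name);
        return 0;
}
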
@@ -1368,7 +1383,7 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
                /* Print some data about the neighboring objects, if they
                 * exist:
                 */
-               struct slab *slabp = GET_PAGE_SLAB(virt_to_page(objp));
+               struct slab *slabp = page_get_slab(virt_to_page(objp));
                int objnr;
 
                objnr = (objp-slabp->s_mem)/cachep->objsize;
@@ -2138,8 +2153,8 @@ static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
        i = 1 << cachep->gfporder;
        page = virt_to_page(objp);
        do {
-               SET_PAGE_CACHE(page, cachep);
-               SET_PAGE_SLAB(page, slabp);
+               page_set_cache(page, cachep);
+               page_set_slab(page, slabp);
                page++;
        } while (--i);
 }
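
set_slab_attr() stamps every one of the slab's 1 << gfporder pages, not just the first, because virt_to_page() on an object may resolve to any page inside a higher-order allocation. A toy illustration of that page count (the gfporder value is arbitrary):

#include <stdio.h>

int main(void)
{
        /* An order-2 slab: 1 << 2 = 4 contiguous pages, each of which
         * must carry the cache/slab back-pointers. */
        unsigned int gfporder = 2;
        unsigned int npages = 1u << gfporder;

        for (unsigned int i = 0; i < npages; i++)
                printf("tag page %u of %u\n", i + 1, npages);
        return 0;
}
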
@@ -2269,14 +2284,14 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
        kfree_debugcheck(objp);
        page = virt_to_page(objp);
 
-       if (GET_PAGE_CACHE(page) != cachep) {
+       if (page_get_cache(page) != cachep) {
                printk(KERN_ERR "mismatch in kmem_cache_free: expected cache %p, got %p\n",
-                               GET_PAGE_CACHE(page),cachep);
+                               page_get_cache(page),cachep);
                printk(KERN_ERR "%p is %s.\n", cachep, cachep->name);
-               printk(KERN_ERR "%p is %s.\n", GET_PAGE_CACHE(page), GET_PAGE_CACHE(page)->name);
+               printk(KERN_ERR "%p is %s.\n", page_get_cache(page), page_get_cache(page)->name);
                WARN_ON(1);
        }
-       slabp = GET_PAGE_SLAB(page);
+       slabp = page_get_slab(page);
 
        if (cachep->flags & SLAB_RED_ZONE) {
                if (*dbg_redzone1(cachep, objp) != RED_ACTIVE || *dbg_redzone2(cachep, objp) != RED_ACTIVE) {
@@ -2628,7 +2643,7 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int n
                struct slab *slabp;
                unsigned int objnr;
 
-               slabp = GET_PAGE_SLAB(virt_to_page(objp));
+               slabp = page_get_slab(virt_to_page(objp));
                l3 = cachep->nodelists[node];
                list_del(&slabp->list);
                objnr = (objp - slabp->s_mem) / cachep->objsize;
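
This hunk, like check_poison_obj() above, recovers an object's index by pointer arithmetic: objects sit contiguously from slabp->s_mem in objsize-sized strides, so objnr = (objp - slabp->s_mem) / cachep->objsize is just a scaled offset. A minimal demo of the same arithmetic (sizes and names are made up):

#include <stdio.h>

int main(void)
{
        /* A toy slab: 8 contiguous 64-byte objects, mirroring
         * objnr = (objp - slabp->s_mem) / cachep->objsize. */
        enum { OBJSIZE = 64, NUM_OBJS = 8 };
        static char s_mem[OBJSIZE * NUM_OBJS];

        char *objp = s_mem + 3 * OBJSIZE;      /* pointer to the 4th object */
        size_t objnr = (size_t)(objp - s_mem) / OBJSIZE;

        printf("object index: %zu\n", objnr);  /* prints 3 */
        return 0;
}
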
@@ -2744,7 +2759,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
 #ifdef CONFIG_NUMA
        {
                struct slab *slabp;
-               slabp = GET_PAGE_SLAB(virt_to_page(objp));
+               slabp = page_get_slab(virt_to_page(objp));
                if (unlikely(slabp->nodeid != numa_node_id())) {
                        struct array_cache *alien = NULL;
                        int nodeid = slabp->nodeid;
@@ -2830,7 +2845,7 @@ int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
        page = virt_to_page(ptr);
        if (unlikely(!PageSlab(page)))
                goto out;
-       if (unlikely(GET_PAGE_CACHE(page) != cachep))
+       if (unlikely(page_get_cache(page) != cachep))
                goto out;
        return 1;
 out:
@@ -3026,7 +3041,7 @@ void kfree(const void *objp)
                return;
        local_irq_save(flags);
        kfree_debugcheck(objp);
-       c = GET_PAGE_CACHE(virt_to_page(objp));
+       c = page_get_cache(virt_to_page(objp));
        __cache_free(c, (void*)objp);
        local_irq_restore(flags);
 }
@@ -3596,7 +3611,7 @@ unsigned int ksize(const void *objp)
        if (unlikely(objp == NULL))
                return 0;
 
-       return obj_reallen(GET_PAGE_CACHE(virt_to_page(objp)));
+       return obj_reallen(page_get_cache(virt_to_page(objp)));
 }
 
 