diff options
Diffstat (limited to 'mm/slab.c')
-rw-r--r-- | mm/slab.c | 55 |
1 file changed, 33 insertions, 22 deletions
@@ -565,14 +565,29 @@ static void **dbg_userword(kmem_cache_t *cachep, void *objp) | |||
565 | #define BREAK_GFP_ORDER_LO 0 | 565 | #define BREAK_GFP_ORDER_LO 0 |
566 | static int slab_break_gfp_order = BREAK_GFP_ORDER_LO; | 566 | static int slab_break_gfp_order = BREAK_GFP_ORDER_LO; |
567 | 567 | ||
568 | /* Macros for storing/retrieving the cachep and or slab from the | 568 | /* Functions for storing/retrieving the cachep and or slab from the |
569 | * global 'mem_map'. These are used to find the slab an obj belongs to. | 569 | * global 'mem_map'. These are used to find the slab an obj belongs to. |
570 | * With kfree(), these are used to find the cache which an obj belongs to. | 570 | * With kfree(), these are used to find the cache which an obj belongs to. |
571 | */ | 571 | */ |
572 | #define SET_PAGE_CACHE(pg,x) ((pg)->lru.next = (struct list_head *)(x)) | 572 | static inline void page_set_cache(struct page *page, struct kmem_cache *cache) |
573 | #define GET_PAGE_CACHE(pg) ((kmem_cache_t *)(pg)->lru.next) | 573 | { |
574 | #define SET_PAGE_SLAB(pg,x) ((pg)->lru.prev = (struct list_head *)(x)) | 574 | page->lru.next = (struct list_head *)cache; |
575 | #define GET_PAGE_SLAB(pg) ((struct slab *)(pg)->lru.prev) | 575 | } |
576 | |||
577 | static inline struct kmem_cache *page_get_cache(struct page *page) | ||
578 | { | ||
579 | return (struct kmem_cache *)page->lru.next; | ||
580 | } | ||
581 | |||
582 | static inline void page_set_slab(struct page *page, struct slab *slab) | ||
583 | { | ||
584 | page->lru.prev = (struct list_head *)slab; | ||
585 | } | ||
586 | |||
587 | static inline struct slab *page_get_slab(struct page *page) | ||
588 | { | ||
589 | return (struct slab *)page->lru.prev; | ||
590 | } | ||
576 | 591 | ||
577 | /* These are the default caches for kmalloc. Custom caches can have other sizes. */ | 592 | /* These are the default caches for kmalloc. Custom caches can have other sizes. */ |
578 | struct cache_sizes malloc_sizes[] = { | 593 | struct cache_sizes malloc_sizes[] = { |
@@ -1190,11 +1205,7 @@ static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid) | |||
1190 | int i; | 1205 | int i; |
1191 | 1206 | ||
1192 | flags |= cachep->gfpflags; | 1207 | flags |= cachep->gfpflags; |
1193 | if (likely(nodeid == -1)) { | 1208 | page = alloc_pages_node(nodeid, flags, cachep->gfporder); |
1194 | page = alloc_pages(flags, cachep->gfporder); | ||
1195 | } else { | ||
1196 | page = alloc_pages_node(nodeid, flags, cachep->gfporder); | ||
1197 | } | ||
1198 | if (!page) | 1209 | if (!page) |
1199 | return NULL; | 1210 | return NULL; |
1200 | addr = page_address(page); | 1211 | addr = page_address(page); |
@@ -1368,7 +1379,7 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp) | |||
1368 | /* Print some data about the neighboring objects, if they | 1379 | /* Print some data about the neighboring objects, if they |
1369 | * exist: | 1380 | * exist: |
1370 | */ | 1381 | */ |
1371 | struct slab *slabp = GET_PAGE_SLAB(virt_to_page(objp)); | 1382 | struct slab *slabp = page_get_slab(virt_to_page(objp)); |
1372 | int objnr; | 1383 | int objnr; |
1373 | 1384 | ||
1374 | objnr = (objp-slabp->s_mem)/cachep->objsize; | 1385 | objnr = (objp-slabp->s_mem)/cachep->objsize; |
@@ -2138,8 +2149,8 @@ static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp) | |||
2138 | i = 1 << cachep->gfporder; | 2149 | i = 1 << cachep->gfporder; |
2139 | page = virt_to_page(objp); | 2150 | page = virt_to_page(objp); |
2140 | do { | 2151 | do { |
2141 | SET_PAGE_CACHE(page, cachep); | 2152 | page_set_cache(page, cachep); |
2142 | SET_PAGE_SLAB(page, slabp); | 2153 | page_set_slab(page, slabp); |
2143 | page++; | 2154 | page++; |
2144 | } while (--i); | 2155 | } while (--i); |
2145 | } | 2156 | } |
@@ -2269,14 +2280,14 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp, | |||
2269 | kfree_debugcheck(objp); | 2280 | kfree_debugcheck(objp); |
2270 | page = virt_to_page(objp); | 2281 | page = virt_to_page(objp); |
2271 | 2282 | ||
2272 | if (GET_PAGE_CACHE(page) != cachep) { | 2283 | if (page_get_cache(page) != cachep) { |
2273 | printk(KERN_ERR "mismatch in kmem_cache_free: expected cache %p, got %p\n", | 2284 | printk(KERN_ERR "mismatch in kmem_cache_free: expected cache %p, got %p\n", |
2274 | GET_PAGE_CACHE(page),cachep); | 2285 | page_get_cache(page),cachep); |
2275 | printk(KERN_ERR "%p is %s.\n", cachep, cachep->name); | 2286 | printk(KERN_ERR "%p is %s.\n", cachep, cachep->name); |
2276 | printk(KERN_ERR "%p is %s.\n", GET_PAGE_CACHE(page), GET_PAGE_CACHE(page)->name); | 2287 | printk(KERN_ERR "%p is %s.\n", page_get_cache(page), page_get_cache(page)->name); |
2277 | WARN_ON(1); | 2288 | WARN_ON(1); |
2278 | } | 2289 | } |
2279 | slabp = GET_PAGE_SLAB(page); | 2290 | slabp = page_get_slab(page); |
2280 | 2291 | ||
2281 | if (cachep->flags & SLAB_RED_ZONE) { | 2292 | if (cachep->flags & SLAB_RED_ZONE) { |
2282 | if (*dbg_redzone1(cachep, objp) != RED_ACTIVE || *dbg_redzone2(cachep, objp) != RED_ACTIVE) { | 2293 | if (*dbg_redzone1(cachep, objp) != RED_ACTIVE || *dbg_redzone2(cachep, objp) != RED_ACTIVE) { |
@@ -2628,7 +2639,7 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int n | |||
2628 | struct slab *slabp; | 2639 | struct slab *slabp; |
2629 | unsigned int objnr; | 2640 | unsigned int objnr; |
2630 | 2641 | ||
2631 | slabp = GET_PAGE_SLAB(virt_to_page(objp)); | 2642 | slabp = page_get_slab(virt_to_page(objp)); |
2632 | l3 = cachep->nodelists[node]; | 2643 | l3 = cachep->nodelists[node]; |
2633 | list_del(&slabp->list); | 2644 | list_del(&slabp->list); |
2634 | objnr = (objp - slabp->s_mem) / cachep->objsize; | 2645 | objnr = (objp - slabp->s_mem) / cachep->objsize; |
@@ -2744,7 +2755,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp) | |||
2744 | #ifdef CONFIG_NUMA | 2755 | #ifdef CONFIG_NUMA |
2745 | { | 2756 | { |
2746 | struct slab *slabp; | 2757 | struct slab *slabp; |
2747 | slabp = GET_PAGE_SLAB(virt_to_page(objp)); | 2758 | slabp = page_get_slab(virt_to_page(objp)); |
2748 | if (unlikely(slabp->nodeid != numa_node_id())) { | 2759 | if (unlikely(slabp->nodeid != numa_node_id())) { |
2749 | struct array_cache *alien = NULL; | 2760 | struct array_cache *alien = NULL; |
2750 | int nodeid = slabp->nodeid; | 2761 | int nodeid = slabp->nodeid; |
@@ -2830,7 +2841,7 @@ int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr) | |||
2830 | page = virt_to_page(ptr); | 2841 | page = virt_to_page(ptr); |
2831 | if (unlikely(!PageSlab(page))) | 2842 | if (unlikely(!PageSlab(page))) |
2832 | goto out; | 2843 | goto out; |
2833 | if (unlikely(GET_PAGE_CACHE(page) != cachep)) | 2844 | if (unlikely(page_get_cache(page) != cachep)) |
2834 | goto out; | 2845 | goto out; |
2835 | return 1; | 2846 | return 1; |
2836 | out: | 2847 | out: |
@@ -3026,7 +3037,7 @@ void kfree(const void *objp) | |||
3026 | return; | 3037 | return; |
3027 | local_irq_save(flags); | 3038 | local_irq_save(flags); |
3028 | kfree_debugcheck(objp); | 3039 | kfree_debugcheck(objp); |
3029 | c = GET_PAGE_CACHE(virt_to_page(objp)); | 3040 | c = page_get_cache(virt_to_page(objp)); |
3030 | __cache_free(c, (void*)objp); | 3041 | __cache_free(c, (void*)objp); |
3031 | local_irq_restore(flags); | 3042 | local_irq_restore(flags); |
3032 | } | 3043 | } |
@@ -3596,7 +3607,7 @@ unsigned int ksize(const void *objp) | |||
3596 | if (unlikely(objp == NULL)) | 3607 | if (unlikely(objp == NULL)) |
3597 | return 0; | 3608 | return 0; |
3598 | 3609 | ||
3599 | return obj_reallen(GET_PAGE_CACHE(virt_to_page(objp))); | 3610 | return obj_reallen(page_get_cache(virt_to_page(objp))); |
3600 | } | 3611 | } |
3601 | 3612 | ||
3602 | 3613 | ||