Diffstat (limited to 'mm/slub.c'):
 mm/slub.c | 33 ++++++++++++++++-----------------
 1 file changed, 16 insertions(+), 17 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -374,14 +374,8 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 static void set_track(struct kmem_cache *s, void *object,
 			enum track_item alloc, unsigned long addr)
 {
-	struct track *p;
+	struct track *p = get_track(s, object, alloc);
 
-	if (s->offset)
-		p = object + s->offset + sizeof(void *);
-	else
-		p = object + s->inuse;
-
-	p += alloc;
 	if (addr) {
 		p->addr = addr;
 		p->cpu = smp_processor_id();
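For context, the get_track() helper named in the hunk header already performs the pointer arithmetic that this hunk drops from set_track(). A sketch of that helper, reconstructed from the removed lines above (the exact body in mm/slub.c may differ in detail):

static struct track *get_track(struct kmem_cache *s, void *object,
			enum track_item alloc)
{
	struct track *p;

	if (s->offset)
		/* tracking data lives after the free pointer */
		p = object + s->offset + sizeof(void *);
	else
		/* tracking data lives after the object itself */
		p = object + s->inuse;

	return p + alloc;	/* select the TRACK_ALLOC or TRACK_FREE slot */
}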
@@ -1724,7 +1718,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	c = get_cpu_slab(s, smp_processor_id());
 	debug_check_no_locks_freed(object, c->objsize);
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(object, s->objsize);
+		debug_check_no_obj_freed(object, c->objsize);
 	if (likely(page == c->page && c->node >= 0)) {
 		object[c->offset] = c->freelist;
 		c->freelist = object;
@@ -1844,6 +1838,7 @@ static inline int calculate_order(int size)
 	int order;
 	int min_objects;
 	int fraction;
+	int max_objects;
 
 	/*
 	 * Attempt to find best configuration for a slab. This
@@ -1856,6 +1851,9 @@ static inline int calculate_order(int size)
 	min_objects = slub_min_objects;
 	if (!min_objects)
 		min_objects = 4 * (fls(nr_cpu_ids) + 1);
+	max_objects = (PAGE_SIZE << slub_max_order)/size;
+	min_objects = min(min_objects, max_objects);
+
 	while (min_objects > 1) {
 		fraction = 16;
 		while (fraction >= 4) {
@@ -1865,7 +1863,7 @@ static inline int calculate_order(int size)
 				return order;
 			fraction /= 2;
 		}
-		min_objects /= 2;
+		min_objects --;
 	}
 
 	/*
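Taken together, the two calculate_order() hunks bound the starting object count by what actually fits in a maximum-order slab, and then step that target down one object at a time instead of halving it, so fewer candidate configurations are skipped. A minimal user-space sketch of the new cap, assuming 4 KiB pages, slub_max_order = 3 and a hypothetical 4096-byte object (none of these values come from the diff):

#include <stdio.h>

#define PAGE_SIZE 4096UL			/* assumed 4 KiB pages */

int main(void)
{
	unsigned long slub_max_order = 3;	/* assumed maximum slab order */
	unsigned long size = 4096;		/* hypothetical object size */
	unsigned long fls_nr_cpu_ids = 5;	/* stand-in for fls(nr_cpu_ids) */

	unsigned long min_objects = 4 * (fls_nr_cpu_ids + 1);
	/* most objects that fit into one maximum-order slab */
	unsigned long max_objects = (PAGE_SIZE << slub_max_order) / size;

	if (min_objects > max_objects)
		min_objects = max_objects;	/* the cap this diff introduces */

	printf("max_objects = %lu, starting min_objects = %lu\n",
	       max_objects, min_objects);
	return 0;
}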
@@ -2478,7 +2476,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 *		Kmalloc subsystem
 *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 static int __init setup_slub_min_order(char *str)
@@ -2540,7 +2538,7 @@ panic:
 }
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
+static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
 
 static void sysfs_add_func(struct work_struct *w)
 {
@@ -2661,7 +2659,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
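The new SLUB_PAGE_SHIFT and SLUB_MAX_SIZE constants replace the open-coded PAGE_SHIFT + 1 array bound and PAGE_SIZE cut-off used in the hunks above and below. Their definitions are not part of this diff; assuming they are introduced in include/linux/slub_def.h, they would look roughly like this:

/*
 * Assumed definitions, not shown in this diff: the largest object size
 * still served by a kmalloc cache, and the number of kmalloc cache slots
 * needed to cover sizes up to that limit.
 */
#define SLUB_MAX_SIZE (2 * PAGE_SIZE)

#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)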
@@ -2689,7 +2687,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, flags, node);
 
 	s = get_slab(size, flags);
@@ -2739,6 +2737,7 @@ size_t ksize(const void *object)
 	 */
 	return s->size;
 }
+EXPORT_SYMBOL(ksize);
 
 void kfree(const void *x)
 {
@@ -2988,7 +2987,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
@@ -3025,7 +3024,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -3225,7 +3224,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, gfpflags);
 
 	s = get_slab(size, gfpflags);
@@ -3241,7 +3240,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);
 
 	s = get_slab(size, gfpflags);
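All four kmalloc paths touched above now hand anything larger than SLUB_MAX_SIZE to kmalloc_large()/kmalloc_large_node() instead of a kmalloc cache. As a rough, assumed sketch of what that fallback does (the real inline helper lives outside this diff and may differ):

/* Assumed sketch of the large-allocation fallback, not part of this diff. */
static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	/* Round up to whole pages and go straight to the page allocator. */
	return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
}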