Diffstat (limited to 'mm')
 mm/slub.c | 150 ++++++++++------------------------------------
 1 file changed, 39 insertions(+), 111 deletions(-)
@@ -2064,7 +2064,7 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 
 static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[KMALLOC_CACHES]);
 
-static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
+static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
 {
 	if (s < kmalloc_caches + KMALLOC_CACHES && s >= kmalloc_caches)
 		/*
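
The guard at the top of alloc_kmem_cache_cpus() relies on kmalloc_caches being a statically sized array, so "is this one of the boot-time caches?" reduces to a pointer-range comparison. A minimal, self-contained sketch of the idiom (the names here are illustrative, not from slub.c):

	#define NCACHES 32
	static struct cache { int size; } caches[NCACHES];

	/* True iff c points into the static caches[] array. */
	static int is_static_cache(const struct cache *c)
	{
		return c >= caches && c < caches + NCACHES;
	}
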
@@ -2091,7 +2091,7 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
  * when allocating for the kmalloc_node_cache. This is used for bootstrapping
  * memory on a fresh node that has no slab structures yet.
  */
-static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node)
+static void early_kmem_cache_node_alloc(int node)
 {
 	struct page *page;
 	struct kmem_cache_node *n;
@@ -2099,7 +2099,7 @@ static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node)
 
 	BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
 
-	page = new_slab(kmalloc_caches, gfpflags, node);
+	page = new_slab(kmalloc_caches, GFP_NOWAIT, node);
 
 	BUG_ON(!page);
 	if (page_to_nid(page) != node) {
@@ -2143,7 +2143,7 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
 	}
 }
 
-static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
+static int init_kmem_cache_nodes(struct kmem_cache *s)
 {
 	int node;
 
@@ -2151,11 +2151,11 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 		struct kmem_cache_node *n;
 
 		if (slab_state == DOWN) {
-			early_kmem_cache_node_alloc(gfpflags, node);
+			early_kmem_cache_node_alloc(node);
 			continue;
 		}
 		n = kmem_cache_alloc_node(kmalloc_caches,
-						gfpflags, node);
+						GFP_KERNEL, node);
 
 		if (!n) {
 			free_kmem_cache_nodes(s);
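
Dropping the gfp_t parameter from this path is possible because the allocation context is already implied by slab_state: before the allocator is up (slab_state == DOWN) the code must not sleep, so GFP_NOWAIT is the only sensible choice, and once kmem_cache_create() is usable the caller may block, so GFP_KERNEL is. A sketch of that invariant, stated as an assumption rather than code from the patch:

	/* Assumed invariant: allocation context is a function of slab_state. */
	static gfp_t node_alloc_gfp(void)
	{
		return slab_state == DOWN ? GFP_NOWAIT : GFP_KERNEL;
	}
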
@@ -2172,7 +2172,7 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
 {
 }
 
-static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
+static int init_kmem_cache_nodes(struct kmem_cache *s)
 {
 	init_kmem_cache_node(&s->local_node, s);
 	return 1;
@@ -2312,7 +2312,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 
 }
 
-static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
+static int kmem_cache_open(struct kmem_cache *s,
 		const char *name, size_t size,
 		size_t align, unsigned long flags,
 		void (*ctor)(void *))
@@ -2348,10 +2348,10 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 #ifdef CONFIG_NUMA
 	s->remote_node_defrag_ratio = 1000;
 #endif
-	if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
+	if (!init_kmem_cache_nodes(s))
 		goto error;
 
-	if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
+	if (alloc_kmem_cache_cpus(s))
 		return 1;
 
 	free_kmem_cache_nodes(s);
@@ -2510,6 +2510,10 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 struct kmem_cache kmalloc_caches[KMALLOC_CACHES] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
+#ifdef CONFIG_ZONE_DMA
+static struct kmem_cache kmalloc_dma_caches[SLUB_PAGE_SHIFT];
+#endif
+
 static int __init setup_slub_min_order(char *str)
 {
 	get_option(&str, &slub_min_order);
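
kmalloc_dma_caches is deliberately parallel to kmalloc_caches: the same size-class index selects either the regular or the DMA variant of a cache. For sizes above 192 SLUB derives that index as fls(size - 1); a worked example using a portable stand-in for the kernel's fls() (the helper name is made up for illustration):

	/* Position of the most significant set bit, 1-based (0 for x == 0). */
	static int fls_example(unsigned int x)
	{
		int r = 0;

		while (x) {
			r++;
			x >>= 1;
		}
		return r;
	}

	/* fls_example(512 - 1) == 9, so a 512-byte request uses index 9:
	 * kmalloc_caches[9] normally, kmalloc_dma_caches[9] under GFP_DMA. */
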
@@ -2546,116 +2550,26 @@ static int __init setup_slub_nomerge(char *str)
 
 __setup("slub_nomerge", setup_slub_nomerge);
 
-static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
-		const char *name, int size, gfp_t gfp_flags)
+static void create_kmalloc_cache(struct kmem_cache *s,
+		const char *name, int size, unsigned int flags)
 {
-	unsigned int flags = 0;
-
-	if (gfp_flags & SLUB_DMA)
-		flags = SLAB_CACHE_DMA;
-
 	/*
 	 * This function is called with IRQs disabled during early-boot on
 	 * single CPU so there's no need to take slub_lock here.
 	 */
-	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
+	if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
 								flags, NULL))
 		goto panic;
 
 	list_add(&s->list, &slab_caches);
 
-	if (sysfs_slab_add(s))
-		goto panic;
-	return s;
+	if (!sysfs_slab_add(s))
+		return;
 
 panic:
 	panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
 }
 
-#ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
-
-static void sysfs_add_func(struct work_struct *w)
-{
-	struct kmem_cache *s;
-
-	down_write(&slub_lock);
-	list_for_each_entry(s, &slab_caches, list) {
-		if (s->flags & __SYSFS_ADD_DEFERRED) {
-			s->flags &= ~__SYSFS_ADD_DEFERRED;
-			sysfs_slab_add(s);
-		}
-	}
-	up_write(&slub_lock);
-}
-
-static DECLARE_WORK(sysfs_add_work, sysfs_add_func);
-
-static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
-{
-	struct kmem_cache *s;
-	char *text;
-	size_t realsize;
-	unsigned long slabflags;
-	int i;
-
-	s = kmalloc_caches_dma[index];
-	if (s)
-		return s;
-
-	/* Dynamically create dma cache */
-	if (flags & __GFP_WAIT)
-		down_write(&slub_lock);
-	else {
-		if (!down_write_trylock(&slub_lock))
-			goto out;
-	}
-
-	if (kmalloc_caches_dma[index])
-		goto unlock_out;
-
-	realsize = kmalloc_caches[index].objsize;
-	text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
-			 (unsigned int)realsize);
-
-	s = NULL;
-	for (i = 0; i < KMALLOC_CACHES; i++)
-		if (!kmalloc_caches[i].size)
-			break;
-
-	BUG_ON(i >= KMALLOC_CACHES);
-	s = kmalloc_caches + i;
-
-	/*
-	 * Must defer sysfs creation to a workqueue because we don't know
-	 * what context we are called from. Before sysfs comes up, we don't
-	 * need to do anything because our sysfs initcall will start by
-	 * adding all existing slabs to sysfs.
-	 */
-	slabflags = SLAB_CACHE_DMA|SLAB_NOTRACK;
-	if (slab_state >= SYSFS)
-		slabflags |= __SYSFS_ADD_DEFERRED;
-
-	if (!text || !kmem_cache_open(s, flags, text,
-			realsize, ARCH_KMALLOC_MINALIGN, slabflags, NULL)) {
-		s->size = 0;
-		kfree(text);
-		goto unlock_out;
-	}
-
-	list_add(&s->list, &slab_caches);
-	kmalloc_caches_dma[index] = s;
-
-	if (slab_state >= SYSFS)
-		schedule_work(&sysfs_add_work);
-
-unlock_out:
-	up_write(&slub_lock);
-out:
-	return kmalloc_caches_dma[index];
-}
-#endif
-
 /*
  * Conversion table for small slabs sizes / 8 to the index in the
  * kmalloc array. This is necessary for slabs < 192 since we have non power
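
The dma_kmalloc_cache() path removed above existed because DMA caches were created on first use, possibly from atomic context, which is what forced the trylock fallback and the deferred sysfs registration through a workqueue. A condensed sketch of the constraint it had to work around (simplified, not the original code):

	if (flags & __GFP_WAIT)
		down_write(&slub_lock);		/* process context: may sleep */
	else if (!down_write_trylock(&slub_lock))
		return NULL;			/* atomic context: must not block */

Creating every DMA cache at boot removes both problems at once: the lookup needs no lock, and sysfs registration happens in the ordinary boot sequence.
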
@@ -2708,7 +2622,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 
 #ifdef CONFIG_ZONE_DMA
 	if (unlikely((flags & SLUB_DMA)))
-		return dma_kmalloc_cache(index, flags);
+		return &kmalloc_dma_caches[index];
 
 #endif
 	return &kmalloc_caches[index];
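
With the caches preallocated, a DMA-zone allocation resolves to a plain array read, even in atomic context. A hypothetical caller:

	/* 256 bytes from ZONE_DMA in interrupt context: fls(255) == 8,
	 * so get_slab() simply returns &kmalloc_dma_caches[8]. */
	void *buf = kmalloc(256, GFP_ATOMIC | GFP_DMA);
	if (buf) {
		/* ... program an ISA-style DMA transfer ... */
		kfree(buf);
	}
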
@@ -3047,7 +2961,7 @@ void __init kmem_cache_init(void)
 	 * kmem_cache_open for slab_state == DOWN.
 	 */
 	create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
-		sizeof(struct kmem_cache_node), GFP_NOWAIT);
+		sizeof(struct kmem_cache_node), 0);
 	kmalloc_caches[0].refcount = -1;
 	caches++;
 
@@ -3060,18 +2974,18 @@ void __init kmem_cache_init(void)
 	/* Caches that are not of the two-to-the-power-of size */
 	if (KMALLOC_MIN_SIZE <= 32) {
 		create_kmalloc_cache(&kmalloc_caches[1],
-				"kmalloc-96", 96, GFP_NOWAIT);
+				"kmalloc-96", 96, 0);
 		caches++;
 	}
 	if (KMALLOC_MIN_SIZE <= 64) {
 		create_kmalloc_cache(&kmalloc_caches[2],
-				"kmalloc-192", 192, GFP_NOWAIT);
+				"kmalloc-192", 192, 0);
 		caches++;
 	}
 
 	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
-			"kmalloc", 1 << i, GFP_NOWAIT);
+			"kmalloc", 1 << i, 0);
 		caches++;
 	}
 
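As a concrete example, assuming 4K pages, KMALLOC_SHIFT_LOW == 3 and SLUB_PAGE_SHIFT == PAGE_SHIFT + 2 (all config- and arch-dependent, so treat the numbers as assumptions), this loop creates the power-of-two caches from 8 up to 8192 bytes, alongside the odd 96- and 192-byte caches set up just above.
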
@@ -3134,6 +3048,20 @@ void __init kmem_cache_init(void)
 	kmem_size = sizeof(struct kmem_cache);
 #endif
 
+#ifdef CONFIG_ZONE_DMA
+	for (i = 1; i < SLUB_PAGE_SHIFT; i++) {
+		struct kmem_cache *s = &kmalloc_caches[i];
+
+		if (s->size) {
+			char *name = kasprintf(GFP_NOWAIT,
+				 "dma-kmalloc-%d", s->objsize);
+
+			BUG_ON(!name);
+			create_kmalloc_cache(&kmalloc_dma_caches[i],
+				name, s->objsize, SLAB_CACHE_DMA);
+		}
+	}
+#endif
 	printk(KERN_INFO
 		"SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
 		" CPUs=%d, Nodes=%d\n",
@@ -3236,7 +3164,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 
 	s = kmalloc(kmem_size, GFP_KERNEL);
 	if (s) {
-		if (kmem_cache_open(s, GFP_KERNEL, name,
+		if (kmem_cache_open(s, name,
 				size, align, flags, ctor)) {
 			list_add(&s->list, &slab_caches);
 			if (sysfs_slab_add(s)) {