 include/linux/slub_def.h |  1 +
 mm/slab.c                |  1 -
 mm/slob.c                |  1 -
 mm/slub.c                | 27 +++++++++++++++++++--------
 4 files changed, 20 insertions(+), 10 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 5bad61a93f65..2f5c16b1aacd 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -46,6 +46,7 @@ struct kmem_cache_cpu {
 struct kmem_cache_node {
 	spinlock_t list_lock;	/* Protect partial list and nr_partial */
 	unsigned long nr_partial;
+	unsigned long min_partial;
 	struct list_head partial;
 #ifdef CONFIG_SLUB_DEBUG
 	atomic_long_t nr_slabs;
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4472,4 +4472,3 @@ size_t ksize(const void *objp)
 
 	return obj_size(virt_to_cache(objp));
 }
-EXPORT_SYMBOL(ksize);
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -519,7 +519,6 @@ size_t ksize(const void *block)
 	else
 		return sp->page.private;
 }
-EXPORT_SYMBOL(ksize);
 
 struct kmem_cache {
 	unsigned int size, align;
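The ksize() hunks in this patch (here, in mm/slab.c above, and in mm/slub.c below) drop the per-allocator EXPORT_SYMBOL(ksize) lines; since no file in the diffstat adds an export back, ksize() is no longer exported to modules by any of the three allocators, while its semantics are unchanged. As a usage reminder, not part of this patch: ksize() reports the size actually reserved for an allocation, which can exceed the requested size because of slab rounding. A minimal sketch, assuming power-of-two kmalloc caches:

	#include <linux/kernel.h>
	#include <linux/slab.h>

	static void ksize_demo(void)
	{
		void *p = kmalloc(13, GFP_KERNEL);	/* request 13 bytes */

		if (p) {
			/* Would print 16 with power-of-two kmalloc caches. */
			pr_info("usable size: %zu\n", ksize(p));
			kfree(p);
		}
	}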
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1329,7 +1329,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 		n = get_node(s, zone_to_nid(zone));
 
 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
-				n->nr_partial > MIN_PARTIAL) {
+				n->nr_partial > n->min_partial) {
 			page = get_partial_node(n);
 			if (page)
 				return page;
@@ -1381,7 +1381,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 		slab_unlock(page);
 	} else {
 		stat(c, DEACTIVATE_EMPTY);
-		if (n->nr_partial < MIN_PARTIAL) {
+		if (n->nr_partial < n->min_partial) {
 			/*
 			 * Adding an empty slab to the partial slabs in order
 			 * to avoid page allocator overhead. This slab needs
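Both hunks above switch a compile-time MIN_PARTIAL constant to the new per-node n->min_partial: get_any_partial() now steals from a remote node only when that node holds more than its own threshold, and unfreeze_slab() keeps an empty slab cached while the node sits below it. A simplified sketch of the resulting deactivation policy, assuming the add_partial()/discard_slab() helpers of this kernel (locking, statistics, and the full-list path omitted):

	if (page->inuse) {
		/* Slab still holds live objects: back onto the partial list. */
		add_partial(n, page, tail);
	} else if (n->nr_partial < n->min_partial) {
		/* Empty, but the node is short of cached slabs: keep it. */
		add_partial(n, page, 1);
	} else {
		/* Empty and the node has enough spares: return the page. */
		discard_slab(s, page);
	}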
@@ -1913,9 +1913,21 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
 #endif
 }
 
-static void init_kmem_cache_node(struct kmem_cache_node *n)
+static void
+init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 {
 	n->nr_partial = 0;
+
+	/*
+	 * The larger the object size is, the more pages we want on the partial
+	 * list to avoid pounding the page allocator excessively.
+	 */
+	n->min_partial = ilog2(s->size);
+	if (n->min_partial < MIN_PARTIAL)
+		n->min_partial = MIN_PARTIAL;
+	else if (n->min_partial > MAX_PARTIAL)
+		n->min_partial = MAX_PARTIAL;
+
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
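This clamp is the core of the patch: each node's partial-list floor now scales with the cache's object size instead of using one global constant. A standalone illustration of the arithmetic, assuming the MIN_PARTIAL = 5 and MAX_PARTIAL = 10 definitions in mm/slub.c of this era (min_partial_for() is a hypothetical helper, not kernel code):

	#include <linux/log2.h>

	/* Hypothetical helper mirroring the clamp in init_kmem_cache_node(). */
	static unsigned long min_partial_for(unsigned long size)
	{
		unsigned long n = ilog2(size);	/* floor(log2(size)) */

		if (n < 5)
			n = 5;
		else if (n > 10)
			n = 10;
		return n;
	}

	/*
	 * min_partial_for(32)   == 5   (ilog2(32)   ==  5)
	 * min_partial_for(192)  == 7   (ilog2(192)  ==  7, rounds down)
	 * min_partial_for(4096) == 10  (ilog2(4096) == 12, clamped)
	 */

So a cache of 4 KiB objects keeps empty slabs around until a node already has ten slabs on its partial list, at which point unfreeze_slab() starts handing pages back to the page allocator.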
@@ -2087,7 +2099,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 	init_object(kmalloc_caches, n, 1);
 	init_tracking(kmalloc_caches, n);
 #endif
-	init_kmem_cache_node(n);
+	init_kmem_cache_node(n, kmalloc_caches);
 	inc_slabs_node(kmalloc_caches, node, page->objects);
 
 	/*
@@ -2144,7 +2156,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 
 		}
 		s->node[node] = n;
-		init_kmem_cache_node(n);
+		init_kmem_cache_node(n, s);
 	}
 	return 1;
 }
@@ -2155,7 +2167,7 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
 
 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 {
-	init_kmem_cache_node(&s->local_node);
+	init_kmem_cache_node(&s->local_node, s);
 	return 1;
 }
 #endif
@@ -2715,7 +2727,6 @@ size_t ksize(const void *object)
 	 */
 	return s->size;
 }
-EXPORT_SYMBOL(ksize);
 
 void kfree(const void *x)
 {
@@ -2890,7 +2901,7 @@ static int slab_mem_going_online_callback(void *arg)
 			ret = -ENOMEM;
 			goto out;
 		}
-		init_kmem_cache_node(n);
+		init_kmem_cache_node(n, s);
 		s->node[nid] = n;
 	}
 out:
