Diffstat (limited to 'mm/slub.c'):

 mm/slub.c | 39 +--------------------------------------
 1 file changed, 1 insertion(+), 38 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -233,11 +233,7 @@ int slab_is_available(void)
 
 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
 {
-#ifdef CONFIG_NUMA
 	return s->node[node];
-#else
-	return &s->local_node;
-#endif
 }
 
 /* Verify that a pointer has an address that is valid within a slab page */
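With the fallback gone, get_node() is a plain array lookup on every configuration. That only works if struct kmem_cache carries the per-node pointer array unconditionally; a minimal sketch of the assumed post-patch layout (field names follow slub_def.h of this period, other members elided):

	struct kmem_cache {
		/* ... counters, lists, ctor, etc. elided ... */
		struct kmem_cache_node *node[MAX_NUMNODES];	/* !NUMA populates node 0 only */
	};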
@@ -871,7 +867,7 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
 	 * dilemma by deferring the increment of the count during
 	 * bootstrap (see early_kmem_cache_node_alloc).
 	 */
-	if (!NUMA_BUILD || n) {
+	if (n) {
 		atomic_long_inc(&n->nr_slabs);
 		atomic_long_add(objects, &n->total_objects);
 	}
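NUMA_BUILD is the constant-folding alias for CONFIG_NUMA, defined (as best I recall) in include/linux/kernel.h of this era, so on !NUMA kernels !NUMA_BUILD || n was compile-time true and the body ran unconditionally. Now that every build really allocates kmem_cache_node structures, testing n alone is equivalent:

	#ifdef CONFIG_NUMA
	#define NUMA_BUILD 1
	#else
	#define NUMA_BUILD 0
	#endif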
@@ -2112,7 +2108,6 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
 	return s->cpu_slab != NULL;
 }
 
-#ifdef CONFIG_NUMA
 static struct kmem_cache *kmem_cache_node;
 
 /*
@@ -2202,17 +2197,6 @@ static int init_kmem_cache_nodes(struct kmem_cache *s)
 	}
 	return 1;
 }
-#else
-static void free_kmem_cache_nodes(struct kmem_cache *s)
-{
-}
-
-static int init_kmem_cache_nodes(struct kmem_cache *s)
-{
-	init_kmem_cache_node(&s->local_node, s);
-	return 1;
-}
-#endif
 
 static void set_min_partial(struct kmem_cache *s, unsigned long min)
 {
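The #else stubs are gone, so the NUMA versions of init_kmem_cache_nodes() and free_kmem_cache_nodes() now serve all builds. For orientation, a from-memory sketch of what the surviving initializer does (not the verbatim mm/slub.c function): it walks every node with normal memory and hangs a separately allocated kmem_cache_node off s->node[node], with a special path while the allocator is still coming up:

	static int init_kmem_cache_nodes(struct kmem_cache *s)
	{
		int node;

		for_each_node_state(node, N_NORMAL_MEMORY) {
			struct kmem_cache_node *n;

			if (slab_state == DOWN) {
				/* Too early to allocate from kmem_cache_node
				 * itself; see the bootstrap comment in the
				 * inc_slabs_node() hunk above. */
				early_kmem_cache_node_alloc(node);
				continue;
			}
			n = kmem_cache_alloc_node(kmem_cache_node,
						  GFP_KERNEL, node);
			if (!n) {
				free_kmem_cache_nodes(s);
				return 0;
			}
			s->node[node] = n;
			init_kmem_cache_node(n, s);
		}
		return 1;
	}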
@@ -3023,8 +3007,6 @@ void __init kmem_cache_init(void)
 	int caches = 0;
 	struct kmem_cache *temp_kmem_cache;
 	int order;
-
-#ifdef CONFIG_NUMA
 	struct kmem_cache *temp_kmem_cache_node;
 	unsigned long kmalloc_size;
 
@@ -3048,12 +3030,6 @@ void __init kmem_cache_init(void)
 			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
 
 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
-#else
-	/* Allocate a single kmem_cache from the page allocator */
-	kmem_size = sizeof(struct kmem_cache);
-	order = get_order(kmem_size);
-	kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
-#endif
 
 	/* Able to allocate the per node structures */
 	slab_state = PARTIAL;
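The branch removed here was the !NUMA bootstrap, which took struct kmem_cache straight from the page allocator instead of from a slab; the now-unconditional setup earlier in kmem_cache_init() appears to bootstrap both kmem_cache and kmem_cache_node from the page allocator, which would make this single-cache variant redundant. The two helpers involved are generic kernel API; their semantics, with illustrative values assuming PAGE_SIZE == 4096:

	/* get_order(n) returns the smallest k with (PAGE_SIZE << k) >= n. */
	unsigned long size = sizeof(struct kmem_cache);	/* well under a page */
	int order = get_order(size);			/* 0 for any size <= 4096 */
	void *mem = (void *)__get_free_pages(GFP_NOWAIT, order); /* 2^order pages */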
@@ -3064,7 +3040,6 @@ void __init kmem_cache_init(void)
 	kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
 	memcpy(kmem_cache, temp_kmem_cache, kmem_size);
 
-#ifdef CONFIG_NUMA
 	/*
 	 * Allocate kmem_cache_node properly from the kmem_cache slab.
 	 * kmem_cache_node is separately allocated so no need to
@@ -3078,18 +3053,6 @@ void __init kmem_cache_init(void)
 	kmem_cache_bootstrap_fixup(kmem_cache_node);
 
 	caches++;
-#else
-	/*
-	 * kmem_cache has kmem_cache_node embedded and we moved it!
-	 * Update the list heads
-	 */
-	INIT_LIST_HEAD(&kmem_cache->local_node.partial);
-	list_splice(&temp_kmem_cache->local_node.partial, &kmem_cache->local_node.partial);
-#ifdef CONFIG_SLUB_DEBUG
-	INIT_LIST_HEAD(&kmem_cache->local_node.full);
-	list_splice(&temp_kmem_cache->local_node.full, &kmem_cache->local_node.full);
-#endif
-#endif
 	kmem_cache_bootstrap_fixup(kmem_cache);
 	caches++;
 	/* Free temporary boot structure */
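The deleted tail handled the embedded local_node: memcpy()ing a structure that embeds a list_head leaves the neighbouring entries pointing at the old head, so the old code re-initialized the new heads and spliced the lists across. With kmem_cache_node always allocated separately, nothing list-bearing moves during the memcpy above and the fixup is unnecessary. A generic illustration of the hazard (hypothetical struct box, not SLUB code):

	#include <linux/list.h>
	#include <linux/string.h>

	struct box {
		struct list_head items;		/* embedded circular list head */
	};

	/* Move a box in memory without losing the entries queued on it. */
	static void move_box(struct box *dst, struct box *src)
	{
		memcpy(dst, src, sizeof(*dst));	/* dst->items.next/prev now stale */
		INIT_LIST_HEAD(&dst->items);	/* fresh, empty head at new address */
		list_splice(&src->items, &dst->items);	/* relink old entries here */
	}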