Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 68
1 file changed, 47 insertions, 21 deletions
@@ -211,7 +211,8 @@ static inline void ClearSlabDebug(struct page *page)
 #define MAX_OBJECTS_PER_SLAB 65535
 
 /* Internal SLUB flags */
 #define __OBJECT_POISON 0x80000000 /* Poison object */
+#define __SYSFS_ADD_DEFERRED 0x40000000 /* Not yet visible via sysfs */
 
 /* Not all arches define cache_line_size */
 #ifndef cache_line_size
@@ -2277,10 +2278,26 @@ panic:
 }
 
 #ifdef CONFIG_ZONE_DMA
+
+static void sysfs_add_func(struct work_struct *w)
+{
+	struct kmem_cache *s;
+
+	down_write(&slub_lock);
+	list_for_each_entry(s, &slab_caches, list) {
+		if (s->flags & __SYSFS_ADD_DEFERRED) {
+			s->flags &= ~__SYSFS_ADD_DEFERRED;
+			sysfs_slab_add(s);
+		}
+	}
+	up_write(&slub_lock);
+}
+
+static DECLARE_WORK(sysfs_add_work, sysfs_add_func);
+
 static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 {
 	struct kmem_cache *s;
-	struct kmem_cache *x;
 	char *text;
 	size_t realsize;
 
@@ -2289,22 +2306,36 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 		return s;
 
 	/* Dynamically create dma cache */
-	x = kmalloc(kmem_size, flags & ~SLUB_DMA);
-	if (!x)
-		panic("Unable to allocate memory for dma cache\n");
+	if (flags & __GFP_WAIT)
+		down_write(&slub_lock);
+	else {
+		if (!down_write_trylock(&slub_lock))
+			goto out;
+	}
+
+	if (kmalloc_caches_dma[index])
+		goto unlock_out;
 
 	realsize = kmalloc_caches[index].objsize;
-	text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
-			(unsigned int)realsize);
-	s = create_kmalloc_cache(x, text, realsize, flags);
-	down_write(&slub_lock);
-	if (!kmalloc_caches_dma[index]) {
-		kmalloc_caches_dma[index] = s;
-		up_write(&slub_lock);
-		return s;
+	text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d", (unsigned int)realsize),
+	s = kmalloc(kmem_size, flags & ~SLUB_DMA);
+
+	if (!s || !text || !kmem_cache_open(s, flags, text,
+			realsize, ARCH_KMALLOC_MINALIGN,
+			SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
+		kfree(s);
+		kfree(text);
+		goto unlock_out;
 	}
+
+	list_add(&s->list, &slab_caches);
+	kmalloc_caches_dma[index] = s;
+
+	schedule_work(&sysfs_add_work);
+
+unlock_out:
 	up_write(&slub_lock);
-	kmem_cache_destroy(s);
+out:
 	return kmalloc_caches_dma[index];
 }
 #endif
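As context for the dma_kmalloc_cache() hunks above: GFP_DMA allocations can arrive from atomic context, so the patch takes slub_lock with a trylock when __GFP_WAIT is clear and hands the sleeping sysfs registration off to a work item (__SYSFS_ADD_DEFERRED plus sysfs_add_work). Below is a minimal, hypothetical sketch of that locking/deferral pattern; the demo_* names are invented for illustration and are not part of the patch.

/*
 * Sketch of "trylock for atomic callers, defer the sleeping step to a
 * work item" -- illustrative only, not taken from mm/slub.c.
 */
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static DECLARE_RWSEM(demo_lock);

/* Runs later in process context, where sleeping (e.g. for sysfs) is fine. */
static void demo_deferred_register(struct work_struct *w)
{
	down_write(&demo_lock);
	/* ... register the newly created object here ... */
	up_write(&demo_lock);
}

static DECLARE_WORK(demo_work, demo_deferred_register);

static int demo_create(gfp_t flags)
{
	if (flags & __GFP_WAIT)
		down_write(&demo_lock);		/* caller may sleep: take the lock outright */
	else if (!down_write_trylock(&demo_lock))
		return -EBUSY;			/* atomic caller: back off, retry later */

	/* ... allocate and initialize the object under the lock ... */

	schedule_work(&demo_work);		/* defer the part that must be able to sleep */
	up_write(&demo_lock);
	return 0;
}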
@@ -2500,15 +2531,11 @@ int kmem_cache_shrink(struct kmem_cache *s)
 				slab_unlock(page);
 				discard_slab(s, page);
 			} else {
-				if (n->nr_partial > MAX_PARTIAL)
-					list_move(&page->lru,
-					slabs_by_inuse + page->inuse);
+				list_move(&page->lru,
+					slabs_by_inuse + page->inuse);
 			}
 		}
 
-		if (n->nr_partial <= MAX_PARTIAL)
-			goto out;
-
 		/*
 		 * Rebuild the partial list with the slabs filled up most
 		 * first and the least used slabs at the end.
@@ -2516,7 +2543,6 @@ int kmem_cache_shrink(struct kmem_cache *s)
 		for (i = s->objects - 1; i >= 0; i--)
 			list_splice(slabs_by_inuse + i, n->partial.prev);
 
-out:
 		spin_unlock_irqrestore(&n->list_lock, flags);
 	}
 
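The kmem_cache_shrink() hunks above drop the MAX_PARTIAL short-cut, so the partial list is now always rebuilt: each slab is dropped into a bucket keyed by its inuse count, and the buckets are spliced back fullest-first. A toy, user-space sketch of that bucketing idea (invented names, arrays instead of list_heads, not kernel code):

#include <stdio.h>

#define OBJECTS_PER_SLAB 4			/* stand-in for s->objects in this toy */

int main(void)
{
	int inuse[] = { 1, 3, 2, 3, 1 };	/* partial slabs in current list order */
	int n = sizeof(inuse) / sizeof(inuse[0]);
	int bucket[OBJECTS_PER_SLAB][8];	/* one bucket per possible inuse value */
	int count[OBJECTS_PER_SLAB] = { 0 };

	for (int i = 0; i < n; i++)		/* single pass: drop each slab into its bucket */
		bucket[inuse[i]][count[inuse[i]]++] = i;

	printf("rebuilt partial list (fullest first):\n");
	for (int b = OBJECTS_PER_SLAB - 1; b >= 0; b--)	/* splice buckets, highest inuse first */
		for (int j = 0; j < count[b]; j++)
			printf("  slab %d, inuse %d\n", bucket[b][j], b);
	return 0;
}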