-rw-r--r--    mm/slub.c    59
1 file changed, 45 insertions(+), 14 deletions(-)
@@ -211,7 +211,8 @@ static inline void ClearSlabDebug(struct page *page)
 #define MAX_OBJECTS_PER_SLAB 65535
 
 /* Internal SLUB flags */
 #define __OBJECT_POISON 0x80000000 /* Poison object */
+#define __SYSFS_ADD_DEFERRED 0x40000000 /* Not yet visible via sysfs */
 
 /* Not all arches define cache_line_size */
 #ifndef cache_line_size
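
Note: the new __SYSFS_ADD_DEFERRED flag marks a cache whose sysfs entry could not be created at allocation time; a work item later walks slab_caches and registers anything still carrying the flag (see the second hunk below). A minimal sketch of that mark-and-sweep pattern, using hypothetical names (my_obj, my_list, my_lock, my_register, deferred_add_work) rather than the SLUB ones:

/* Sketch only -- hypothetical names, not the SLUB code itself. */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

#define MY_ADD_DEFERRED 0x1             /* registration still pending */

struct my_obj {
        struct list_head list;
        unsigned long flags;
};

static LIST_HEAD(my_list);
static DEFINE_MUTEX(my_lock);

static void my_register(struct my_obj *obj)
{
        /* may sleep: e.g. create sysfs files for the object */
}

/* Runs later from the shared workqueue, i.e. in process context. */
static void deferred_add_func(struct work_struct *w)
{
        struct my_obj *obj;

        mutex_lock(&my_lock);
        list_for_each_entry(obj, &my_list, list) {
                if (obj->flags & MY_ADD_DEFERRED) {
                        obj->flags &= ~MY_ADD_DEFERRED;
                        my_register(obj);
                }
        }
        mutex_unlock(&my_lock);
}

static DECLARE_WORK(deferred_add_work, deferred_add_func);

/* Creation path that must not sleep: flag the object and kick the worker.
 * Assumes the caller already holds my_lock (e.g. via mutex_trylock()). */
static void publish_deferred(struct my_obj *obj)
{
        obj->flags |= MY_ADD_DEFERRED;
        list_add(&obj->list, &my_list);
        schedule_work(&deferred_add_work);
}

schedule_work() only queues the function; it executes later in process context, where registration is free to allocate with GFP_KERNEL and take sleeping locks.
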
@@ -2277,10 +2278,26 @@ panic:
 }
 
 #ifdef CONFIG_ZONE_DMA
+
+static void sysfs_add_func(struct work_struct *w)
+{
+        struct kmem_cache *s;
+
+        down_write(&slub_lock);
+        list_for_each_entry(s, &slab_caches, list) {
+                if (s->flags & __SYSFS_ADD_DEFERRED) {
+                        s->flags &= ~__SYSFS_ADD_DEFERRED;
+                        sysfs_slab_add(s);
+                }
+        }
+        up_write(&slub_lock);
+}
+
+static DECLARE_WORK(sysfs_add_work, sysfs_add_func);
+
 static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 {
         struct kmem_cache *s;
-        struct kmem_cache *x;
         char *text;
         size_t realsize;
 
@@ -2289,22 +2306,36 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
                 return s;
 
         /* Dynamically create dma cache */
-        x = kmalloc(kmem_size, flags & ~SLUB_DMA);
-        if (!x)
-                panic("Unable to allocate memory for dma cache\n");
+        if (flags & __GFP_WAIT)
+                down_write(&slub_lock);
+        else {
+                if (!down_write_trylock(&slub_lock))
+                        goto out;
+        }
+
+        if (kmalloc_caches_dma[index])
+                goto unlock_out;
 
         realsize = kmalloc_caches[index].objsize;
-        text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
-                        (unsigned int)realsize);
-        s = create_kmalloc_cache(x, text, realsize, flags);
-        down_write(&slub_lock);
-        if (!kmalloc_caches_dma[index]) {
-                kmalloc_caches_dma[index] = s;
-                up_write(&slub_lock);
-                return s;
+        text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d", (unsigned int)realsize),
+        s = kmalloc(kmem_size, flags & ~SLUB_DMA);
+
+        if (!s || !text || !kmem_cache_open(s, flags, text,
+                        realsize, ARCH_KMALLOC_MINALIGN,
+                        SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
+                kfree(s);
+                kfree(text);
+                goto unlock_out;
         }
+
+        list_add(&s->list, &slab_caches);
+        kmalloc_caches_dma[index] = s;
+
+        schedule_work(&sysfs_add_work);
+
+unlock_out:
         up_write(&slub_lock);
-        kmem_cache_destroy(s);
+out:
         return kmalloc_caches_dma[index];
 }
 #endif
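
Note: dma_kmalloc_cache() can be reached from contexts that must not sleep, so the rewritten path only blocks on slub_lock when __GFP_WAIT is set, falls back to down_write_trylock() otherwise, and re-checks kmalloc_caches_dma[index] after taking the lock in case another CPU created the cache first. A minimal sketch of that lookup-or-create shape, with hypothetical names (table, table_lock, get_or_create_slot) and a made-up payload allocation rather than the real cache setup:

/* Sketch only -- mirrors the locking shape above, not the SLUB data structures. */
#include <linux/gfp.h>
#include <linux/rwsem.h>
#include <linux/slab.h>

static DECLARE_RWSEM(table_lock);
static void *table[16];

static void *get_or_create_slot(int index, gfp_t flags)
{
        void *obj = table[index];

        if (obj)
                return obj;                     /* fast path, no locking */

        if (flags & __GFP_WAIT)
                down_write(&table_lock);        /* caller may sleep */
        else if (!down_write_trylock(&table_lock))
                goto out;                       /* atomic caller: give up */

        /* Re-check under the lock: another CPU may have won the race. */
        if (table[index])
                goto unlock_out;

        obj = kmalloc(128, flags & ~__GFP_DMA); /* hypothetical payload */
        if (!obj)
                goto unlock_out;
        table[index] = obj;

unlock_out:
        up_write(&table_lock);
out:
        return table[index];    /* may still be NULL for an atomic caller */
}

Returning table[index] on every exit path mirrors the patch: an atomic caller that loses the trylock, or a caller that races with creation, simply gets whatever is currently published and retries on a later allocation.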