path: root/mm
author	Christoph Lameter <clameter@sgi.com>	2007-08-07 18:11:48 -0400
committer	Christoph Lameter <clameter@sgi.com>	2007-08-10 00:57:16 -0400
commit	1ceef40249f21eceabf8633934d94962e7d8e1d7 (patch)
tree	2b9201a2f7b102d0db7f2790df6f6456fae89e21 /mm
parent	fcda3d89bf1366f6801447eab2d8a75ac5b9c4ce (diff)
SLUB: Fix dynamic dma kmalloc cache creation
The dynamic dma kmalloc cache creation can run into trouble if a GFP_ATOMIC allocation is the first one performed for a certain size of dma kmalloc slab.

- Move the adding of the slab to sysfs into a workqueue (sysfs does GFP_KERNEL allocations).
- Do not call kmem_cache_destroy() (it takes slub_lock).
- Acquire slub_lock only once and, if we cannot wait, use a trylock.

This introduces a slight risk that the first kmalloc(x, GFP_DMA|GFP_ATOMIC) for a range of sizes fails because another process is holding slub_lock. However, we only need to acquire slub_lock once in order to establish each power-of-two DMA kmalloc cache. The possible conflict is with slub_lock taken during slab management actions (creating or removing a slab cache).

It is rather typical that a driver first fills its buffers using GFP_KERNEL allocations, which wait until slub_lock can be acquired. Drivers also create their slab caches outside of an atomic context before starting to use atomic kmalloc from an interrupt context. Any failures will therefore occur early after boot or when multiple drivers are loaded concurrently. Drivers can already accommodate GFP_ATOMIC failures for other reasons; a retry will then create the slab.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
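As an aside (not part of the patch), the two techniques the commit message relies on can be sketched in isolation: take the semaphore with a trylock when the caller cannot sleep, and defer the sysfs-style registration to process context where GFP_KERNEL allocations are safe. The fragment below is a hypothetical illustration only; the names example_lock, example_work, example_deferred_add and example_create do not exist in the kernel.

/* Hypothetical sketch of the locking/deferral pattern -- not from the patch. */
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static DECLARE_RWSEM(example_lock);

/* Runs in process context, so sysfs-style GFP_KERNEL allocations are safe here. */
static void example_deferred_add(struct work_struct *w)
{
	down_write(&example_lock);
	/* ... register any objects created earlier with sysfs ... */
	up_write(&example_lock);
}

static DECLARE_WORK(example_work, example_deferred_add);

static int example_create(gfp_t flags)
{
	if (flags & __GFP_WAIT)
		down_write(&example_lock);	/* caller may sleep: wait for the lock */
	else if (!down_write_trylock(&example_lock))
		return -EBUSY;			/* atomic caller: fail rather than sleep */

	/* ... create the object and put it on the global list ... */

	schedule_work(&example_work);		/* sysfs registration happens later */
	up_write(&example_lock);
	return 0;
}

A caller that fails with -EBUSY simply retries on a later allocation, which mirrors how the patch lets a subsequent kmalloc attempt create the DMA cache.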
Diffstat (limited to 'mm')
-rw-r--r--	mm/slub.c	59
1 file changed, 45 insertions(+), 14 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 64fd80bdae30..69d02e3e439e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -211,7 +211,8 @@ static inline void ClearSlabDebug(struct page *page)
 #define MAX_OBJECTS_PER_SLAB 65535
 
 /* Internal SLUB flags */
 #define __OBJECT_POISON		0x80000000 /* Poison object */
+#define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
 
 /* Not all arches define cache_line_size */
 #ifndef cache_line_size
@@ -2277,10 +2278,26 @@ panic:
 }
 
 #ifdef CONFIG_ZONE_DMA
+
+static void sysfs_add_func(struct work_struct *w)
+{
+	struct kmem_cache *s;
+
+	down_write(&slub_lock);
+	list_for_each_entry(s, &slab_caches, list) {
+		if (s->flags & __SYSFS_ADD_DEFERRED) {
+			s->flags &= ~__SYSFS_ADD_DEFERRED;
+			sysfs_slab_add(s);
+		}
+	}
+	up_write(&slub_lock);
+}
+
+static DECLARE_WORK(sysfs_add_work, sysfs_add_func);
+
 static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 {
 	struct kmem_cache *s;
-	struct kmem_cache *x;
 	char *text;
 	size_t realsize;
 
@@ -2289,22 +2306,36 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 		return s;
 
 	/* Dynamically create dma cache */
-	x = kmalloc(kmem_size, flags & ~SLUB_DMA);
-	if (!x)
-		panic("Unable to allocate memory for dma cache\n");
+	if (flags & __GFP_WAIT)
+		down_write(&slub_lock);
+	else {
+		if (!down_write_trylock(&slub_lock))
+			goto out;
+	}
+
+	if (kmalloc_caches_dma[index])
+		goto unlock_out;
 
 	realsize = kmalloc_caches[index].objsize;
-	text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
-			(unsigned int)realsize);
-	s = create_kmalloc_cache(x, text, realsize, flags);
-	down_write(&slub_lock);
-	if (!kmalloc_caches_dma[index]) {
-		kmalloc_caches_dma[index] = s;
-		up_write(&slub_lock);
-		return s;
+	text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d", (unsigned int)realsize),
+	s = kmalloc(kmem_size, flags & ~SLUB_DMA);
+
+	if (!s || !text || !kmem_cache_open(s, flags, text,
+			realsize, ARCH_KMALLOC_MINALIGN,
+			SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
+		kfree(s);
+		kfree(text);
+		goto unlock_out;
 	}
+
+	list_add(&s->list, &slab_caches);
+	kmalloc_caches_dma[index] = s;
+
+	schedule_work(&sysfs_add_work);
+
+unlock_out:
 	up_write(&slub_lock);
-	kmem_cache_destroy(s);
+out:
 	return kmalloc_caches_dma[index];
 }
 #endif