author     Christoph Lameter <cl@linux.com>    2012-09-04 19:18:33 -0400
committer  Pekka Enberg <penberg@kernel.org>   2012-09-05 05:00:37 -0400
commit     8a13a4cc80bb25c9eab2e7e56bab724fcfa55fce
tree       a212edb3d0b139b0743ca5ca34c14037a6ada4dc
parent     278b1bb1313664d4999a7f7d47a8a8d964862d02
mm/sl[aou]b: Shrink __kmem_cache_create() parameter lists
Do the initial settings of the fields in common code. This will allow us
to push more processing into common code later and improve readability.
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
 mm/slab.c        | 73
 mm/slab.h        |  3
 mm/slab_common.c | 26
 mm/slob.c        |  8
 mm/slub.c        | 39
 5 files changed, 68 insertions(+), 81 deletions(-)
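
At its core the patch inverts the contract between the common slab layer and the per-allocator __kmem_cache_create() hooks: instead of receiving name, size, align and ctor as parameters, the hook now finds them pre-filled in the struct kmem_cache, and only the flags still travel as an argument. A minimal compilable sketch of the before/after contract, with struct kmem_cache reduced to a stub and a hook body that is illustrative rather than actual SLAB/SLOB/SLUB code:

#include <stddef.h>

/* Stub with only the fields this patch touches; the real
 * struct kmem_cache has many more members. */
struct kmem_cache {
	const char *name;
	size_t size;		/* object size including metadata */
	size_t object_size;	/* size the caller asked for */
	size_t align;
	unsigned long flags;
	void (*ctor)(void *);
};

/* Old contract: every property arrives as a parameter. */
int __kmem_cache_create_old(struct kmem_cache *s, const char *name,
			    size_t size, size_t align, unsigned long flags,
			    void (*ctor)(void *));

/* New contract: the caller pre-fills the struct; only flags remain. */
static int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
{
	s->flags = flags;	/* allocator-specific setup would go here */
	return 0;
}

/* Caller side after the patch, mirroring kmem_cache_create() below. */
static int cache_setup(struct kmem_cache *s, const char *name, size_t size,
		       size_t align, unsigned long flags, void (*ctor)(void *))
{
	s->object_size = s->size = size;
	s->align = align;
	s->ctor = ctor;
	s->name = name;
	return __kmem_cache_create(s, flags);
}

Every hunk below is an instance of this rewrite.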
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1677,20 +1677,20 @@ void __init kmem_cache_init(void)
 	 */
 
 	sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-	__kmem_cache_create(sizes[INDEX_AC].cs_cachep, names[INDEX_AC].name,
-					sizes[INDEX_AC].cs_size,
-					ARCH_KMALLOC_MINALIGN,
-					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-					NULL);
-
+	sizes[INDEX_AC].cs_cachep->name = names[INDEX_AC].name;
+	sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size;
+	sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size;
+	sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+	__kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
 	list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);
+
 	if (INDEX_AC != INDEX_L3) {
 		sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-		__kmem_cache_create(sizes[INDEX_L3].cs_cachep, names[INDEX_L3].name,
-			sizes[INDEX_L3].cs_size,
-			ARCH_KMALLOC_MINALIGN,
-			ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-			NULL);
+		sizes[INDEX_L3].cs_cachep->name = names[INDEX_L3].name;
+		sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size;
+		sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size;
+		sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+		__kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
 		list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
 	}
 
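
The same four assignments now repeat for every bootstrap cache in kmem_cache_init(), both here and in the kmalloc/DMA hunk that follows. A hypothetical helper (not something the patch adds) just to name the pattern, reusing the struct stub from the sketch above:

/* Hypothetical helper, NOT introduced by the patch: the diff open-codes
 * these assignments before each bootstrap __kmem_cache_create() call. */
static void boot_cache_init(struct kmem_cache *cachep, const char *name,
			    size_t size, size_t align)
{
	cachep->name = name;
	cachep->size = size;
	cachep->object_size = size;
	cachep->align = align;
}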
@@ -1706,22 +1706,21 @@ void __init kmem_cache_init(void)
 		 */
 		if (!sizes->cs_cachep) {
 			sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-			__kmem_cache_create(sizes->cs_cachep, names->name,
-					sizes->cs_size,
-					ARCH_KMALLOC_MINALIGN,
-					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-					NULL);
+			sizes->cs_cachep->name = names->name;
+			sizes->cs_cachep->size = sizes->cs_size;
+			sizes->cs_cachep->object_size = sizes->cs_size;
+			sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+			__kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
 			list_add(&sizes->cs_cachep->list, &slab_caches);
 		}
 #ifdef CONFIG_ZONE_DMA
 		sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+		sizes->cs_dmacachep->name = names->name_dma;
+		sizes->cs_dmacachep->size = sizes->cs_size;
+		sizes->cs_dmacachep->object_size = sizes->cs_size;
+		sizes->cs_dmacachep->align = ARCH_KMALLOC_MINALIGN;
 		__kmem_cache_create(sizes->cs_dmacachep,
-				names->name_dma,
-				sizes->cs_size,
-				ARCH_KMALLOC_MINALIGN,
-				ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
-				SLAB_PANIC,
-				NULL);
+			       ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC);
 		list_add(&sizes->cs_dmacachep->list, &slab_caches);
 #endif
 		sizes++;
@@ -2360,12 +2359,12 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
  * as davem.
  */
 int
-__kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, size_t align,
-	unsigned long flags, void (*ctor)(void *))
+__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 {
 	size_t left_over, slab_size, ralign;
 	gfp_t gfp;
 	int err;
+	size_t size = cachep->size;
 
 #if DEBUG
 #if FORCED_DEBUG
@@ -2437,8 +2436,8 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s
 		ralign = ARCH_SLAB_MINALIGN;
 	}
 	/* 3) caller mandated alignment */
-	if (ralign < align) {
-		ralign = align;
+	if (ralign < cachep->align) {
+		ralign = cachep->align;
 	}
 	/* disable debug if necessary */
 	if (ralign > __alignof__(unsigned long long))
@@ -2446,7 +2445,7 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s
 	/*
 	 * 4) Store it.
 	 */
-	align = ralign;
+	cachep->align = ralign;
 
 	if (slab_is_available())
 		gfp = GFP_KERNEL;
@@ -2454,8 +2453,6 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s
 		gfp = GFP_NOWAIT;
 
 	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
-	cachep->object_size = size;
-	cachep->align = align;
 #if DEBUG
 
 	/*
@@ -2500,17 +2497,15 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s
 		 */
 		flags |= CFLGS_OFF_SLAB;
 
-	size = ALIGN(size, align);
+	size = ALIGN(size, cachep->align);
 
-	left_over = calculate_slab_order(cachep, size, align, flags);
+	left_over = calculate_slab_order(cachep, size, cachep->align, flags);
 
-	if (!cachep->num) {
-		printk(KERN_ERR
-		       "kmem_cache_create: couldn't create cache %s.\n", name);
+	if (!cachep->num)
 		return -E2BIG;
-	}
+
 	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
-			  + sizeof(struct slab), align);
+			  + sizeof(struct slab), cachep->align);
 
 	/*
 	 * If the slab has been placed off-slab, and we have enough space then
@@ -2538,8 +2533,8 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s
 
 	cachep->colour_off = cache_line_size();
 	/* Offset must be a multiple of the alignment. */
-	if (cachep->colour_off < align)
-		cachep->colour_off = align;
+	if (cachep->colour_off < cachep->align)
+		cachep->colour_off = cachep->align;
 	cachep->colour = left_over / cachep->colour_off;
 	cachep->slab_size = slab_size;
 	cachep->flags = flags;
@@ -2560,8 +2555,6 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s
 		 */
 		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
 	}
-	cachep->ctor = ctor;
-	cachep->name = name;
 	cachep->refcount = 1;
 
 	err = setup_cpu_cache(cachep, gfp);
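
With the align parameter gone from SLAB's __kmem_cache_create(), the alignment negotiation reads the caller's request out of cachep->align and writes the final value back into the same field (steps 3) and 4) in the hunks above). Condensed into a standalone sketch, with the arch-minimum and debug handling collapsed into one constant and the struct stub from the first sketch reused:

/* Condensed sketch of SLAB's alignment flow after the patch.
 * ARCH_SLAB_MINALIGN stands in for steps 1)-2), which this diff
 * leaves unchanged. */
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)

static void slab_resolve_align(struct kmem_cache *cachep)
{
	size_t ralign = ARCH_SLAB_MINALIGN;

	/* 3) caller mandated alignment, now read from the struct */
	if (ralign < cachep->align)
		ralign = cachep->align;
	/* 4) store it back; later code uses cachep->align throughout */
	cachep->align = ralign;
}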
diff --git a/mm/slab.h b/mm/slab.h
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -33,8 +33,7 @@ extern struct list_head slab_caches;
 extern struct kmem_cache *kmem_cache;
 
 /* Functions provided by the slab allocators */
-extern int __kmem_cache_create(struct kmem_cache *, const char *name,
-	size_t size, size_t align, unsigned long flags, void (*ctor)(void *));
+extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);
 
 #ifdef CONFIG_SLUB
 struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
diff --git a/mm/slab_common.c b/mm/slab_common.c
index f50d2ed4fbf1..8a85a19d90ef 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -100,7 +100,6 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 {
 	struct kmem_cache *s = NULL;
 	int err = 0;
-	char *n;
 
 	get_online_cpus();
 	mutex_lock(&slab_mutex);
@@ -109,32 +108,33 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 		goto out_locked;
 
 
-	n = kstrdup(name, GFP_KERNEL);
-	if (!n) {
-		err = -ENOMEM;
-		goto out_locked;
-	}
-
 	s = __kmem_cache_alias(name, size, align, flags, ctor);
 	if (s)
 		goto out_locked;
 
 	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
 	if (s) {
-		err = __kmem_cache_create(s, n, size, align, flags, ctor);
+		s->object_size = s->size = size;
+		s->align = align;
+		s->ctor = ctor;
+		s->name = kstrdup(name, GFP_KERNEL);
+		if (!s->name) {
+			kmem_cache_free(kmem_cache, s);
+			err = -ENOMEM;
+			goto out_locked;
+		}
+
+		err = __kmem_cache_create(s, flags);
 		if (!err)
 
 			list_add(&s->list, &slab_caches);
 
 		else {
-			kfree(n);
+			kfree(s->name);
 			kmem_cache_free(kmem_cache, s);
 		}
-
-	} else {
-		kfree(n);
+	} else
 		err = -ENOMEM;
-	}
 
 out_locked:
 	mutex_unlock(&slab_mutex);
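
Worth noting in kmem_cache_create(): the kstrdup() of the name moves inside the allocation branch, so the cache now owns its name string and each failure path releases the string and the struct together. A sketch of the resulting flow, with kernel primitives mapped to their libc lookalikes (calloc/strdup/free), error codes collapsed into NULL, and the struct stub plus stub __kmem_cache_create() reused from the first sketch:

#include <stdlib.h>
#include <string.h>

static struct kmem_cache *cache_create(const char *name, size_t size,
				       size_t align, unsigned long flags,
				       void (*ctor)(void *))
{
	struct kmem_cache *s = calloc(1, sizeof(*s));	/* kmem_cache_zalloc() */

	if (!s)
		return NULL;				/* err = -ENOMEM */

	s->object_size = s->size = size;
	s->align = align;
	s->ctor = ctor;
	s->name = strdup(name);				/* kstrdup(name, GFP_KERNEL) */
	if (!s->name) {
		free(s);				/* kmem_cache_free(kmem_cache, s) */
		return NULL;
	}
	if (__kmem_cache_create(s, flags) != 0) {
		free((void *)s->name);			/* kfree(s->name) */
		free(s);
		return NULL;
	}
	return s;	/* the real code also does list_add(&s->list, &slab_caches) */
}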
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -508,17 +508,15 @@ size_t ksize(const void *block)
 }
 EXPORT_SYMBOL(ksize);
 
-int __kmem_cache_create(struct kmem_cache *c, const char *name, size_t size,
-	size_t align, unsigned long flags, void (*ctor)(void *))
+int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
 {
-	c->name = name;
-	c->size = size;
+	size_t align = c->size;
+
 	if (flags & SLAB_DESTROY_BY_RCU) {
 		/* leave room for rcu footer at the end of object */
 		c->size += sizeof(struct slob_rcu);
 	}
 	c->flags = flags;
-	c->ctor = ctor;
 	/* ignore alignment unless it's forced */
 	c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
 	if (c->align < ARCH_SLAB_MINALIGN)
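
SLOB shows the same inversion in miniature: the name, size and ctor assignments drop out, and a local align is seeded from c->size, whereas the removed parameter in the old signature carried the caller's requested alignment here, not the size. A sketch of the hook after the patch; the constants and struct slob_rcu carry illustrative stub values, the struct kmem_cache stub from the first sketch is reused, and the final comparison against the local is taken from the unchanged lines just below the hunk (an assumption; they are not shown):

#define SLAB_HWCACHE_ALIGN	0x2000UL	/* illustrative value */
#define SLAB_DESTROY_BY_RCU	0x10000UL	/* illustrative value */
#define SLOB_ALIGN		8		/* illustrative value */
#define ARCH_SLAB_MINALIGN	__alignof__(unsigned long long)

struct slob_rcu { void *head; int size; };	/* stub of the rcu footer */

static int slob_kmem_cache_create(struct kmem_cache *c, unsigned long flags)
{
	size_t align = c->size;	/* seeded from size, exactly as the hunk shows */

	if (flags & SLAB_DESTROY_BY_RCU) {
		/* leave room for rcu footer at the end of object */
		c->size += sizeof(struct slob_rcu);
	}
	c->flags = flags;
	/* ignore alignment unless it's forced */
	c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
	if (c->align < ARCH_SLAB_MINALIGN)
		c->align = ARCH_SLAB_MINALIGN;
	if (c->align < align)	/* assumed continuation past the hunk */
		c->align = align;
	return 0;
}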
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3029,16 +3029,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 
 }
 
-static int kmem_cache_open(struct kmem_cache *s,
-		const char *name, size_t size,
-		size_t align, unsigned long flags,
-		void (*ctor)(void *))
+static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
 {
-	s->name = name;
-	s->ctor = ctor;
-	s->object_size = size;
-	s->align = align;
-	s->flags = kmem_cache_flags(size, flags, name, ctor);
+	s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
 	s->reserved = 0;
 
 	if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
@@ -3115,7 +3108,7 @@ error:
 	if (flags & SLAB_PANIC)
 		panic("Cannot create slab %s size=%lu realsize=%u "
 			"order=%u offset=%u flags=%lx\n",
-			s->name, (unsigned long)size, s->size, oo_order(s->oo),
+			s->name, (unsigned long)s->size, s->size, oo_order(s->oo),
 			s->offset, flags);
 	return -EINVAL;
 }
@@ -3261,12 +3254,15 @@ static struct kmem_cache *__init create_kmalloc_cache(const char *name,
 
 	s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
 
+	s->name = name;
+	s->size = s->object_size = size;
+	s->align = ARCH_KMALLOC_MINALIGN;
+
 	/*
 	 * This function is called with IRQs disabled during early-boot on
 	 * single CPU so there's no need to take slab_mutex here.
 	 */
-	if (kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
-				flags, NULL))
+	if (kmem_cache_open(s, flags))
 		goto panic;
 
 	list_add(&s->list, &slab_caches);
@@ -3719,9 +3715,10 @@ void __init kmem_cache_init(void)
 	 */
 	kmem_cache_node = (void *)kmem_cache + kmalloc_size;
 
-	kmem_cache_open(kmem_cache_node, "kmem_cache_node",
-		sizeof(struct kmem_cache_node),
-		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+	kmem_cache_node->name = "kmem_cache_node";
+	kmem_cache_node->size = kmem_cache_node->object_size =
+		sizeof(struct kmem_cache_node);
+	kmem_cache_open(kmem_cache_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 
 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
 
@@ -3729,8 +3726,10 @@ void __init kmem_cache_init(void)
 	slab_state = PARTIAL;
 
 	temp_kmem_cache = kmem_cache;
-	kmem_cache_open(kmem_cache, "kmem_cache", kmem_size,
-		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+	kmem_cache->name = "kmem_cache";
+	kmem_cache->size = kmem_cache->object_size = kmem_size;
+	kmem_cache_open(kmem_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
+
 	kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
 	memcpy(kmem_cache, temp_kmem_cache, kmem_size);
 
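
SLUB's two boot-time caches (kmem_cache_node and kmem_cache itself) now follow the same discipline: assign the identity fields on the statically carved-out struct, then call kmem_cache_open() with flags alone. The pattern from the two kmem_cache_init() hunks above, condensed below; kmem_cache_open() is stubbed here, while the real one derives s->flags via kmem_cache_flags(s->size, flags, s->name, s->ctor) as shown earlier and then sizes the cache:

/* Reuses the struct kmem_cache stub from the first sketch. */
static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
{
	s->flags = flags;	/* stand-in for the real flags/size setup */
	return 0;
}

static void slub_boot_cache(struct kmem_cache *s, const char *name,
			    size_t size, unsigned long flags)
{
	s->name = name;
	s->size = s->object_size = size;
	kmem_cache_open(s, flags);	/* flags-only call after the patch */
}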
@@ -3943,11 +3942,9 @@ struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
 	return s;
 }
 
-int __kmem_cache_create(struct kmem_cache *s,
-		const char *name, size_t size,
-		size_t align, unsigned long flags, void (*ctor)(void *))
+int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
 {
-	return kmem_cache_open(s, name, size, align, flags, ctor);
+	return kmem_cache_open(s, flags);
 }
 
 #ifdef CONFIG_SMP