Diffstat (limited to 'mm')
 -rw-r--r--  mm/rmap.c  |  3
 -rw-r--r--  mm/shmem.c |  3
 -rw-r--r--  mm/slab.c  | 11
 -rw-r--r--  mm/slob.c  |  6
 -rw-r--r--  mm/slub.c  | 12
 5 files changed, 16 insertions, 19 deletions
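
Every hunk below makes the same mechanical change: the slab constructor callback drops its 'unsigned long flags' argument (passed as 0 at every call site in this diff) and the remaining two arguments swap places, so the prototype goes from void (*ctor)(void *, struct kmem_cache *, unsigned long) to void (*ctor)(struct kmem_cache *, void *). As a minimal sketch of what a constructor looks like against the new prototype (my_obj and my_obj_ctor are hypothetical names for illustration, not part of this patch):

#include <linux/slab.h>

/* Hypothetical object type, for illustration only. */
struct my_obj {
	int refcount;
};

/*
 * Constructor in the new form: the cache comes first, the object
 * pointer second, and there is no flags argument any more.
 */
static void my_obj_ctor(struct kmem_cache *cachep, void *obj)
{
	struct my_obj *o = obj;

	o->refcount = 0;
}
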
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -137,8 +137,7 @@ void anon_vma_unlink(struct vm_area_struct *vma)
 		anon_vma_free(anon_vma);
 }
 
-static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
-			  unsigned long flags)
+static void anon_vma_ctor(struct kmem_cache *cachep, void *data)
 {
 	struct anon_vma *anon_vma = data;
 
diff --git a/mm/shmem.c b/mm/shmem.c
index 2f039f32031f..204865750fe4 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2328,8 +2328,7 @@ static void shmem_destroy_inode(struct inode *inode)
 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
 }
 
-static void init_once(void *foo, struct kmem_cache *cachep,
-		      unsigned long flags)
+static void init_once(struct kmem_cache *cachep, void *foo)
 {
 	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
 
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -408,7 +408,7 @@ struct kmem_cache {
 	unsigned int dflags;		/* dynamic flags */
 
 	/* constructor func */
-	void (*ctor) (void *, struct kmem_cache *, unsigned long);
+	void (*ctor)(struct kmem_cache *, void *);
 
 	/* 5) cache creation/removal */
 	const char *name;
@@ -2129,7 +2129,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
 struct kmem_cache *
 kmem_cache_create (const char *name, size_t size, size_t align,
 	unsigned long flags,
-	void (*ctor)(void*, struct kmem_cache *, unsigned long))
+	void (*ctor)(struct kmem_cache *, void *))
 {
 	size_t left_over, slab_size, ralign;
 	struct kmem_cache *cachep = NULL, *pc;
@@ -2636,8 +2636,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
 		 * They must also be threaded.
 		 */
 		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
-			cachep->ctor(objp + obj_offset(cachep), cachep,
-				     0);
+			cachep->ctor(cachep, objp + obj_offset(cachep));
 
 		if (cachep->flags & SLAB_RED_ZONE) {
 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
@@ -2653,7 +2652,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
 					   cachep->buffer_size / PAGE_SIZE, 0);
 #else
 		if (cachep->ctor)
-			cachep->ctor(objp, cachep, 0);
+			cachep->ctor(cachep, objp);
 #endif
 		slab_bufctl(slabp)[i] = i + 1;
 	}
@@ -3078,7 +3077,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 #endif
 	objp += obj_offset(cachep);
 	if (cachep->ctor && cachep->flags & SLAB_POISON)
-		cachep->ctor(objp, cachep, 0);
+		cachep->ctor(cachep, objp);
 #if ARCH_SLAB_MINALIGN
 	if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
 		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -499,12 +499,12 @@ struct kmem_cache {
 	unsigned int size, align;
 	unsigned long flags;
 	const char *name;
-	void (*ctor)(void *, struct kmem_cache *, unsigned long);
+	void (*ctor)(struct kmem_cache *, void *);
 };
 
 struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	size_t align, unsigned long flags,
-	void (*ctor)(void*, struct kmem_cache *, unsigned long))
+	void (*ctor)(struct kmem_cache *, void *))
 {
 	struct kmem_cache *c;
 
@@ -548,7 +548,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 		b = slob_new_page(flags, get_order(c->size), node);
 
 	if (c->ctor)
-		c->ctor(b, c, 0);
+		c->ctor(c, b);
 
 	return b;
 }
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -980,7 +980,7 @@ __setup("slub_debug", setup_slub_debug);
 
 static unsigned long kmem_cache_flags(unsigned long objsize,
 	unsigned long flags, const char *name,
-	void (*ctor)(void *, struct kmem_cache *, unsigned long))
+	void (*ctor)(struct kmem_cache *, void *))
 {
 	/*
 	 * The page->offset field is only 16 bit wide. This is an offset
@@ -1027,7 +1027,7 @@ static inline int check_object(struct kmem_cache *s, struct page *page,
 static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
 static inline unsigned long kmem_cache_flags(unsigned long objsize,
 	unsigned long flags, const char *name,
-	void (*ctor)(void *, struct kmem_cache *, unsigned long))
+	void (*ctor)(struct kmem_cache *, void *))
 {
 	return flags;
 }
@@ -1071,7 +1071,7 @@ static void setup_object(struct kmem_cache *s, struct page *page,
 {
 	setup_object_debug(s, page, object);
 	if (unlikely(s->ctor))
-		s->ctor(object, s, 0);
+		s->ctor(s, object);
 }
 
 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
@@ -2211,7 +2211,7 @@ static int calculate_sizes(struct kmem_cache *s)
 static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 		const char *name, size_t size,
 		size_t align, unsigned long flags,
-		void (*ctor)(void *, struct kmem_cache *, unsigned long))
+		void (*ctor)(struct kmem_cache *, void *))
 {
 	memset(s, 0, kmem_size);
 	s->name = name;
@@ -2801,7 +2801,7 @@ static int slab_unmergeable(struct kmem_cache *s)
 
 static struct kmem_cache *find_mergeable(size_t size,
 		size_t align, unsigned long flags, const char *name,
-		void (*ctor)(void *, struct kmem_cache *, unsigned long))
+		void (*ctor)(struct kmem_cache *, void *))
 {
 	struct kmem_cache *s;
 
@@ -2842,7 +2842,7 @@ static struct kmem_cache *find_mergeable(size_t size,
 
 struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		size_t align, unsigned long flags,
-		void (*ctor)(void *, struct kmem_cache *, unsigned long))
+		void (*ctor)(struct kmem_cache *, void *))
 {
 	struct kmem_cache *s;
 
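
A caller-side sketch of creating a cache against the new-style constructor shown after the diffstat above (again, the names and the SLAB_HWCACHE_ALIGN choice are illustrative, not taken from this patch); note that kmem_cache_create() keeps its five-argument form and only the type of the last argument changes:

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>

static struct kmem_cache *my_obj_cachep;

static int __init my_obj_cache_init(void)
{
	/* Pass the two-argument constructor as the last parameter. */
	my_obj_cachep = kmem_cache_create("my_obj", sizeof(struct my_obj),
					  0, SLAB_HWCACHE_ALIGN,
					  my_obj_ctor);
	if (!my_obj_cachep)
		return -ENOMEM;
	return 0;
}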