author		Christoph Lameter <clameter@sgi.com>	2007-07-17 07:03:28 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-17 13:23:01 -0400
commit		ce15fea8274acca06daa1674322d37a7d3f0036b
tree		ade273da0bfdc0eadb176d847012ce1656b75c93 /mm
parent		12ad6843dd145050231ec5a27fe326c2085f9095
SLUB: Do not use length parameter in slab_alloc()
We can get the length of the object through the kmem_cache structure. The
additional parameter does no good and causes the compiler to generate bad
code.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
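For readers who want the gist without walking the diff: below is a minimal,
standalone userspace sketch of the idea behind this change (not kernel code;
the struct my_cache and my_alloc* names are hypothetical). Because the cache
descriptor already records the object size (s->objsize in struct kmem_cache),
threading a separate length argument through every slab_alloc() call site is
redundant; dropping it shortens the call sites and lets the __GFP_ZERO memset
use the size stored in the descriptor.

/*
 * Minimal userspace sketch, NOT kernel code.  struct my_cache, my_alloc*
 * and main() are hypothetical illustrations; in mm/slub.c the descriptor
 * is struct kmem_cache and the size field is s->objsize.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct my_cache {
	size_t objsize;			/* analogous to kmem_cache.objsize */
};

/* Before: every caller has to pass the length alongside the cache. */
static void *my_alloc_with_len(struct my_cache *s, int zero, size_t length)
{
	void *object = malloc(s->objsize);

	if (zero && object)
		memset(object, 0, length);	/* length just duplicates s->objsize */
	return object;
}

/* After: the size comes from the descriptor, so the parameter goes away. */
static void *my_alloc(struct my_cache *s, int zero)
{
	void *object = malloc(s->objsize);

	if (zero && object)
		memset(object, 0, s->objsize);
	return object;
}

int main(void)
{
	struct my_cache cache = { .objsize = 64 };
	void *a = my_alloc_with_len(&cache, 1, cache.objsize);
	void *b = my_alloc(&cache, 1);

	printf("old-style: %p, new-style: %p\n", a, b);
	free(a);
	free(b);
	return 0;
}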
Diffstat (limited to 'mm')
-rw-r--r--	mm/slub.c	20
1 file changed, 9 insertions(+), 11 deletions(-)
@@ -1541,7 +1541,7 @@ debug:
  * Otherwise we can simply pick the next object from the lockless free list.
  */
 static void __always_inline *slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, void *addr, int length)
+		gfp_t gfpflags, int node, void *addr)
 {
 	struct page *page;
 	void **object;
@@ -1561,23 +1561,21 @@ static void __always_inline *slab_alloc(struct kmem_cache *s,
 	local_irq_restore(flags);
 
 	if (unlikely((gfpflags & __GFP_ZERO) && object))
-		memset(object, 0, length);
+		memset(object, 0, s->objsize);
 
 	return object;
 }
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1,
-			__builtin_return_address(0), s->objsize);
+	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node,
-			__builtin_return_address(0), s->objsize);
+	return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
@@ -2369,7 +2367,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (ZERO_OR_NULL_PTR(s))
 		return s;
 
-	return slab_alloc(s, flags, -1, __builtin_return_address(0), size);
+	return slab_alloc(s, flags, -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2381,7 +2379,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	if (ZERO_OR_NULL_PTR(s))
 		return s;
 
-	return slab_alloc(s, flags, node, __builtin_return_address(0), size);
+	return slab_alloc(s, flags, node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2712,7 +2710,7 @@ void *kmem_cache_zalloc(struct kmem_cache *s, gfp_t flags)
 {
 	void *x;
 
-	x = slab_alloc(s, flags, -1, __builtin_return_address(0), 0);
+	x = slab_alloc(s, flags, -1, __builtin_return_address(0));
 	if (x)
 		memset(x, 0, s->objsize);
 	return x;
@@ -2762,7 +2760,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 	if (ZERO_OR_NULL_PTR(s))
 		return s;
 
-	return slab_alloc(s, gfpflags, -1, caller, size);
+	return slab_alloc(s, gfpflags, -1, caller);
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
@@ -2773,7 +2771,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	if (ZERO_OR_NULL_PTR(s))
 		return s;
 
-	return slab_alloc(s, gfpflags, node, caller, size);
+	return slab_alloc(s, gfpflags, node, caller);
 }
 
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)