author	Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>	2008-08-19 13:43:25 -0400
committer	Pekka Enberg <penberg@cs.helsinki.fi>	2008-11-26 09:47:25 -0500
commit	ce71e27c6fdc43c29f36d307b9100bde70c947fc (patch)
tree	4c38611002eb3945835ed6bec78d6fb55118165a /mm/slub.c
parent	210b5c06130f266370b5ff86e3cb6d860e1be29c (diff)
SLUB: Replace __builtin_return_address(0) with _RET_IP_.
This patch replaces __builtin_return_address(0) with _RET_IP_, since a
previous patch moved _RET_IP_ and _THIS_IP_ to include/linux/kernel.h and
they are now widely available. This makes the code shorter and easier to
read.
[penberg@cs.helsinki.fi: remove _RET_IP_ casts to void pointer]
Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
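
For reference, the helpers this series relies on are plain macros. As moved into include/linux/kernel.h by the earlier patch, they read:

    #define _RET_IP_		(unsigned long)__builtin_return_address(0)
    #define _THIS_IP_  ({ __label__ __here; __here: (unsigned long)&&__here; })

So every _RET_IP_ call site below is, after preprocessing, identical to the __builtin_return_address(0) expression it replaces, just already cast to unsigned long.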
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	48
1 file changed, 24 insertions, 24 deletions
@@ -182,7 +182,7 @@ static LIST_HEAD(slab_caches);
  * Tracking user of a slab.
  */
 struct track {
-	void *addr;	/* Called from address */
+	unsigned long addr;	/* Called from address */
 	int cpu;	/* Was running on cpu */
 	int pid;	/* Pid context */
 	unsigned long when;	/* When did the operation occur */
@@ -371,7 +371,7 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 }
 
 static void set_track(struct kmem_cache *s, void *object,
-			enum track_item alloc, void *addr)
+			enum track_item alloc, unsigned long addr)
 {
 	struct track *p;
 
@@ -395,8 +395,8 @@ static void init_tracking(struct kmem_cache *s, void *object)
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
-	set_track(s, object, TRACK_FREE, NULL);
-	set_track(s, object, TRACK_ALLOC, NULL);
+	set_track(s, object, TRACK_FREE, 0UL);
+	set_track(s, object, TRACK_ALLOC, 0UL);
 }
 
 static void print_track(const char *s, struct track *t)
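
A note on the 0UL change above: with addr now an integer rather than a pointer, the "no caller recorded" sentinel becomes 0UL instead of NULL, and zero tests keep working unchanged. The guard in print_track (just above the next hunk's context) follows this pattern:

    if (!t->addr)	/* 0UL: nothing recorded for this slot */
        return;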
@@ -405,7 +405,7 @@ static void print_track(const char *s, struct track *t)
 		return;
 
 	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
-		s, t->addr, jiffies - t->when, t->cpu, t->pid);
+		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
 }
 
 static void print_tracking(struct kmem_cache *s, void *object)
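
The (void *) cast above exists because printk's %pS specifier, which resolves an address to symbol+offset, takes a pointer argument; with track.addr now unsigned long, the print site casts back. A minimal sketch of the pattern:

    unsigned long addr = _RET_IP_;
    printk(KERN_INFO "called from %pS\n", (void *)addr);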
@@ -870,7 +870,7 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 }
 
 static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
-					void *object, void *addr)
+					void *object, unsigned long addr)
 {
 	if (!check_slab(s, page))
 		goto bad;
@@ -910,7 +910,7 @@ bad:
 }
 
 static int free_debug_processing(struct kmem_cache *s, struct page *page,
-					void *object, void *addr)
+					void *object, unsigned long addr)
 {
 	if (!check_slab(s, page))
 		goto fail;
@@ -1033,10 +1033,10 @@ static inline void setup_object_debug(struct kmem_cache *s,
 			struct page *page, void *object) {}
 
 static inline int alloc_debug_processing(struct kmem_cache *s,
-	struct page *page, void *object, void *addr) { return 0; }
+	struct page *page, void *object, unsigned long addr) { return 0; }
 
 static inline int free_debug_processing(struct kmem_cache *s,
-	struct page *page, void *object, void *addr) { return 0; }
+	struct page *page, void *object, unsigned long addr) { return 0; }
 
 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
 			{ return 1; }
@@ -1503,8 +1503,8 @@ static inline int node_match(struct kmem_cache_cpu *c, int node)
  * we need to allocate a new slab. This is the slowest path since it involves
  * a call to the page allocator and the setup of a new slab.
  */
-static void *__slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
+static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+			  unsigned long addr, struct kmem_cache_cpu *c)
 {
 	void **object;
 	struct page *new;
@@ -1588,7 +1588,7 @@ debug:
  * Otherwise we can simply pick the next object from the lockless free list.
  */
 static __always_inline void *slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, void *addr)
+		gfp_t gfpflags, int node, unsigned long addr)
 {
 	void **object;
 	struct kmem_cache_cpu *c;
@@ -1617,14 +1617,14 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
+	return slab_alloc(s, gfpflags, -1, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
+	return slab_alloc(s, gfpflags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
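
Behavior of the exported allocators is unchanged by the conversion above: since _RET_IP_ expands to (unsigned long)__builtin_return_address(0), the new kmem_cache_alloc body is, after preprocessing, equivalent to

    return slab_alloc(s, gfpflags, -1,
    		(unsigned long)__builtin_return_address(0));

i.e. the same return address the old code captured, minus the void-pointer type.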
@@ -1638,7 +1638,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-				void *x, void *addr, unsigned int offset)
+			void *x, unsigned long addr, unsigned int offset)
 {
 	void *prior;
 	void **object = (void *)x;
@@ -1708,7 +1708,7 @@ debug:
  * with all sorts of special processing.
  */
 static __always_inline void slab_free(struct kmem_cache *s,
-			struct page *page, void *x, void *addr)
+			struct page *page, void *x, unsigned long addr)
 {
 	void **object = (void *)x;
 	struct kmem_cache_cpu *c;
@@ -1735,7 +1735,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 
 	page = virt_to_head_page(x);
 
-	slab_free(s, page, x, __builtin_return_address(0));
+	slab_free(s, page, x, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -2663,7 +2663,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, -1, __builtin_return_address(0));
+	return slab_alloc(s, flags, -1, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2691,7 +2691,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, node, __builtin_return_address(0));
+	return slab_alloc(s, flags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2748,7 +2748,7 @@ void kfree(const void *x)
 		put_page(page);
 		return;
 	}
-	slab_free(page->slab, page, object, __builtin_return_address(0));
+	slab_free(page->slab, page, object, _RET_IP_);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -3204,7 +3204,7 @@ static struct notifier_block __cpuinitdata slab_notifier = {
 
 #endif
 
-void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
+void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
 
@@ -3220,7 +3220,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
-					int node, void *caller)
+					int node, unsigned long caller)
 {
 	struct kmem_cache *s;
 
@@ -3431,7 +3431,7 @@ static void resiliency_test(void) {};
 
 struct location {
 	unsigned long count;
-	void *addr;
+	unsigned long addr;
 	long long sum_time;
 	long min_time;
 	long max_time;
@@ -3479,7 +3479,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 {
 	long start, end, pos;
 	struct location *l;
-	void *caddr;
+	unsigned long caddr;
 	unsigned long age = jiffies - track->when;
 
 	start = -1;
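
To see why the track-caller entry points take the address as a parameter at all: wrappers such as kstrdup() want allocations attributed to *their* caller, not to the wrapper itself. A sketch of the idiom, assuming the matching include/linux/slab.h update from the same series (my_wrapper is a hypothetical name for illustration):

    #define kmalloc_track_caller(size, flags) \
    	__kmalloc_track_caller(size, flags, _RET_IP_)
    
    static void *my_wrapper(size_t n)
    {
    	/* SLAB_STORE_USER tracking records my_wrapper's caller. */
    	return kmalloc_track_caller(n, GFP_KERNEL);
    }

Because the macro captures _RET_IP_ in the wrapper and passes it down as a plain unsigned long, no void-pointer casts are needed anywhere along the path.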