author		Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>	2008-08-19 13:43:25 -0400
committer	Pekka Enberg <penberg@cs.helsinki.fi>	2008-12-29 08:33:59 -0500
commit		35995a4d815586bc968a857f7235707940a2f755 (patch)
tree		bbee251d5e4fc027a07cb86b3428ba5f08dffab3
parent		ed313489badef16d700f5a3be50e8fd8f8294bc8 (diff)
SLUB: Replace __builtin_return_address(0) with _RET_IP_.
This patch replaces __builtin_return_address(0) with _RET_IP_, since a
previous patch moved _RET_IP_ and _THIS_IP_ to include/linux/kernel.h and
they're widely available now. This makes for shorter and easier to read
code.

[penberg@cs.helsinki.fi: remove _RET_IP_ casts to void pointer]
Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
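For context, the helpers this patch relies on are defined in
include/linux/kernel.h; a sketch of those definitions (not part of this
diff):

	/* include/linux/kernel.h */
	#define _RET_IP_	(unsigned long)__builtin_return_address(0)
	#define _THIS_IP_	({ __label__ __here; __here: \
				   (unsigned long)&&__here; })

In other words, callers now pass the instruction pointer around as an
unsigned long rather than a void *; debug code that still needs a pointer
(e.g. for %pS printing) casts it back with (void *), as the mm/slab.c and
mm/slub.c hunks below do.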
-rw-r--r--	include/linux/slab.h	 8
-rw-r--r--	mm/slab.c	 8
-rw-r--r--	mm/slub.c	48
3 files changed, 32 insertions, 32 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 000da12b5cf0..c97ed28559ec 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -253,9 +253,9 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
  * request comes from.
  */
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
-extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
 #define kmalloc_track_caller(size, flags) \
-	__kmalloc_track_caller(size, flags, __builtin_return_address(0))
+	__kmalloc_track_caller(size, flags, _RET_IP_)
 #else
 #define kmalloc_track_caller(size, flags) \
 	__kmalloc(size, flags)
@@ -271,10 +271,10 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
  * allocation request comes from.
  */
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
 #define kmalloc_node_track_caller(size, flags, node) \
 	__kmalloc_node_track_caller(size, flags, node, \
-			__builtin_return_address(0))
+			_RET_IP_)
 #else
 #define kmalloc_node_track_caller(size, flags, node) \
 	__kmalloc_node(size, flags, node)
diff --git a/mm/slab.c b/mm/slab.c
index 09187517f9dc..a14787799014 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3686,9 +3686,9 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(__kmalloc_node);
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
-		int node, void *caller)
+		int node, unsigned long caller)
 {
-	return __do_kmalloc_node(size, flags, node, caller);
+	return __do_kmalloc_node(size, flags, node, (void *)caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #else
@@ -3730,9 +3730,9 @@ void *__kmalloc(size_t size, gfp_t flags)
 }
 EXPORT_SYMBOL(__kmalloc);
 
-void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
+void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
 {
-	return __do_kmalloc(size, flags, caller);
+	return __do_kmalloc(size, flags, (void *)caller);
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
diff --git a/mm/slub.c b/mm/slub.c
index 7ad489af9561..06da86654875 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -178,7 +178,7 @@ static LIST_HEAD(slab_caches);
  * Tracking user of a slab.
  */
 struct track {
-	void *addr;		/* Called from address */
+	unsigned long addr;	/* Called from address */
 	int cpu;		/* Was running on cpu */
 	int pid;		/* Pid context */
 	unsigned long when;	/* When did the operation occur */
@@ -367,7 +367,7 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 }
 
 static void set_track(struct kmem_cache *s, void *object,
-			enum track_item alloc, void *addr)
+			enum track_item alloc, unsigned long addr)
 {
 	struct track *p;
 
@@ -391,8 +391,8 @@ static void init_tracking(struct kmem_cache *s, void *object)
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
-	set_track(s, object, TRACK_FREE, NULL);
-	set_track(s, object, TRACK_ALLOC, NULL);
+	set_track(s, object, TRACK_FREE, 0UL);
+	set_track(s, object, TRACK_ALLOC, 0UL);
 }
 
 static void print_track(const char *s, struct track *t)
@@ -401,7 +401,7 @@ static void print_track(const char *s, struct track *t)
 		return;
 
 	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
-		s, t->addr, jiffies - t->when, t->cpu, t->pid);
+		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
 }
 
 static void print_tracking(struct kmem_cache *s, void *object)
@@ -866,7 +866,7 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 }
 
 static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
-					void *object, void *addr)
+					void *object, unsigned long addr)
 {
 	if (!check_slab(s, page))
 		goto bad;
@@ -906,7 +906,7 @@ bad:
 }
 
 static int free_debug_processing(struct kmem_cache *s, struct page *page,
-					void *object, void *addr)
+					void *object, unsigned long addr)
 {
 	if (!check_slab(s, page))
 		goto fail;
@@ -1029,10 +1029,10 @@ static inline void setup_object_debug(struct kmem_cache *s,
 			struct page *page, void *object) {}
 
 static inline int alloc_debug_processing(struct kmem_cache *s,
-	struct page *page, void *object, void *addr) { return 0; }
+	struct page *page, void *object, unsigned long addr) { return 0; }
 
 static inline int free_debug_processing(struct kmem_cache *s,
-	struct page *page, void *object, void *addr) { return 0; }
+	struct page *page, void *object, unsigned long addr) { return 0; }
 
 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
 			{ return 1; }
@@ -1499,8 +1499,8 @@ static inline int node_match(struct kmem_cache_cpu *c, int node)
  * we need to allocate a new slab. This is the slowest path since it involves
  * a call to the page allocator and the setup of a new slab.
  */
-static void *__slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
+static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+			  unsigned long addr, struct kmem_cache_cpu *c)
 {
 	void **object;
 	struct page *new;
@@ -1584,7 +1584,7 @@ debug:
  * Otherwise we can simply pick the next object from the lockless free list.
  */
 static __always_inline void *slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, void *addr)
+		gfp_t gfpflags, int node, unsigned long addr)
 {
 	void **object;
 	struct kmem_cache_cpu *c;
@@ -1613,14 +1613,14 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
+	return slab_alloc(s, gfpflags, -1, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
+	return slab_alloc(s, gfpflags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
@@ -1634,7 +1634,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-			void *x, void *addr, unsigned int offset)
+			void *x, unsigned long addr, unsigned int offset)
 {
 	void *prior;
 	void **object = (void *)x;
@@ -1704,7 +1704,7 @@ debug:
  * with all sorts of special processing.
  */
 static __always_inline void slab_free(struct kmem_cache *s,
-			struct page *page, void *x, void *addr)
+			struct page *page, void *x, unsigned long addr)
 {
 	void **object = (void *)x;
 	struct kmem_cache_cpu *c;
@@ -1731,7 +1731,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 
 	page = virt_to_head_page(x);
 
-	slab_free(s, page, x, __builtin_return_address(0));
+	slab_free(s, page, x, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -2659,7 +2659,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, -1, __builtin_return_address(0));
+	return slab_alloc(s, flags, -1, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2687,7 +2687,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, node, __builtin_return_address(0));
+	return slab_alloc(s, flags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2744,7 +2744,7 @@ void kfree(const void *x)
 		put_page(page);
 		return;
 	}
-	slab_free(page->slab, page, object, __builtin_return_address(0));
+	slab_free(page->slab, page, object, _RET_IP_);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -3200,7 +3200,7 @@ static struct notifier_block __cpuinitdata slab_notifier = {
 
 #endif
 
-void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
+void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
 
@@ -3216,7 +3216,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
-					int node, void *caller)
+					int node, unsigned long caller)
 {
 	struct kmem_cache *s;
 
@@ -3427,7 +3427,7 @@ static void resiliency_test(void) {};
 
 struct location {
 	unsigned long count;
-	void *addr;
+	unsigned long addr;
 	long long sum_time;
 	long min_time;
 	long max_time;
@@ -3475,7 +3475,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 {
 	long start, end, pos;
 	struct location *l;
-	void *caddr;
+	unsigned long caddr;
 	unsigned long age = jiffies - track->when;
 
 	start = -1;
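
Usage note (not part of this patch): the tracked-caller entry points exist
so that thin wrappers can attribute allocations to their own caller rather
than to themselves. A sketch of such a wrapper, modeled on kstrdup() in
mm/util.c:

	/* Allocate via kmalloc_track_caller() so slab debugging
	 * (e.g. SLAB_STORE_USER) records kstrdup()'s caller, not
	 * kstrdup() itself; with this patch that address is _RET_IP_. */
	char *kstrdup(const char *s, gfp_t gfp)
	{
		size_t len;
		char *buf;

		if (!s)
			return NULL;

		len = strlen(s) + 1;
		buf = kmalloc_track_caller(len, gfp);
		if (buf)
			memcpy(buf, s, len);
		return buf;
	}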