author    Ingo Molnar <mingo@elte.hu>  2009-04-07 06:05:21 -0400
committer Ingo Molnar <mingo@elte.hu>  2009-04-07 06:05:25 -0400
commit    6c009ecef8cca28c7c09eb16d0802e37915a76e1 (patch)
tree      11c773f780186fdb9fbc9c80a73fb7c8426b1fba /mm
parent    98c2aaf8be5baf7193be37fb28bce8e7327158bc (diff)
parent    d508afb437daee7cf07da085b635c44a4ebf9b38 (diff)

Merge branch 'linus' into perfcounters/core

Merge reason: need the upstream facility added by:

    7f1e2ca: hrtimer: fix rq->lock inversion (again)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm')
-rw-r--r--  mm/allocpercpu.c |  2
-rw-r--r--  mm/backing-dev.c | 10
-rw-r--r--  mm/failslab.c    |  1
-rw-r--r--  mm/slab.c        | 26
-rw-r--r--  mm/slob.c        | 30
-rw-r--r--  mm/slub.c        | 32
-rw-r--r--  mm/util.c        | 16
7 files changed, 61 insertions, 56 deletions
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index 139d5b7b6621..dfdee6a47359 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -31,7 +31,7 @@ static void percpu_depopulate(void *__pdata, int cpu)
  * @__pdata: per-cpu data to depopulate
  * @mask: depopulate per-cpu data for cpu's selected through mask bits
  */
-static void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
+static void __percpu_depopulate_mask(void *__pdata, const cpumask_t *mask)
 {
         int cpu;
         for_each_cpu_mask_nr(cpu, *mask)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index be68c956a660..493b468a5035 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -284,12 +284,12 @@ static wait_queue_head_t congestion_wqh[2] = {
 };
 
 
-void clear_bdi_congested(struct backing_dev_info *bdi, int rw)
+void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
 {
         enum bdi_state bit;
-        wait_queue_head_t *wqh = &congestion_wqh[rw];
+        wait_queue_head_t *wqh = &congestion_wqh[sync];
 
-        bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
+        bit = sync ? BDI_sync_congested : BDI_async_congested;
         clear_bit(bit, &bdi->state);
         smp_mb__after_clear_bit();
         if (waitqueue_active(wqh))
@@ -297,11 +297,11 @@ void clear_bdi_congested(struct backing_dev_info *bdi, int rw)
 }
 EXPORT_SYMBOL(clear_bdi_congested);
 
-void set_bdi_congested(struct backing_dev_info *bdi, int rw)
+void set_bdi_congested(struct backing_dev_info *bdi, int sync)
 {
         enum bdi_state bit;
 
-        bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
+        bit = sync ? BDI_sync_congested : BDI_async_congested;
         set_bit(bit, &bdi->state);
 }
 EXPORT_SYMBOL(set_bdi_congested);
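
The backing-dev.c hunks above change the congestion helpers from taking a READ/WRITE request direction to taking a sync flag: callers now say whether the sync or the async queue is congested, and the helpers pick BDI_sync_congested or BDI_async_congested accordingly. Below is a minimal, self-contained sketch of that flag-to-bit mapping; it is plain user-space C for illustration only, the struct and bit names are stand-ins rather than the kernel definitions, and the atomic bitops and waitqueue wakeup of the real code are omitted.

/*
 * Illustration only: mirrors the sync/async selection logic from the
 * clear_bdi_congested()/set_bdi_congested() hunks above.  The struct and
 * enum are stand-ins, not the kernel's backing_dev_info.
 */
#include <stdio.h>

enum bdi_state_bits {
        BDI_async_congested_bit,        /* async queue is getting full */
        BDI_sync_congested_bit,         /* sync queue is getting full */
};

struct bdi_sketch {
        unsigned long state;
};

static void set_bdi_congested_sketch(struct bdi_sketch *bdi, int sync)
{
        int bit = sync ? BDI_sync_congested_bit : BDI_async_congested_bit;

        bdi->state |= 1UL << bit;       /* the kernel code uses set_bit() */
}

static void clear_bdi_congested_sketch(struct bdi_sketch *bdi, int sync)
{
        int bit = sync ? BDI_sync_congested_bit : BDI_async_congested_bit;

        bdi->state &= ~(1UL << bit);    /* the kernel code uses clear_bit()
                                           and then wakes any waiters on the
                                           matching congestion_wqh[sync] */
}

int main(void)
{
        struct bdi_sketch bdi = { 0 };

        set_bdi_congested_sketch(&bdi, 1);
        printf("state after marking sync congested:  %#lx\n", bdi.state);
        clear_bdi_congested_sketch(&bdi, 1);
        printf("state after clearing sync congested: %#lx\n", bdi.state);
        return 0;
}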
diff --git a/mm/failslab.c b/mm/failslab.c
index 7c6ea6493f80..9339de5f0a91 100644
--- a/mm/failslab.c
+++ b/mm/failslab.c
@@ -1,4 +1,5 @@
 #include <linux/fault-inject.h>
+#include <linux/gfp.h>
 
 static struct {
         struct fault_attr attr;
diff --git a/mm/slab.c b/mm/slab.c
index 4fc1761c6dc8..9a90b00d2f91 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3565,8 +3565,8 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
         void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
 
-        kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
-                        obj_size(cachep), cachep->buffer_size, flags);
+        trace_kmem_cache_alloc(_RET_IP_, ret,
+                        obj_size(cachep), cachep->buffer_size, flags);
 
         return ret;
 }
@@ -3627,9 +3627,9 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
         void *ret = __cache_alloc_node(cachep, flags, nodeid,
                         __builtin_return_address(0));
 
-        kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
-                        obj_size(cachep), cachep->buffer_size,
-                        flags, nodeid);
+        trace_kmem_cache_alloc_node(_RET_IP_, ret,
+                        obj_size(cachep), cachep->buffer_size,
+                        flags, nodeid);
 
         return ret;
 }
@@ -3657,9 +3657,8 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
                 return cachep;
         ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
 
-        kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
-                        (unsigned long) caller, ret,
-                        size, cachep->buffer_size, flags, node);
+        trace_kmalloc_node((unsigned long) caller, ret,
+                        size, cachep->buffer_size, flags, node);
 
         return ret;
 }
@@ -3709,9 +3708,8 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
                 return cachep;
         ret = __cache_alloc(cachep, flags, caller);
 
-        kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
-                        (unsigned long) caller, ret,
-                        size, cachep->buffer_size, flags);
+        trace_kmalloc((unsigned long) caller, ret,
+                        size, cachep->buffer_size, flags);
 
         return ret;
 }
@@ -3757,7 +3755,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
         __cache_free(cachep, objp);
         local_irq_restore(flags);
 
-        kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, objp);
+        trace_kmem_cache_free(_RET_IP_, objp);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -3775,6 +3773,8 @@ void kfree(const void *objp)
         struct kmem_cache *c;
         unsigned long flags;
 
+        trace_kfree(_RET_IP_, objp);
+
         if (unlikely(ZERO_OR_NULL_PTR(objp)))
                 return;
         local_irq_save(flags);
@@ -3784,8 +3784,6 @@ void kfree(const void *objp)
         debug_check_no_obj_freed(objp, obj_size(c));
         __cache_free(c, (void *)objp);
         local_irq_restore(flags);
-
-        kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kfree);
 
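
The slab.c hunks above (like the slob.c and slub.c hunks below) replace kmemtrace's dedicated kmemtrace_mark_*() markers with ordinary tracepoints, which means other kernel code can attach probes to the allocation and free events. The following is a rough sketch of such a probe module, not something in this diff: it assumes the register_trace_kmalloc()/unregister_trace_kmalloc() helpers that DECLARE_TRACE() generates in this kernel generation, a probe prototype matching the trace_kmalloc() call sites in the diff (call site, pointer, requested size, allocated size, gfp flags), and a header path that is a guess.

/*
 * Sketch of a module probing the kmalloc tracepoint defined by this merge.
 * Assumptions not shown in the diff: the tracepoint declarations live in
 * <trace/kmemtrace.h>, and the probe prototype matches the trace_kmalloc()
 * call sites above.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <trace/kmemtrace.h>            /* assumed header with the DECLARE_TRACE()s */

static void probe_kmalloc(unsigned long call_site, const void *ptr,
                          size_t bytes_req, size_t bytes_alloc,
                          gfp_t gfp_flags)
{
        pr_debug("kmalloc from %pS: requested %zu, allocated %zu\n",
                 (void *)call_site, bytes_req, bytes_alloc);
}

static int __init kmalloc_probe_init(void)
{
        /* register_trace_kmalloc() is generated by DECLARE_TRACE(kmalloc, ...) */
        return register_trace_kmalloc(probe_kmalloc);
}

static void __exit kmalloc_probe_exit(void)
{
        unregister_trace_kmalloc(probe_kmalloc);
        tracepoint_synchronize_unregister();    /* wait for in-flight probes */
}

module_init(kmalloc_probe_init);
module_exit(kmalloc_probe_exit);
MODULE_LICENSE("GPL");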
diff --git a/mm/slob.c b/mm/slob.c
index 4dd6516447f2..a2d4ab32198d 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -490,9 +490,8 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
                 *m = size;
                 ret = (void *)m + align;
 
-                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
-                                _RET_IP_, ret,
-                                size, size + align, gfp, node);
+                trace_kmalloc_node(_RET_IP_, ret,
+                                size, size + align, gfp, node);
         } else {
                 unsigned int order = get_order(size);
 
@@ -503,9 +502,8 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
                         page->private = size;
                 }
 
-                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
-                                _RET_IP_, ret,
-                                size, PAGE_SIZE << order, gfp, node);
+                trace_kmalloc_node(_RET_IP_, ret,
+                                size, PAGE_SIZE << order, gfp, node);
         }
 
         return ret;
@@ -516,6 +514,8 @@ void kfree(const void *block)
 {
         struct slob_page *sp;
 
+        trace_kfree(_RET_IP_, block);
+
         if (unlikely(ZERO_OR_NULL_PTR(block)))
                 return;
 
@@ -526,8 +526,6 @@ void kfree(const void *block)
                 slob_free(m, *m + align);
         } else
                 put_page(&sp->page);
-
-        kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, block);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -599,16 +597,14 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 
         if (c->size < PAGE_SIZE) {
                 b = slob_alloc(c->size, flags, c->align, node);
-                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
-                                _RET_IP_, b, c->size,
-                                SLOB_UNITS(c->size) * SLOB_UNIT,
-                                flags, node);
+                trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+                                SLOB_UNITS(c->size) * SLOB_UNIT,
+                                flags, node);
         } else {
                 b = slob_new_pages(flags, get_order(c->size), node);
-                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
-                                _RET_IP_, b, c->size,
-                                PAGE_SIZE << get_order(c->size),
-                                flags, node);
+                trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+                                PAGE_SIZE << get_order(c->size),
+                                flags, node);
         }
 
         if (c->ctor)
@@ -646,7 +642,7 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
                 __kmem_cache_free(b, c->size);
         }
 
-        kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, b);
+        trace_kmem_cache_free(_RET_IP_, b);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
diff --git a/mm/slub.c b/mm/slub.c
index 7aaa121d0ea9..7ab54ecbd3f3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1621,8 +1621,7 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
         void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
 
-        kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
-                        s->objsize, s->size, gfpflags);
+        trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
 
         return ret;
 }
@@ -1641,8 +1640,8 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
         void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
 
-        kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
-                        s->objsize, s->size, gfpflags, node);
+        trace_kmem_cache_alloc_node(_RET_IP_, ret,
+                        s->objsize, s->size, gfpflags, node);
 
         return ret;
 }
@@ -1767,7 +1766,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 
         slab_free(s, page, x, _RET_IP_);
 
-        kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x);
+        trace_kmem_cache_free(_RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -2702,8 +2701,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 
         ret = slab_alloc(s, flags, -1, _RET_IP_);
 
-        kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
-                        size, s->size, flags);
+        trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
 
         return ret;
 }
@@ -2729,10 +2727,9 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
         if (unlikely(size > SLUB_MAX_SIZE)) {
                 ret = kmalloc_large_node(size, flags, node);
 
-                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
-                                _RET_IP_, ret,
-                                size, PAGE_SIZE << get_order(size),
-                                flags, node);
+                trace_kmalloc_node(_RET_IP_, ret,
+                                size, PAGE_SIZE << get_order(size),
+                                flags, node);
 
                 return ret;
         }
@@ -2744,8 +2741,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 
         ret = slab_alloc(s, flags, node, _RET_IP_);
 
-        kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
-                        size, s->size, flags, node);
+        trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
 
         return ret;
 }
@@ -2796,6 +2792,8 @@ void kfree(const void *x)
         struct page *page;
         void *object = (void *)x;
 
+        trace_kfree(_RET_IP_, x);
+
         if (unlikely(ZERO_OR_NULL_PTR(x)))
                 return;
 
@@ -2806,8 +2804,6 @@ void kfree(const void *x)
                 return;
         }
         slab_free(page->slab, page, object, _RET_IP_);
-
-        kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -3290,8 +3286,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
         ret = slab_alloc(s, gfpflags, -1, caller);
 
         /* Honor the call site pointer we recieved. */
-        kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, caller, ret, size,
-                        s->size, gfpflags);
+        trace_kmalloc(caller, ret, size, s->size, gfpflags);
 
         return ret;
 }
@@ -3313,8 +3308,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
         ret = slab_alloc(s, gfpflags, node, caller);
 
         /* Honor the call site pointer we recieved. */
-        kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, caller, ret,
-                        size, s->size, gfpflags, node);
+        trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
 
         return ret;
 }
diff --git a/mm/util.c b/mm/util.c
index 7c122e49f769..2599e83eea17 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -4,6 +4,7 @@
 #include <linux/module.h>
 #include <linux/err.h>
 #include <linux/sched.h>
+#include <linux/tracepoint.h>
 #include <asm/uaccess.h>
 
 /**
@@ -236,3 +237,18 @@ int __attribute__((weak)) get_user_pages_fast(unsigned long start,
         return ret;
 }
 EXPORT_SYMBOL_GPL(get_user_pages_fast);
+
+/* Tracepoints definitions. */
+DEFINE_TRACE(kmalloc);
+DEFINE_TRACE(kmem_cache_alloc);
+DEFINE_TRACE(kmalloc_node);
+DEFINE_TRACE(kmem_cache_alloc_node);
+DEFINE_TRACE(kfree);
+DEFINE_TRACE(kmem_cache_free);
+
+EXPORT_TRACEPOINT_SYMBOL(kmalloc);
+EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
+EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
+EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
+EXPORT_TRACEPOINT_SYMBOL(kfree);
+EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
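
The DEFINE_TRACE()/EXPORT_TRACEPOINT_SYMBOL() block added to mm/util.c instantiates the six allocator tracepoints and makes their register/unregister helpers usable from modules; the prototypes themselves are declared elsewhere with DECLARE_TRACE(). The sketch below shows what that declaration side plausibly looks like, inferred only from the call sites in this diff (trace_kfree() takes a call site and a pointer; trace_kmalloc() additionally takes requested size, allocated size and gfp flags), not copied from the actual trace header.

/*
 * Assumed declaration-side counterpart of the DEFINE_TRACE() lines above.
 * The real declarations live in a trace header, not in mm/util.c, and their
 * exact form is inferred from the call sites in this diff.
 */
#include <linux/tracepoint.h>
#include <linux/types.h>

DECLARE_TRACE(kfree,
        TP_PROTO(unsigned long call_site, const void *ptr),
        TP_ARGS(call_site, ptr));

DECLARE_TRACE(kmalloc,
        TP_PROTO(unsigned long call_site, const void *ptr,
                 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags));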