Diffstat (limited to 'mm')
 mm/allocpercpu.c |  2
 mm/backing-dev.c | 10
 mm/failslab.c    |  1
 mm/pdflush.c     | 47
 mm/slab.c        | 26
 mm/slob.c        | 30
 mm/slub.c        | 32
 mm/util.c        | 16
8 files changed, 97 insertions, 67 deletions
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index 139d5b7b6621..dfdee6a47359 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -31,7 +31,7 @@ static void percpu_depopulate(void *__pdata, int cpu)
  * @__pdata: per-cpu data to depopulate
  * @mask: depopulate per-cpu data for cpu's selected through mask bits
  */
-static void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
+static void __percpu_depopulate_mask(void *__pdata, const cpumask_t *mask)
 {
 	int cpu;
 	for_each_cpu_mask_nr(cpu, *mask)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index be68c956a660..493b468a5035 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -284,12 +284,12 @@ static wait_queue_head_t congestion_wqh[2] = {
 };
 
 
-void clear_bdi_congested(struct backing_dev_info *bdi, int rw)
+void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
 {
 	enum bdi_state bit;
-	wait_queue_head_t *wqh = &congestion_wqh[rw];
+	wait_queue_head_t *wqh = &congestion_wqh[sync];
 
-	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
+	bit = sync ? BDI_sync_congested : BDI_async_congested;
 	clear_bit(bit, &bdi->state);
 	smp_mb__after_clear_bit();
 	if (waitqueue_active(wqh))
@@ -297,11 +297,11 @@ void clear_bdi_congested(struct backing_dev_info *bdi, int rw)
 }
 EXPORT_SYMBOL(clear_bdi_congested);
 
-void set_bdi_congested(struct backing_dev_info *bdi, int rw)
+void set_bdi_congested(struct backing_dev_info *bdi, int sync)
 {
 	enum bdi_state bit;
 
-	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
+	bit = sync ? BDI_sync_congested : BDI_async_congested;
 	set_bit(bit, &bdi->state);
 }
 EXPORT_SYMBOL(set_bdi_congested);
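Note on the mm/backing-dev.c hunks above: only the parameter name and the congestion bits change; the int argument now selects the sync (1) or async (0) congestion state instead of READ/WRITE. A minimal, hypothetical sketch of a caller under the new meaning (the helper and its use are illustrative only, not part of this commit):

/* Illustrative only: a hypothetical caller of the renamed API. */
#include <linux/backing-dev.h>

static void example_mark_async_congested(struct backing_dev_info *bdi, int busy)
{
	if (busy)
		set_bdi_congested(bdi, 0);	/* 0 == async: sets BDI_async_congested */
	else
		clear_bdi_congested(bdi, 0);	/* clears the bit and wakes any waiters */
}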
diff --git a/mm/failslab.c b/mm/failslab.c
index 7c6ea6493f80..9339de5f0a91 100644
--- a/mm/failslab.c
+++ b/mm/failslab.c
@@ -1,4 +1,5 @@
 #include <linux/fault-inject.h>
+#include <linux/gfp.h>
 
 static struct {
 	struct fault_attr attr;
diff --git a/mm/pdflush.c b/mm/pdflush.c
index 118905e3d788..f2caf96993f8 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -58,6 +58,14 @@ static DEFINE_SPINLOCK(pdflush_lock);
 int nr_pdflush_threads = 0;
 
 /*
+ * The max/min number of pdflush threads. R/W by sysctl at
+ * /proc/sys/vm/nr_pdflush_threads_max/min
+ */
+int nr_pdflush_threads_max __read_mostly = MAX_PDFLUSH_THREADS;
+int nr_pdflush_threads_min __read_mostly = MIN_PDFLUSH_THREADS;
+
+
+/*
  * The time at which the pdflush thread pool last went empty
  */
 static unsigned long last_empty_jifs;
@@ -68,7 +76,7 @@ static unsigned long last_empty_jifs;
  * Thread pool management algorithm:
  *
  * - The minimum and maximum number of pdflush instances are bound
- *   by MIN_PDFLUSH_THREADS and MAX_PDFLUSH_THREADS.
+ *   by nr_pdflush_threads_min and nr_pdflush_threads_max.
  *
  * - If there have been no idle pdflush instances for 1 second, create
  *   a new one.
@@ -98,7 +106,6 @@ static int __pdflush(struct pdflush_work *my_work)
 	INIT_LIST_HEAD(&my_work->list);
 
 	spin_lock_irq(&pdflush_lock);
-	nr_pdflush_threads++;
 	for ( ; ; ) {
 		struct pdflush_work *pdf;
 
@@ -126,20 +133,25 @@ static int __pdflush(struct pdflush_work *my_work)
 
 		(*my_work->fn)(my_work->arg0);
 
+		spin_lock_irq(&pdflush_lock);
+
 		/*
 		 * Thread creation: For how long have there been zero
 		 * available threads?
+		 *
+		 * To throttle creation, we reset last_empty_jifs.
 		 */
 		if (time_after(jiffies, last_empty_jifs + 1 * HZ)) {
-			/* unlocked list_empty() test is OK here */
-			if (list_empty(&pdflush_list)) {
-				/* unlocked test is OK here */
-				if (nr_pdflush_threads < MAX_PDFLUSH_THREADS)
-					start_one_pdflush_thread();
+			if (list_empty(&pdflush_list) &&
+			    nr_pdflush_threads < nr_pdflush_threads_max) {
+				last_empty_jifs = jiffies;
+				nr_pdflush_threads++;
+				spin_unlock_irq(&pdflush_lock);
+				start_one_pdflush_thread();
+				spin_lock_irq(&pdflush_lock);
 			}
 		}
 
-		spin_lock_irq(&pdflush_lock);
 		my_work->fn = NULL;
 
 		/*
@@ -148,7 +160,7 @@ static int __pdflush(struct pdflush_work *my_work)
 	 */
 		if (list_empty(&pdflush_list))
 			continue;
-		if (nr_pdflush_threads <= MIN_PDFLUSH_THREADS)
+		if (nr_pdflush_threads <= nr_pdflush_threads_min)
 			continue;
 		pdf = list_entry(pdflush_list.prev, struct pdflush_work, list);
 		if (time_after(jiffies, pdf->when_i_went_to_sleep + 1 * HZ)) {
@@ -236,14 +248,27 @@ int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0)
 
 static void start_one_pdflush_thread(void)
 {
-	kthread_run(pdflush, NULL, "pdflush");
+	struct task_struct *k;
+
+	k = kthread_run(pdflush, NULL, "pdflush");
+	if (unlikely(IS_ERR(k))) {
+		spin_lock_irq(&pdflush_lock);
+		nr_pdflush_threads--;
+		spin_unlock_irq(&pdflush_lock);
+	}
 }
 
 static int __init pdflush_init(void)
 {
 	int i;
 
-	for (i = 0; i < MIN_PDFLUSH_THREADS; i++)
+	/*
+	 * Pre-set nr_pdflush_threads... If we fail to create,
+	 * the count will be decremented.
+	 */
+	nr_pdflush_threads = nr_pdflush_threads_min;
+
+	for (i = 0; i < nr_pdflush_threads_min; i++)
 		start_one_pdflush_thread();
 	return 0;
 }
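The pdflush changes above replace the compile-time MIN/MAX thread limits with the tunables nr_pdflush_threads_min and nr_pdflush_threads_max, exposed (per the new comment) under /proc/sys/vm. A small, hypothetical user-space sketch of reading the current pool size and raising the minimum; the file names are taken from that comment, and the sysctl table wiring itself is not part of this mm diff:

/* Hypothetical user-space example: inspect and tune the pdflush pool. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/nr_pdflush_threads", "r");
	int nr = 0;

	if (f) {
		if (fscanf(f, "%d", &nr) == 1)
			printf("pdflush threads currently running: %d\n", nr);
		fclose(f);
	}

	f = fopen("/proc/sys/vm/nr_pdflush_threads_min", "w");
	if (f) {
		fprintf(f, "4\n");	/* keep at least four threads alive */
		fclose(f);
	}
	return 0;
}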
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3565,8 +3565,8 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
 
-	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+	trace_kmem_cache_alloc(_RET_IP_, ret,
 			obj_size(cachep), cachep->buffer_size, flags);
 
 	return ret;
 }
@@ -3627,9 +3627,9 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	void *ret = __cache_alloc_node(cachep, flags, nodeid,
 				       __builtin_return_address(0));
 
-	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+	trace_kmem_cache_alloc_node(_RET_IP_, ret,
 				  obj_size(cachep), cachep->buffer_size,
 				  flags, nodeid);
 
 	return ret;
 }
@@ -3657,9 +3657,8 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 		return cachep;
 	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
 
-	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
-				  (unsigned long) caller, ret,
-				  size, cachep->buffer_size, flags, node);
+	trace_kmalloc_node((unsigned long) caller, ret,
+			   size, cachep->buffer_size, flags, node);
 
 	return ret;
 }
@@ -3709,9 +3708,8 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 		return cachep;
 	ret = __cache_alloc(cachep, flags, caller);
 
-	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
-			     (unsigned long) caller, ret,
-			     size, cachep->buffer_size, flags);
+	trace_kmalloc((unsigned long) caller, ret,
+		      size, cachep->buffer_size, flags);
 
 	return ret;
 }
@@ -3757,7 +3755,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	__cache_free(cachep, objp);
 	local_irq_restore(flags);
 
-	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, objp);
+	trace_kmem_cache_free(_RET_IP_, objp);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -3775,6 +3773,8 @@ void kfree(const void *objp)
 	struct kmem_cache *c;
 	unsigned long flags;
 
+	trace_kfree(_RET_IP_, objp);
+
 	if (unlikely(ZERO_OR_NULL_PTR(objp)))
 		return;
 	local_irq_save(flags);
@@ -3784,8 +3784,6 @@ void kfree(const void *objp)
 	debug_check_no_obj_freed(objp, obj_size(c));
 	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
-
-	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kfree);
 
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -490,9 +490,8 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 		*m = size;
 		ret = (void *)m + align;
 
-		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
-					  _RET_IP_, ret,
-					  size, size + align, gfp, node);
+		trace_kmalloc_node(_RET_IP_, ret,
+				   size, size + align, gfp, node);
 	} else {
 		unsigned int order = get_order(size);
 
@@ -503,9 +502,8 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 			page->private = size;
 		}
 
-		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
-					  _RET_IP_, ret,
-					  size, PAGE_SIZE << order, gfp, node);
+		trace_kmalloc_node(_RET_IP_, ret,
+				   size, PAGE_SIZE << order, gfp, node);
 	}
 
 	return ret;
@@ -516,6 +514,8 @@ void kfree(const void *block)
 {
 	struct slob_page *sp;
 
+	trace_kfree(_RET_IP_, block);
+
 	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
 
@@ -526,8 +526,6 @@ void kfree(const void *block)
 		slob_free(m, *m + align);
 	} else
 		put_page(&sp->page);
-
-	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, block);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -599,16 +597,14 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 
 	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node);
-		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
-					  _RET_IP_, b, c->size,
-					  SLOB_UNITS(c->size) * SLOB_UNIT,
-					  flags, node);
+		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+					    SLOB_UNITS(c->size) * SLOB_UNIT,
+					    flags, node);
 	} else {
 		b = slob_new_pages(flags, get_order(c->size), node);
-		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
-					  _RET_IP_, b, c->size,
-					  PAGE_SIZE << get_order(c->size),
-					  flags, node);
+		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+					    PAGE_SIZE << get_order(c->size),
+					    flags, node);
 	}
 
 	if (c->ctor)
@@ -646,7 +642,7 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 		__kmem_cache_free(b, c->size);
 	}
 
-	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, b);
+	trace_kmem_cache_free(_RET_IP_, b);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1621,8 +1621,7 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
 	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
 
-	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
-			     s->objsize, s->size, gfpflags);
+	trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
 
 	return ret;
 }
@@ -1641,8 +1640,8 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
 	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
 
-	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+	trace_kmem_cache_alloc_node(_RET_IP_, ret,
 				  s->objsize, s->size, gfpflags, node);
 
 	return ret;
 }
@@ -1767,7 +1766,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 
 	slab_free(s, page, x, _RET_IP_);
 
-	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x);
+	trace_kmem_cache_free(_RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -2702,8 +2701,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 
 	ret = slab_alloc(s, flags, -1, _RET_IP_);
 
-	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
-			     size, s->size, flags);
+	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
 
 	return ret;
 }
@@ -2729,10 +2727,9 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	if (unlikely(size > SLUB_MAX_SIZE)) {
 		ret = kmalloc_large_node(size, flags, node);
 
-		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
-					  _RET_IP_, ret,
-					  size, PAGE_SIZE << get_order(size),
-					  flags, node);
+		trace_kmalloc_node(_RET_IP_, ret,
+				   size, PAGE_SIZE << get_order(size),
+				   flags, node);
 
 		return ret;
 	}
@@ -2744,8 +2741,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 
 	ret = slab_alloc(s, flags, node, _RET_IP_);
 
-	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
-				  size, s->size, flags, node);
+	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
 
 	return ret;
 }
@@ -2796,6 +2792,8 @@ void kfree(const void *x)
 	struct page *page;
 	void *object = (void *)x;
 
+	trace_kfree(_RET_IP_, x);
+
 	if (unlikely(ZERO_OR_NULL_PTR(x)))
 		return;
 
@@ -2806,8 +2804,6 @@ void kfree(const void *x)
 		return;
 	}
 	slab_free(page->slab, page, object, _RET_IP_);
-
-	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -3290,8 +3286,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	ret = slab_alloc(s, gfpflags, -1, caller);
 
 	/* Honor the call site pointer we recieved. */
-	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, caller, ret, size,
-			     s->size, gfpflags);
+	trace_kmalloc(caller, ret, size, s->size, gfpflags);
 
 	return ret;
 }
@@ -3313,8 +3308,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	ret = slab_alloc(s, gfpflags, node, caller);
 
 	/* Honor the call site pointer we recieved. */
-	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, caller, ret,
-				  size, s->size, gfpflags, node);
+	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
 
 	return ret;
 }
diff --git a/mm/util.c b/mm/util.c
--- a/mm/util.c
+++ b/mm/util.c
@@ -4,6 +4,7 @@
 #include <linux/module.h>
 #include <linux/err.h>
 #include <linux/sched.h>
+#include <linux/tracepoint.h>
 #include <asm/uaccess.h>
 
 /**
@@ -236,3 +237,18 @@ int __attribute__((weak)) get_user_pages_fast(unsigned long start,
 	return ret;
 }
 EXPORT_SYMBOL_GPL(get_user_pages_fast);
+
+/* Tracepoints definitions. */
+DEFINE_TRACE(kmalloc);
+DEFINE_TRACE(kmem_cache_alloc);
+DEFINE_TRACE(kmalloc_node);
+DEFINE_TRACE(kmem_cache_alloc_node);
+DEFINE_TRACE(kfree);
+DEFINE_TRACE(kmem_cache_free);
+
+EXPORT_TRACEPOINT_SYMBOL(kmalloc);
+EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
+EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
+EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
+EXPORT_TRACEPOINT_SYMBOL(kfree);
+EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
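The mm/util.c hunk defines the new kmem tracepoints and exports them, so modules can attach probes to the allocation and free events that slab, slob and slub now fire. A hedged sketch of such a probe follows; the probe prototype is inferred from the trace_kfree(_RET_IP_, objp) call sites in this diff, and the header path is an assumption (the DECLARE_TRACE() lines live in a kmem trace header that is not part of this hunk):

/* Sketch only: prototype inferred from the call sites above; the
 * register_trace_kfree() helper is generated by DECLARE_TRACE(). */
#include <linux/module.h>
#include <trace/kmemtrace.h>	/* header name assumed */

static void probe_kfree(unsigned long call_site, const void *ptr)
{
	pr_debug("kfree from %pS: %p\n", (void *)call_site, ptr);
}

static int __init kfree_probe_init(void)
{
	return register_trace_kfree(probe_kfree);
}

static void __exit kfree_probe_exit(void)
{
	unregister_trace_kfree(probe_kfree);
	tracepoint_synchronize_unregister();
}

module_init(kfree_probe_init);
module_exit(kfree_probe_exit);
MODULE_LICENSE("GPL");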
