author    Linus Torvalds <torvalds@linux-foundation.org>    2009-04-06 16:30:00 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2009-04-06 16:30:00 -0400
commit    12fe32e4f942ac5c71a4ab70b039fee65c0dc29d (patch)
tree      b0878e49e9fab1fd154fde1dd57057391831b668
parent    a63856252d2112e7c452696037a86ceb12f47f80 (diff)
parent    2121db74ba0fd2259f0e2265511684fadda9ac49 (diff)
Merge branch 'kmemtrace-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'kmemtrace-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  kmemtrace: trace kfree() calls with NULL or zero-length objects
  kmemtrace: small cleanups
  kmemtrace: restore original tracing data binary format, improve ABI
  kmemtrace: kmemtrace_alloc() must fill type_id
  kmemtrace: use tracepoints
  kmemtrace, rcu: don't include unnecessary headers, allow kmemtrace w/ tracepoints
  kmemtrace, rcu: fix rcupreempt.c data structure dependencies
  kmemtrace, rcu: fix rcu_tree_trace.c data structure dependencies
  kmemtrace, rcu: fix linux/rcutree.h and linux/rcuclassic.h dependencies
  kmemtrace, mm: fix slab.h dependency problem in mm/failslab.c
  kmemtrace, kbuild: fix slab.h dependency problem in lib/decompress_unlzma.c
  kmemtrace, kbuild: fix slab.h dependency problem in lib/decompress_bunzip2.c
  kmemtrace, kbuild: fix slab.h dependency problem in lib/decompress_inflate.c
  kmemtrace, squashfs: fix slab.h dependency problem in squashfs
  kmemtrace, befs: fix slab.h dependency problem
  kmemtrace, security: fix linux/key.h header file dependencies
  kmemtrace, fs: fix linux/fdtable.h header file dependencies
  kmemtrace, fs: uninline simple_transaction_set()
  kmemtrace, fs, security: move alloc_secdata() and free_secdata() to linux/security.h
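The core of this merge is the switch from kmemtrace's own inline hooks to generic tracepoints: DECLARE_TRACE() in the header, DEFINE_TRACE() plus EXPORT_TRACEPOINT_SYMBOL() at a definition site, and register_trace_*() to attach probes (as kmemtrace itself now does in kernel/trace/kmemtrace.c below). As orientation before the diff, here is a minimal sketch of a module attaching a probe to the new kmalloc tracepoint; my_kmalloc_probe and my_probe_init/my_probe_exit are hypothetical names, only the register/unregister calls come from this merge:

/* Illustrative sketch only; not part of this commit. */
#include <linux/module.h>
#include <trace/kmemtrace.h>

/* The probe signature must match the tracepoint's TP_PROTO. */
static void my_kmalloc_probe(unsigned long call_site, const void *ptr,
                             size_t bytes_req, size_t bytes_alloc,
                             gfp_t gfp_flags)
{
        /* Called for every traced kmalloc() while registered. */
}

static int __init my_probe_init(void)
{
        /* Returns 0 on success, mirroring kmemtrace_start_probes(). */
        return register_trace_kmalloc(my_kmalloc_probe);
}

static void __exit my_probe_exit(void)
{
        unregister_trace_kmalloc(my_kmalloc_probe);
}

module_init(my_probe_init);
module_exit(my_probe_exit);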
-rw-r--r--  fs/befs/debug.c              |   1
-rw-r--r--  fs/libfs.c                   |  16
-rw-r--r--  fs/squashfs/export.c         |   1
-rw-r--r--  include/linux/fdtable.h      |   4
-rw-r--r--  include/linux/fs.h           |  35
-rw-r--r--  include/linux/key.h          |   1
-rw-r--r--  include/linux/rcuclassic.h   |  16
-rw-r--r--  include/linux/rcupdate.h     |   1
-rw-r--r--  include/linux/rcupreempt.h   |  53
-rw-r--r--  include/linux/rcutree.h      |  27
-rw-r--r--  include/linux/security.h     |  24
-rw-r--r--  include/linux/slab_def.h     |  10
-rw-r--r--  include/linux/slub_def.h     |  12
-rw-r--r--  include/trace/kmemtrace.h    |  92
-rw-r--r--  kernel/rcuclassic.c          |  23
-rw-r--r--  kernel/rcupreempt.c          |  48
-rw-r--r--  kernel/rcutree.c             |  20
-rw-r--r--  kernel/rcutree.h             |  10
-rw-r--r--  kernel/rcutree_trace.c       |   2
-rw-r--r--  kernel/trace/kmemtrace.c     | 319
-rw-r--r--  kernel/trace/trace.h         |   6
-rw-r--r--  lib/decompress_bunzip2.c     |   1
-rw-r--r--  lib/decompress_inflate.c     |   1
-rw-r--r--  lib/decompress_unlzma.c      |   1
-rw-r--r--  mm/failslab.c                |   1
-rw-r--r--  mm/slab.c                    |  26
-rw-r--r--  mm/slob.c                    |  30
-rw-r--r--  mm/slub.c                    |  32
-rw-r--r--  mm/util.c                    |  16
29 files changed, 493 insertions(+), 336 deletions(-)
diff --git a/fs/befs/debug.c b/fs/befs/debug.c
index b8e304a0661e..622e73775c83 100644
--- a/fs/befs/debug.c
+++ b/fs/befs/debug.c
@@ -17,6 +17,7 @@
 #include <linux/spinlock.h>
 #include <linux/kernel.h>
 #include <linux/fs.h>
+#include <linux/slab.h>
 
 #endif				/* __KERNEL__ */
 
diff --git a/fs/libfs.c b/fs/libfs.c
index 4910a36f516e..cd223190c4e9 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -575,6 +575,21 @@ ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
  * possibly a read which collects the result - which is stored in a
  * file-local buffer.
  */
+
+void simple_transaction_set(struct file *file, size_t n)
+{
+        struct simple_transaction_argresp *ar = file->private_data;
+
+        BUG_ON(n > SIMPLE_TRANSACTION_LIMIT);
+
+        /*
+         * The barrier ensures that ar->size will really remain zero until
+         * ar->data is ready for reading.
+         */
+        smp_mb();
+        ar->size = n;
+}
+
 char *simple_transaction_get(struct file *file, const char __user *buf, size_t size)
 {
         struct simple_transaction_argresp *ar;
@@ -820,6 +835,7 @@ EXPORT_SYMBOL(simple_sync_file);
 EXPORT_SYMBOL(simple_unlink);
 EXPORT_SYMBOL(simple_read_from_buffer);
 EXPORT_SYMBOL(memory_read_from_buffer);
+EXPORT_SYMBOL(simple_transaction_set);
 EXPORT_SYMBOL(simple_transaction_get);
 EXPORT_SYMBOL(simple_transaction_read);
 EXPORT_SYMBOL(simple_transaction_release);
diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c
index 69e971d5ddc1..2b1b8fe5e037 100644
--- a/fs/squashfs/export.c
+++ b/fs/squashfs/export.c
@@ -40,6 +40,7 @@
 #include <linux/dcache.h>
 #include <linux/exportfs.h>
 #include <linux/zlib.h>
+#include <linux/slab.h>
 
 #include "squashfs_fs.h"
 #include "squashfs_fs_sb.h"
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 09d6c5bbdddd..a2ec74bc4812 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -5,12 +5,14 @@
 #ifndef __LINUX_FDTABLE_H
 #define __LINUX_FDTABLE_H
 
-#include <asm/atomic.h>
 #include <linux/posix_types.h>
 #include <linux/compiler.h>
 #include <linux/spinlock.h>
 #include <linux/rcupdate.h>
 #include <linux/types.h>
+#include <linux/init.h>
+
+#include <asm/atomic.h>
 
 /*
  * The default fd array needs to be at least BITS_PER_LONG,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index cae5720f431c..bce40a2207ee 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2341,19 +2341,7 @@ ssize_t simple_transaction_read(struct file *file, char __user *buf,
                             size_t size, loff_t *pos);
 int simple_transaction_release(struct inode *inode, struct file *file);
 
-static inline void simple_transaction_set(struct file *file, size_t n)
-{
-        struct simple_transaction_argresp *ar = file->private_data;
-
-        BUG_ON(n > SIMPLE_TRANSACTION_LIMIT);
-
-        /*
-         * The barrier ensures that ar->size will really remain zero until
-         * ar->data is ready for reading.
-         */
-        smp_mb();
-        ar->size = n;
-}
+void simple_transaction_set(struct file *file, size_t n);
 
 /*
  * simple attribute files
@@ -2400,27 +2388,6 @@ ssize_t simple_attr_read(struct file *file, char __user *buf,
 ssize_t simple_attr_write(struct file *file, const char __user *buf,
                           size_t len, loff_t *ppos);
 
-
-#ifdef CONFIG_SECURITY
-static inline char *alloc_secdata(void)
-{
-        return (char *)get_zeroed_page(GFP_KERNEL);
-}
-
-static inline void free_secdata(void *secdata)
-{
-        free_page((unsigned long)secdata);
-}
-#else
-static inline char *alloc_secdata(void)
-{
-        return (char *)1;
-}
-
-static inline void free_secdata(void *secdata)
-{ }
-#endif  /* CONFIG_SECURITY */
-
 struct ctl_table;
 int proc_nr_files(struct ctl_table *table, int write, struct file *filp,
                   void __user *buffer, size_t *lenp, loff_t *ppos);
diff --git a/include/linux/key.h b/include/linux/key.h
index 21d32a142c00..e544f466d69a 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -20,6 +20,7 @@
 #include <linux/rbtree.h>
 #include <linux/rcupdate.h>
 #include <linux/sysctl.h>
+#include <linux/rwsem.h>
 #include <asm/atomic.h>
 
 #ifdef __KERNEL__
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index 80044a4f3ab9..bfd92e1e5d2c 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -36,7 +36,6 @@
 #include <linux/cache.h>
 #include <linux/spinlock.h>
 #include <linux/threads.h>
-#include <linux/percpu.h>
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
 
@@ -108,25 +107,14 @@ struct rcu_data {
         struct rcu_head barrier;
 };
 
-DECLARE_PER_CPU(struct rcu_data, rcu_data);
-DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
-
 /*
  * Increment the quiescent state counter.
  * The counter is a bit degenerated: We do not need to know
  * how many quiescent states passed, just if there was at least
  * one since the start of the grace period. Thus just a flag.
  */
-static inline void rcu_qsctr_inc(int cpu)
-{
-        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
-        rdp->passed_quiesc = 1;
-}
-static inline void rcu_bh_qsctr_inc(int cpu)
-{
-        struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
-        rdp->passed_quiesc = 1;
-}
+extern void rcu_qsctr_inc(int cpu);
+extern void rcu_bh_qsctr_inc(int cpu);
 
 extern int rcu_pending(int cpu);
 extern int rcu_needs_cpu(int cpu);
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 528343e6da51..15fbb3ca634d 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -36,7 +36,6 @@
 #include <linux/cache.h>
 #include <linux/spinlock.h>
 #include <linux/threads.h>
-#include <linux/percpu.h>
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
 #include <linux/lockdep.h>
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
index 74304b4538d8..fce522782ffa 100644
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
@@ -36,34 +36,19 @@
 #include <linux/cache.h>
 #include <linux/spinlock.h>
 #include <linux/threads.h>
-#include <linux/percpu.h>
+#include <linux/smp.h>
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
 
-struct rcu_dyntick_sched {
-        int dynticks;
-        int dynticks_snap;
-        int sched_qs;
-        int sched_qs_snap;
-        int sched_dynticks_snap;
-};
-
-DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);
-
-static inline void rcu_qsctr_inc(int cpu)
-{
-        struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
-
-        rdssp->sched_qs++;
-}
-#define rcu_bh_qsctr_inc(cpu)
+extern void rcu_qsctr_inc(int cpu);
+static inline void rcu_bh_qsctr_inc(int cpu) { }
 
 /*
  * Someone might want to pass call_rcu_bh as a function pointer.
  * So this needs to just be a rename and not a macro function.
  * (no parentheses)
  */
 #define call_rcu_bh call_rcu
 
 /**
  * call_rcu_sched - Queue RCU callback for invocation after sched grace period.
@@ -117,30 +102,12 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
 struct softirq_action;
 
 #ifdef CONFIG_NO_HZ
-
-static inline void rcu_enter_nohz(void)
-{
-        static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
-
-        smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
-        __get_cpu_var(rcu_dyntick_sched).dynticks++;
-        WARN_ON_RATELIMIT(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1, &rs);
-}
-
-static inline void rcu_exit_nohz(void)
-{
-        static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
-
-        __get_cpu_var(rcu_dyntick_sched).dynticks++;
-        smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
-        WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
-                                &rs);
-}
-
-#else /* CONFIG_NO_HZ */
-#define rcu_enter_nohz()        do { } while (0)
-#define rcu_exit_nohz()         do { } while (0)
-#endif /* CONFIG_NO_HZ */
+extern void rcu_enter_nohz(void);
+extern void rcu_exit_nohz(void);
+#else
+# define rcu_enter_nohz()       do { } while (0)
+# define rcu_exit_nohz()        do { } while (0)
+#endif
 
 /*
  * A context switch is a grace period for rcupreempt synchronize_rcu()
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index a722fb67bb2d..0cdda00f2b2a 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -33,7 +33,6 @@
 #include <linux/cache.h>
 #include <linux/spinlock.h>
 #include <linux/threads.h>
-#include <linux/percpu.h>
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
 
@@ -236,30 +235,8 @@ struct rcu_state {
 #endif /* #ifdef CONFIG_NO_HZ */
 };
 
-extern struct rcu_state rcu_state;
-DECLARE_PER_CPU(struct rcu_data, rcu_data);
-
-extern struct rcu_state rcu_bh_state;
-DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
-
-/*
- * Increment the quiescent state counter.
- * The counter is a bit degenerated: We do not need to know
- * how many quiescent states passed, just if there was at least
- * one since the start of the grace period. Thus just a flag.
- */
-static inline void rcu_qsctr_inc(int cpu)
-{
-        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
-        rdp->passed_quiesc = 1;
-        rdp->passed_quiesc_completed = rdp->completed;
-}
-static inline void rcu_bh_qsctr_inc(int cpu)
-{
-        struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
-        rdp->passed_quiesc = 1;
-        rdp->passed_quiesc_completed = rdp->completed;
-}
+extern void rcu_qsctr_inc(int cpu);
+extern void rcu_bh_qsctr_inc(int cpu);
 
 extern int rcu_pending(int cpu);
 extern int rcu_needs_cpu(int cpu);
diff --git a/include/linux/security.h b/include/linux/security.h
index 54ed15799a83..d5fd6163606f 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -32,6 +32,7 @@
 #include <linux/sched.h>
 #include <linux/key.h>
 #include <linux/xfrm.h>
+#include <linux/gfp.h>
 #include <net/flow.h>
 
 /* Maximum number of letters for an LSM name string */
@@ -2953,5 +2954,28 @@ static inline void securityfs_remove(struct dentry *dentry)
 
 #endif
 
+#ifdef CONFIG_SECURITY
+
+static inline char *alloc_secdata(void)
+{
+        return (char *)get_zeroed_page(GFP_KERNEL);
+}
+
+static inline void free_secdata(void *secdata)
+{
+        free_page((unsigned long)secdata);
+}
+
+#else
+
+static inline char *alloc_secdata(void)
+{
+        return (char *)1;
+}
+
+static inline void free_secdata(void *secdata)
+{ }
+#endif /* CONFIG_SECURITY */
+
 #endif /* ! __LINUX_SECURITY_H */
 
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index f4523651fa42..5ac9b0bcaf9a 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -73,8 +73,8 @@ found:
 
         ret = kmem_cache_alloc_notrace(cachep, flags);
 
-        kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
-                             size, slab_buffer_size(cachep), flags);
+        trace_kmalloc(_THIS_IP_, ret,
+                      size, slab_buffer_size(cachep), flags);
 
         return ret;
 }
@@ -128,9 +128,9 @@ found:
 
         ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
 
-        kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_,
-                                  ret, size, slab_buffer_size(cachep),
-                                  flags, node);
+        trace_kmalloc_node(_THIS_IP_, ret,
+                           size, slab_buffer_size(cachep),
+                           flags, node);
 
         return ret;
 }
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index a1f90528e70b..5046f90c1171 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -233,8 +233,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
         unsigned int order = get_order(size);
         void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
 
-        kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
-                             size, PAGE_SIZE << order, flags);
+        trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);
 
         return ret;
 }
@@ -255,9 +254,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 
                 ret = kmem_cache_alloc_notrace(s, flags);
 
-                kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
-                                     _THIS_IP_, ret,
-                                     size, s->size, flags);
+                trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);
 
                 return ret;
         }
@@ -296,9 +293,8 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 
                 ret = kmem_cache_alloc_node_notrace(s, flags, node);
 
-                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
-                                          _THIS_IP_, ret,
-                                          size, s->size, flags, node);
+                trace_kmalloc_node(_THIS_IP_, ret,
+                                   size, s->size, flags, node);
 
                 return ret;
         }
diff --git a/include/trace/kmemtrace.h b/include/trace/kmemtrace.h
index ad8b7857855a..28ee69f9cd46 100644
--- a/include/trace/kmemtrace.h
+++ b/include/trace/kmemtrace.h
@@ -9,65 +9,53 @@
 
 #ifdef __KERNEL__
 
+#include <linux/tracepoint.h>
 #include <linux/types.h>
-#include <linux/marker.h>
-
-enum kmemtrace_type_id {
-        KMEMTRACE_TYPE_KMALLOC = 0,     /* kmalloc() or kfree(). */
-        KMEMTRACE_TYPE_CACHE,           /* kmem_cache_*(). */
-        KMEMTRACE_TYPE_PAGES,           /* __get_free_pages() and friends. */
-};
 
 #ifdef CONFIG_KMEMTRACE
-
 extern void kmemtrace_init(void);
-
-extern void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
-                                      unsigned long call_site,
-                                      const void *ptr,
-                                      size_t bytes_req,
-                                      size_t bytes_alloc,
-                                      gfp_t gfp_flags,
-                                      int node);
-
-extern void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
-                                unsigned long call_site,
-                                const void *ptr);
-
-#else /* CONFIG_KMEMTRACE */
-
+#else
 static inline void kmemtrace_init(void)
 {
 }
-
-static inline void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
-                                             unsigned long call_site,
-                                             const void *ptr,
-                                             size_t bytes_req,
-                                             size_t bytes_alloc,
-                                             gfp_t gfp_flags,
-                                             int node)
-{
-}
-
-static inline void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
-                                       unsigned long call_site,
-                                       const void *ptr)
-{
-}
-
-#endif /* CONFIG_KMEMTRACE */
-
-static inline void kmemtrace_mark_alloc(enum kmemtrace_type_id type_id,
-                                        unsigned long call_site,
-                                        const void *ptr,
-                                        size_t bytes_req,
-                                        size_t bytes_alloc,
-                                        gfp_t gfp_flags)
-{
-        kmemtrace_mark_alloc_node(type_id, call_site, ptr,
-                                  bytes_req, bytes_alloc, gfp_flags, -1);
-}
+#endif
+
+DECLARE_TRACE(kmalloc,
+              TP_PROTO(unsigned long call_site,
+                       const void *ptr,
+                       size_t bytes_req,
+                       size_t bytes_alloc,
+                       gfp_t gfp_flags),
+              TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags));
+DECLARE_TRACE(kmem_cache_alloc,
+              TP_PROTO(unsigned long call_site,
+                       const void *ptr,
+                       size_t bytes_req,
+                       size_t bytes_alloc,
+                       gfp_t gfp_flags),
+              TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags));
+DECLARE_TRACE(kmalloc_node,
+              TP_PROTO(unsigned long call_site,
+                       const void *ptr,
+                       size_t bytes_req,
+                       size_t bytes_alloc,
+                       gfp_t gfp_flags,
+                       int node),
+              TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node));
+DECLARE_TRACE(kmem_cache_alloc_node,
+              TP_PROTO(unsigned long call_site,
+                       const void *ptr,
+                       size_t bytes_req,
+                       size_t bytes_alloc,
+                       gfp_t gfp_flags,
+                       int node),
+              TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node));
+DECLARE_TRACE(kfree,
+              TP_PROTO(unsigned long call_site, const void *ptr),
+              TP_ARGS(call_site, ptr));
+DECLARE_TRACE(kmem_cache_free,
+              TP_PROTO(unsigned long call_site, const void *ptr),
+              TP_ARGS(call_site, ptr));
 
 #endif /* __KERNEL__ */
 
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index 654c640a6b9c..0f2b0b311304 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -65,6 +65,7 @@ static struct rcu_ctrlblk rcu_ctrlblk = {
         .lock   = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
         .cpumask = CPU_BITS_NONE,
 };
+
 static struct rcu_ctrlblk rcu_bh_ctrlblk = {
         .cur = -300,
         .completed = -300,
@@ -73,8 +74,26 @@ static struct rcu_ctrlblk rcu_bh_ctrlblk = {
         .cpumask = CPU_BITS_NONE,
 };
 
-DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
-DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
+static DEFINE_PER_CPU(struct rcu_data, rcu_data);
+static DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
+
+/*
+ * Increment the quiescent state counter.
+ * The counter is a bit degenerated: We do not need to know
+ * how many quiescent states passed, just if there was at least
+ * one since the start of the grace period. Thus just a flag.
+ */
+void rcu_qsctr_inc(int cpu)
+{
+        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+        rdp->passed_quiesc = 1;
+}
+
+void rcu_bh_qsctr_inc(int cpu)
+{
+        struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
+        rdp->passed_quiesc = 1;
+}
 
 static int blimit = 10;
 static int qhimark = 10000;
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 5d59e850fb71..ce97a4df64d3 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -147,7 +147,51 @@ struct rcu_ctrlblk {
         wait_queue_head_t sched_wq;     /* Place for rcu_sched to sleep. */
 };
 
+struct rcu_dyntick_sched {
+        int dynticks;
+        int dynticks_snap;
+        int sched_qs;
+        int sched_qs_snap;
+        int sched_dynticks_snap;
+};
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_dyntick_sched, rcu_dyntick_sched) = {
+        .dynticks = 1,
+};
+
+void rcu_qsctr_inc(int cpu)
+{
+        struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
+
+        rdssp->sched_qs++;
+}
+
+#ifdef CONFIG_NO_HZ
+
+void rcu_enter_nohz(void)
+{
+        static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
+
+        smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
+        __get_cpu_var(rcu_dyntick_sched).dynticks++;
+        WARN_ON_RATELIMIT(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1, &rs);
+}
+
+void rcu_exit_nohz(void)
+{
+        static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
+
+        __get_cpu_var(rcu_dyntick_sched).dynticks++;
+        smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+        WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
+                                &rs);
+}
+
+#endif /* CONFIG_NO_HZ */
+
+
 static DEFINE_PER_CPU(struct rcu_data, rcu_data);
+
 static struct rcu_ctrlblk rcu_ctrlblk = {
         .fliplock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.fliplock),
         .completed = 0,
@@ -427,10 +471,6 @@ static void __rcu_advance_callbacks(struct rcu_data *rdp)
         }
 }
 
-DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_dyntick_sched, rcu_dyntick_sched) = {
-        .dynticks = 1,
-};
-
 #ifdef CONFIG_NO_HZ
 static DEFINE_PER_CPU(int, rcu_update_flag);
 
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 97ce31579ec0..7f3266922572 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -78,6 +78,26 @@ DEFINE_PER_CPU(struct rcu_data, rcu_data);
 struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
+/*
+ * Increment the quiescent state counter.
+ * The counter is a bit degenerated: We do not need to know
+ * how many quiescent states passed, just if there was at least
+ * one since the start of the grace period. Thus just a flag.
+ */
+void rcu_qsctr_inc(int cpu)
+{
+        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+        rdp->passed_quiesc = 1;
+        rdp->passed_quiesc_completed = rdp->completed;
+}
+
+void rcu_bh_qsctr_inc(int cpu)
+{
+        struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
+        rdp->passed_quiesc = 1;
+        rdp->passed_quiesc_completed = rdp->completed;
+}
+
 #ifdef CONFIG_NO_HZ
 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
         .dynticks_nesting = 1,
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
new file mode 100644
index 000000000000..5e872bbf07f5
--- /dev/null
+++ b/kernel/rcutree.h
@@ -0,0 +1,10 @@
+
+/*
+ * RCU implementation internal declarations:
+ */
+extern struct rcu_state rcu_state;
+DECLARE_PER_CPU(struct rcu_data, rcu_data);
+
+extern struct rcu_state rcu_bh_state;
+DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
+
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index d6db3e837826..4ee954f6a8d5 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -43,6 +43,8 @@
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 
+#include "rcutree.h"
+
 static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
 {
         if (!rdp->beenonline)
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index ae201b3eda89..5011f4d91e37 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -6,14 +6,16 @@
  * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
  */
 
-#include <linux/dcache.h>
+#include <linux/tracepoint.h>
+#include <linux/seq_file.h>
 #include <linux/debugfs.h>
+#include <linux/dcache.h>
 #include <linux/fs.h>
-#include <linux/seq_file.h>
+
 #include <trace/kmemtrace.h>
 
-#include "trace.h"
 #include "trace_output.h"
+#include "trace.h"
 
 /* Select an alternative, minimalistic output than the original one */
 #define TRACE_KMEM_OPT_MINIMAL  0x1
@@ -25,14 +27,156 @@ static struct tracer_opt kmem_opts[] = {
 };
 
 static struct tracer_flags kmem_tracer_flags = {
         .val = 0,
         .opts = kmem_opts
 };
 
-
-static bool kmem_tracing_enabled __read_mostly;
 static struct trace_array *kmemtrace_array;
 
+/* Trace allocations */
+static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
+                                   unsigned long call_site,
+                                   const void *ptr,
+                                   size_t bytes_req,
+                                   size_t bytes_alloc,
+                                   gfp_t gfp_flags,
+                                   int node)
+{
+        struct trace_array *tr = kmemtrace_array;
+        struct kmemtrace_alloc_entry *entry;
+        struct ring_buffer_event *event;
+
+        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+        if (!event)
+                return;
+
+        entry = ring_buffer_event_data(event);
+        tracing_generic_entry_update(&entry->ent, 0, 0);
+
+        entry->ent.type = TRACE_KMEM_ALLOC;
+        entry->type_id = type_id;
+        entry->call_site = call_site;
+        entry->ptr = ptr;
+        entry->bytes_req = bytes_req;
+        entry->bytes_alloc = bytes_alloc;
+        entry->gfp_flags = gfp_flags;
+        entry->node = node;
+
+        ring_buffer_unlock_commit(tr->buffer, event);
+
+        trace_wake_up();
+}
+
+static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
+                                  unsigned long call_site,
+                                  const void *ptr)
+{
+        struct trace_array *tr = kmemtrace_array;
+        struct kmemtrace_free_entry *entry;
+        struct ring_buffer_event *event;
+
+        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+        if (!event)
+                return;
+        entry = ring_buffer_event_data(event);
+        tracing_generic_entry_update(&entry->ent, 0, 0);
+
+        entry->ent.type = TRACE_KMEM_FREE;
+        entry->type_id = type_id;
+        entry->call_site = call_site;
+        entry->ptr = ptr;
+
+        ring_buffer_unlock_commit(tr->buffer, event);
+
+        trace_wake_up();
+}
+
+static void kmemtrace_kmalloc(unsigned long call_site,
+                              const void *ptr,
+                              size_t bytes_req,
+                              size_t bytes_alloc,
+                              gfp_t gfp_flags)
+{
+        kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
+                        bytes_req, bytes_alloc, gfp_flags, -1);
+}
+
+static void kmemtrace_kmem_cache_alloc(unsigned long call_site,
+                                       const void *ptr,
+                                       size_t bytes_req,
+                                       size_t bytes_alloc,
+                                       gfp_t gfp_flags)
+{
+        kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
+                        bytes_req, bytes_alloc, gfp_flags, -1);
+}
+
+static void kmemtrace_kmalloc_node(unsigned long call_site,
+                                   const void *ptr,
+                                   size_t bytes_req,
+                                   size_t bytes_alloc,
+                                   gfp_t gfp_flags,
+                                   int node)
+{
+        kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
+                        bytes_req, bytes_alloc, gfp_flags, node);
+}
+
+static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site,
+                                            const void *ptr,
+                                            size_t bytes_req,
+                                            size_t bytes_alloc,
+                                            gfp_t gfp_flags,
+                                            int node)
+{
+        kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
+                        bytes_req, bytes_alloc, gfp_flags, node);
+}
+
+static void kmemtrace_kfree(unsigned long call_site, const void *ptr)
+{
+        kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr);
+}
+
+static void kmemtrace_kmem_cache_free(unsigned long call_site, const void *ptr)
+{
+        kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr);
+}
+
+static int kmemtrace_start_probes(void)
+{
+        int err;
+
+        err = register_trace_kmalloc(kmemtrace_kmalloc);
+        if (err)
+                return err;
+        err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
+        if (err)
+                return err;
+        err = register_trace_kmalloc_node(kmemtrace_kmalloc_node);
+        if (err)
+                return err;
+        err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
+        if (err)
+                return err;
+        err = register_trace_kfree(kmemtrace_kfree);
+        if (err)
+                return err;
+        err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
+
+        return err;
+}
+
+static void kmemtrace_stop_probes(void)
+{
+        unregister_trace_kmalloc(kmemtrace_kmalloc);
+        unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
+        unregister_trace_kmalloc_node(kmemtrace_kmalloc_node);
+        unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
+        unregister_trace_kfree(kmemtrace_kfree);
+        unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
+}
+
 static int kmem_trace_init(struct trace_array *tr)
 {
         int cpu;
@@ -41,14 +185,14 @@ static int kmem_trace_init(struct trace_array *tr)
         for_each_cpu_mask(cpu, cpu_possible_map)
                 tracing_reset(tr, cpu);
 
-        kmem_tracing_enabled = true;
+        kmemtrace_start_probes();
 
         return 0;
 }
 
 static void kmem_trace_reset(struct trace_array *tr)
 {
-        kmem_tracing_enabled = false;
+        kmemtrace_stop_probes();
 }
 
 static void kmemtrace_headers(struct seq_file *s)
@@ -66,47 +210,84 @@ static void kmemtrace_headers(struct seq_file *s)
 }
 
 /*
- * The two following functions give the original output from kmemtrace,
- * or something close to....perhaps they need some missing things
+ * The following functions give the original output from kmemtrace,
+ * plus the origin CPU, since reordering occurs in-kernel now.
  */
+
+#define KMEMTRACE_USER_ALLOC    0
+#define KMEMTRACE_USER_FREE     1
+
+struct kmemtrace_user_event {
+        u8              event_id;
+        u8              type_id;
+        u16             event_size;
+        u32             cpu;
+        u64             timestamp;
+        unsigned long   call_site;
+        unsigned long   ptr;
+};
+
+struct kmemtrace_user_event_alloc {
+        size_t          bytes_req;
+        size_t          bytes_alloc;
+        unsigned        gfp_flags;
+        int             node;
+};
+
 static enum print_line_t
-kmemtrace_print_alloc_original(struct trace_iterator *iter,
-                               struct kmemtrace_alloc_entry *entry)
+kmemtrace_print_alloc_user(struct trace_iterator *iter,
+                           struct kmemtrace_alloc_entry *entry)
 {
+        struct kmemtrace_user_event_alloc *ev_alloc;
         struct trace_seq *s = &iter->seq;
-        int ret;
+        struct kmemtrace_user_event *ev;
+
+        ev = trace_seq_reserve(s, sizeof(*ev));
+        if (!ev)
+                return TRACE_TYPE_PARTIAL_LINE;
 
-        /* Taken from the old linux/kmemtrace.h */
-        ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu "
-           "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n",
-           entry->type_id, entry->call_site, (unsigned long) entry->ptr,
-           (unsigned long) entry->bytes_req, (unsigned long) entry->bytes_alloc,
-           (unsigned long) entry->gfp_flags, entry->node);
+        ev->event_id = KMEMTRACE_USER_ALLOC;
+        ev->type_id = entry->type_id;
+        ev->event_size = sizeof(*ev) + sizeof(*ev_alloc);
+        ev->cpu = iter->cpu;
+        ev->timestamp = iter->ts;
+        ev->call_site = entry->call_site;
+        ev->ptr = (unsigned long)entry->ptr;
 
-        if (!ret)
+        ev_alloc = trace_seq_reserve(s, sizeof(*ev_alloc));
+        if (!ev_alloc)
                 return TRACE_TYPE_PARTIAL_LINE;
 
+        ev_alloc->bytes_req = entry->bytes_req;
+        ev_alloc->bytes_alloc = entry->bytes_alloc;
+        ev_alloc->gfp_flags = entry->gfp_flags;
+        ev_alloc->node = entry->node;
+
         return TRACE_TYPE_HANDLED;
 }
 
 static enum print_line_t
-kmemtrace_print_free_original(struct trace_iterator *iter,
-                              struct kmemtrace_free_entry *entry)
+kmemtrace_print_free_user(struct trace_iterator *iter,
+                          struct kmemtrace_free_entry *entry)
 {
         struct trace_seq *s = &iter->seq;
-        int ret;
+        struct kmemtrace_user_event *ev;
 
-        /* Taken from the old linux/kmemtrace.h */
-        ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu\n",
-           entry->type_id, entry->call_site, (unsigned long) entry->ptr);
-
-        if (!ret)
+        ev = trace_seq_reserve(s, sizeof(*ev));
+        if (!ev)
                 return TRACE_TYPE_PARTIAL_LINE;
 
+        ev->event_id = KMEMTRACE_USER_FREE;
+        ev->type_id = entry->type_id;
+        ev->event_size = sizeof(*ev);
+        ev->cpu = iter->cpu;
+        ev->timestamp = iter->ts;
+        ev->call_site = entry->call_site;
+        ev->ptr = (unsigned long)entry->ptr;
+
         return TRACE_TYPE_HANDLED;
 }
 
-
 /* The two other following provide a more minimalistic output */
 static enum print_line_t
 kmemtrace_print_alloc_compress(struct trace_iterator *iter,
@@ -178,7 +359,7 @@ kmemtrace_print_alloc_compress(struct trace_iterator *iter,
 
 static enum print_line_t
 kmemtrace_print_free_compress(struct trace_iterator *iter,
-                              struct kmemtrace_free_entry *entry)
+                               struct kmemtrace_free_entry *entry)
 {
         struct trace_seq *s = &iter->seq;
         int ret;
@@ -239,20 +420,22 @@ static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
         switch (entry->type) {
         case TRACE_KMEM_ALLOC: {
                 struct kmemtrace_alloc_entry *field;
+
                 trace_assign_type(field, entry);
                 if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
                         return kmemtrace_print_alloc_compress(iter, field);
                 else
-                        return kmemtrace_print_alloc_original(iter, field);
+                        return kmemtrace_print_alloc_user(iter, field);
         }
 
         case TRACE_KMEM_FREE: {
                 struct kmemtrace_free_entry *field;
+
                 trace_assign_type(field, entry);
                 if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
                         return kmemtrace_print_free_compress(iter, field);
                 else
-                        return kmemtrace_print_free_original(iter, field);
+                        return kmemtrace_print_free_user(iter, field);
         }
 
         default:
@@ -260,70 +443,13 @@ static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
         }
 }
 
-/* Trace allocations */
-void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
-                               unsigned long call_site,
-                               const void *ptr,
-                               size_t bytes_req,
-                               size_t bytes_alloc,
-                               gfp_t gfp_flags,
-                               int node)
-{
-        struct ring_buffer_event *event;
-        struct kmemtrace_alloc_entry *entry;
-        struct trace_array *tr = kmemtrace_array;
-
-        if (!kmem_tracing_enabled)
-                return;
-
-        event = trace_buffer_lock_reserve(tr, TRACE_KMEM_ALLOC,
-                                          sizeof(*entry), 0, 0);
-        if (!event)
-                return;
-        entry = ring_buffer_event_data(event);
-
-        entry->call_site = call_site;
-        entry->ptr = ptr;
-        entry->bytes_req = bytes_req;
-        entry->bytes_alloc = bytes_alloc;
-        entry->gfp_flags = gfp_flags;
-        entry->node = node;
-
-        trace_buffer_unlock_commit(tr, event, 0, 0);
-}
-EXPORT_SYMBOL(kmemtrace_mark_alloc_node);
-
-void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
-                         unsigned long call_site,
-                         const void *ptr)
-{
-        struct ring_buffer_event *event;
-        struct kmemtrace_free_entry *entry;
-        struct trace_array *tr = kmemtrace_array;
-
-        if (!kmem_tracing_enabled)
-                return;
-
-        event = trace_buffer_lock_reserve(tr, TRACE_KMEM_FREE,
-                                          sizeof(*entry), 0, 0);
-        if (!event)
-                return;
-        entry = ring_buffer_event_data(event);
-        entry->type_id = type_id;
-        entry->call_site = call_site;
-        entry->ptr = ptr;
-
-        trace_buffer_unlock_commit(tr, event, 0, 0);
-}
-EXPORT_SYMBOL(kmemtrace_mark_free);
-
 static struct tracer kmem_tracer __read_mostly = {
         .name           = "kmemtrace",
         .init           = kmem_trace_init,
         .reset          = kmem_trace_reset,
         .print_line     = kmemtrace_print_line,
         .print_header   = kmemtrace_headers,
         .flags          = &kmem_tracer_flags
 };
 
 void kmemtrace_init(void)
@@ -335,5 +461,4 @@ static int __init init_kmem_tracer(void)
 {
         return register_tracer(&kmem_tracer);
 }
-
 device_initcall(init_kmem_tracer);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index cb0ce3fc36d3..cbc168f1e43d 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -182,6 +182,12 @@ struct trace_power {
         struct power_trace      state_data;
 };
 
+enum kmemtrace_type_id {
+        KMEMTRACE_TYPE_KMALLOC = 0,     /* kmalloc() or kfree(). */
+        KMEMTRACE_TYPE_CACHE,           /* kmem_cache_*(). */
+        KMEMTRACE_TYPE_PAGES,           /* __get_free_pages() and friends. */
+};
+
 struct kmemtrace_alloc_entry {
         struct trace_entry      ent;
         enum kmemtrace_type_id  type_id;
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 5d3ddb5fcfd9..708e2a86d87b 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -50,6 +50,7 @@
 #endif /* !STATIC */
 
 #include <linux/decompress/mm.h>
+#include <linux/slab.h>
 
 #ifndef INT_MAX
 #define INT_MAX 0x7fffffff
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index 839a329b4fc4..e36b296fc9f8 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -23,6 +23,7 @@
 #endif /* STATIC */
 
 #include <linux/decompress/mm.h>
+#include <linux/slab.h>
 
 #define INBUF_LEN (16*1024)
 
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
index 546f2f4c157e..32123a1340e6 100644
--- a/lib/decompress_unlzma.c
+++ b/lib/decompress_unlzma.c
@@ -34,6 +34,7 @@
 #endif /* STATIC */
 
 #include <linux/decompress/mm.h>
+#include <linux/slab.h>
 
 #define MIN(a, b) (((a) < (b)) ? (a) : (b))
 
diff --git a/mm/failslab.c b/mm/failslab.c
index 7c6ea6493f80..9339de5f0a91 100644
--- a/mm/failslab.c
+++ b/mm/failslab.c
@@ -1,4 +1,5 @@
 #include <linux/fault-inject.h>
+#include <linux/gfp.h>
 
 static struct {
         struct fault_attr attr;
diff --git a/mm/slab.c b/mm/slab.c
index 4fc1761c6dc8..9a90b00d2f91 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3565,8 +3565,8 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
         void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
 
-        kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
-                             obj_size(cachep), cachep->buffer_size, flags);
+        trace_kmem_cache_alloc(_RET_IP_, ret,
+                               obj_size(cachep), cachep->buffer_size, flags);
 
         return ret;
 }
@@ -3627,9 +3627,9 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
         void *ret = __cache_alloc_node(cachep, flags, nodeid,
                                        __builtin_return_address(0));
 
-        kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
-                                  obj_size(cachep), cachep->buffer_size,
-                                  flags, nodeid);
+        trace_kmem_cache_alloc_node(_RET_IP_, ret,
+                                    obj_size(cachep), cachep->buffer_size,
+                                    flags, nodeid);
 
         return ret;
 }
@@ -3657,9 +3657,8 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
                 return cachep;
         ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
 
-        kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
-                                  (unsigned long) caller, ret,
-                                  size, cachep->buffer_size, flags, node);
+        trace_kmalloc_node((unsigned long) caller, ret,
+                           size, cachep->buffer_size, flags, node);
 
         return ret;
 }
@@ -3709,9 +3708,8 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
                 return cachep;
         ret = __cache_alloc(cachep, flags, caller);
 
-        kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
-                             (unsigned long) caller, ret,
-                             size, cachep->buffer_size, flags);
+        trace_kmalloc((unsigned long) caller, ret,
+                      size, cachep->buffer_size, flags);
 
         return ret;
 }
@@ -3757,7 +3755,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
         __cache_free(cachep, objp);
         local_irq_restore(flags);
 
-        kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, objp);
+        trace_kmem_cache_free(_RET_IP_, objp);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -3775,6 +3773,8 @@ void kfree(const void *objp)
         struct kmem_cache *c;
         unsigned long flags;
 
+        trace_kfree(_RET_IP_, objp);
+
         if (unlikely(ZERO_OR_NULL_PTR(objp)))
                 return;
         local_irq_save(flags);
@@ -3784,8 +3784,6 @@ void kfree(const void *objp)
         debug_check_no_obj_freed(objp, obj_size(c));
         __cache_free(c, (void *)objp);
         local_irq_restore(flags);
-
-        kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kfree);
 
diff --git a/mm/slob.c b/mm/slob.c
index 4dd6516447f2..a2d4ab32198d 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -490,9 +490,8 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
                 *m = size;
                 ret = (void *)m + align;
 
-                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
-                                          _RET_IP_, ret,
-                                          size, size + align, gfp, node);
+                trace_kmalloc_node(_RET_IP_, ret,
+                                   size, size + align, gfp, node);
         } else {
                 unsigned int order = get_order(size);
 
@@ -503,9 +502,8 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
                         page->private = size;
                 }
 
-                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
-                                          _RET_IP_, ret,
-                                          size, PAGE_SIZE << order, gfp, node);
+                trace_kmalloc_node(_RET_IP_, ret,
+                                   size, PAGE_SIZE << order, gfp, node);
         }
 
         return ret;
@@ -516,6 +514,8 @@ void kfree(const void *block)
 {
         struct slob_page *sp;
 
+        trace_kfree(_RET_IP_, block);
+
         if (unlikely(ZERO_OR_NULL_PTR(block)))
                 return;
 
@@ -526,8 +526,6 @@ void kfree(const void *block)
                 slob_free(m, *m + align);
         } else
                 put_page(&sp->page);
-
-        kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, block);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -599,16 +597,14 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 
         if (c->size < PAGE_SIZE) {
                 b = slob_alloc(c->size, flags, c->align, node);
-                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
-                                          _RET_IP_, b, c->size,
-                                          SLOB_UNITS(c->size) * SLOB_UNIT,
-                                          flags, node);
+                trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+                                            SLOB_UNITS(c->size) * SLOB_UNIT,
+                                            flags, node);
         } else {
                 b = slob_new_pages(flags, get_order(c->size), node);
-                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
-                                          _RET_IP_, b, c->size,
-                                          PAGE_SIZE << get_order(c->size),
-                                          flags, node);
+                trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+                                            PAGE_SIZE << get_order(c->size),
+                                            flags, node);
         }
 
         if (c->ctor)
@@ -646,7 +642,7 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
                 __kmem_cache_free(b, c->size);
         }
 
-        kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, b);
+        trace_kmem_cache_free(_RET_IP_, b);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
diff --git a/mm/slub.c b/mm/slub.c
index 7aaa121d0ea9..7ab54ecbd3f3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1621,8 +1621,7 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
         void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
 
-        kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
-                             s->objsize, s->size, gfpflags);
+        trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
 
         return ret;
 }
@@ -1641,8 +1640,8 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
         void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
 
-        kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
-                                  s->objsize, s->size, gfpflags, node);
+        trace_kmem_cache_alloc_node(_RET_IP_, ret,
+                                    s->objsize, s->size, gfpflags, node);
 
         return ret;
 }
@@ -1767,7 +1766,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 
         slab_free(s, page, x, _RET_IP_);
 
-        kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x);
+        trace_kmem_cache_free(_RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -2702,8 +2701,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 
         ret = slab_alloc(s, flags, -1, _RET_IP_);
 
-        kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
-                             size, s->size, flags);
+        trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
 
         return ret;
 }
@@ -2729,10 +2727,9 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
         if (unlikely(size > SLUB_MAX_SIZE)) {
                 ret = kmalloc_large_node(size, flags, node);
 
-                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
-                                          _RET_IP_, ret,
-                                          size, PAGE_SIZE << get_order(size),
-                                          flags, node);
+                trace_kmalloc_node(_RET_IP_, ret,
+                                   size, PAGE_SIZE << get_order(size),
+                                   flags, node);
 
                 return ret;
         }
@@ -2744,8 +2741,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 
         ret = slab_alloc(s, flags, node, _RET_IP_);
 
-        kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
-                                  size, s->size, flags, node);
+        trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
 
         return ret;
 }
@@ -2796,6 +2792,8 @@ void kfree(const void *x)
         struct page *page;
         void *object = (void *)x;
 
+        trace_kfree(_RET_IP_, x);
+
         if (unlikely(ZERO_OR_NULL_PTR(x)))
                 return;
 
@@ -2806,8 +2804,6 @@ void kfree(const void *x)
                 return;
         }
         slab_free(page->slab, page, object, _RET_IP_);
-
-        kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -3290,8 +3286,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
         ret = slab_alloc(s, gfpflags, -1, caller);
 
         /* Honor the call site pointer we received. */
-        kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, caller, ret, size,
-                             s->size, gfpflags);
+        trace_kmalloc(caller, ret, size, s->size, gfpflags);
 
         return ret;
 }
@@ -3313,8 +3308,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
         ret = slab_alloc(s, gfpflags, node, caller);
 
         /* Honor the call site pointer we received. */
-        kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, caller, ret,
-                                  size, s->size, gfpflags, node);
+        trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
 
         return ret;
 }
diff --git a/mm/util.c b/mm/util.c
index 7c122e49f769..2599e83eea17 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -4,6 +4,7 @@
 #include <linux/module.h>
 #include <linux/err.h>
 #include <linux/sched.h>
+#include <linux/tracepoint.h>
 #include <asm/uaccess.h>
 
 /**
@@ -236,3 +237,18 @@ int __attribute__((weak)) get_user_pages_fast(unsigned long start,
         return ret;
 }
 EXPORT_SYMBOL_GPL(get_user_pages_fast);
+
+/* Tracepoints definitions. */
+DEFINE_TRACE(kmalloc);
+DEFINE_TRACE(kmem_cache_alloc);
+DEFINE_TRACE(kmalloc_node);
+DEFINE_TRACE(kmem_cache_alloc_node);
+DEFINE_TRACE(kfree);
+DEFINE_TRACE(kmem_cache_free);
+
+EXPORT_TRACEPOINT_SYMBOL(kmalloc);
+EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
+EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
+EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
+EXPORT_TRACEPOINT_SYMBOL(kfree);
+EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);