author    David S. Miller <davem@davemloft.net>    2010-05-19 02:01:55 -0400
committer David S. Miller <davem@davemloft.net>    2010-05-19 02:01:55 -0400
commit    2ec8c6bb5d8f3a62a79f463525054bae1e3d4487 (patch)
tree      fa7f8400ac685fb52e96f64997c7c682fc2aa021 /include
parent    7b39f90fabcf9e2af0cd79d0a60440d821e22b56 (diff)
parent    537b60d17894b7c19a6060feae40299d7109d6e7 (diff)

Merge branch 'master' of /home/davem/src/GIT/linux-2.6/

Conflicts:
	include/linux/mod_devicetable.h
	scripts/mod/file2alias.c
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/atomic.h                 |   2
-rw-r--r--  include/asm-generic/bitops/arch_hweight.h    |  25
-rw-r--r--  include/asm-generic/bitops/const_hweight.h   |  42
-rw-r--r--  include/asm-generic/bitops/hweight.h         |   8
-rw-r--r--  include/linux/acpi.h                         |   5
-rw-r--r--  include/linux/bitops.h                       |  30
-rw-r--r--  include/linux/cpufreq.h                      |  30
-rw-r--r--  include/linux/cpuset.h                       |  16
-rw-r--r--  include/linux/dcache.h                       |  14
-rw-r--r--  include/linux/debugobjects.h                 |  11
-rw-r--r--  include/linux/ftrace.h                       |  61
-rw-r--r--  include/linux/ftrace_event.h                 |   1
-rw-r--r--  include/linux/hw_breakpoint.h                |  25
-rw-r--r--  include/linux/init_task.h                    |   1
-rw-r--r--  include/linux/iommu.h                        |  24
-rw-r--r--  include/linux/kernel.h                       |  11
-rw-r--r--  include/linux/mm.h                           |   4
-rw-r--r--  include/linux/mod_devicetable.h              |   9
-rw-r--r--  include/linux/module.h                       |   6
-rw-r--r--  include/linux/perf_event.h                   |  83
-rw-r--r--  include/linux/platform_device.h              |   6
-rw-r--r--  include/linux/ptrace.h                       |  12
-rw-r--r--  include/linux/rbtree.h                       |   5
-rw-r--r--  include/linux/rcupdate.h                     |  50
-rw-r--r--  include/linux/rcutiny.h                      |  31
-rw-r--r--  include/linux/rcutree.h                      |   7
-rw-r--r--  include/linux/ring_buffer.h                  |  10
-rw-r--r--  include/linux/sched.h                        |  79
-rw-r--r--  include/linux/srcu.h                         |   6
-rw-r--r--  include/linux/stop_machine.h                 | 122
-rw-r--r--  include/linux/tick.h                         |   5
-rw-r--r--  include/linux/tracepoint.h                   | 114
-rw-r--r--  include/linux/types.h                        |   4
-rw-r--r--  include/linux/wait.h                         |  35
-rw-r--r--  include/linux/zorro.h                        |  22
-rw-r--r--  include/trace/define_trace.h                 |   5
-rw-r--r--  include/trace/events/lock.h                  |  55
-rw-r--r--  include/trace/events/module.h                |  18
-rw-r--r--  include/trace/events/napi.h                  |  10
-rw-r--r--  include/trace/events/sched.h                 |  32
-rw-r--r--  include/trace/events/signal.h                |  52
-rw-r--r--  include/trace/ftrace.h                       |  56
42 files changed, 704 insertions(+), 440 deletions(-)
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index c99c64dc5f3d..c33749f95b32 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -33,7 +33,7 @@
  * Atomically reads the value of @v.  Note that the guaranteed
  * useful range of an atomic_t is only 24 bits.
  */
-#define atomic_read(v)	((v)->counter)
+#define atomic_read(v)	(*(volatile int *)&(v)->counter)
 
 /**
  * atomic_set - set atomic variable
diff --git a/include/asm-generic/bitops/arch_hweight.h b/include/asm-generic/bitops/arch_hweight.h
new file mode 100644
index 000000000000..6a211f40665c
--- /dev/null
+++ b/include/asm-generic/bitops/arch_hweight.h
@@ -0,0 +1,25 @@
+#ifndef _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_
+#define _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_
+
+#include <asm/types.h>
+
+static inline unsigned int __arch_hweight32(unsigned int w)
+{
+	return __sw_hweight32(w);
+}
+
+static inline unsigned int __arch_hweight16(unsigned int w)
+{
+	return __sw_hweight16(w);
+}
+
+static inline unsigned int __arch_hweight8(unsigned int w)
+{
+	return __sw_hweight8(w);
+}
+
+static inline unsigned long __arch_hweight64(__u64 w)
+{
+	return __sw_hweight64(w);
+}
+#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */
diff --git a/include/asm-generic/bitops/const_hweight.h b/include/asm-generic/bitops/const_hweight.h
new file mode 100644
index 000000000000..fa2a50b7ee66
--- /dev/null
+++ b/include/asm-generic/bitops/const_hweight.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_
+#define _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_
+
+/*
+ * Compile time versions of __arch_hweightN()
+ */
+#define __const_hweight8(w)		\
+      (	(!!((w) & (1ULL << 0))) +	\
+	(!!((w) & (1ULL << 1))) +	\
+	(!!((w) & (1ULL << 2))) +	\
+	(!!((w) & (1ULL << 3))) +	\
+	(!!((w) & (1ULL << 4))) +	\
+	(!!((w) & (1ULL << 5))) +	\
+	(!!((w) & (1ULL << 6))) +	\
+	(!!((w) & (1ULL << 7)))	)
+
+#define __const_hweight16(w) (__const_hweight8(w)  + __const_hweight8((w)  >> 8 ))
+#define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16))
+#define __const_hweight64(w) (__const_hweight32(w) + __const_hweight32((w) >> 32))
+
+/*
+ * Generic interface.
+ */
+#define hweight8(w)  (__builtin_constant_p(w) ? __const_hweight8(w)  : __arch_hweight8(w))
+#define hweight16(w) (__builtin_constant_p(w) ? __const_hweight16(w) : __arch_hweight16(w))
+#define hweight32(w) (__builtin_constant_p(w) ? __const_hweight32(w) : __arch_hweight32(w))
+#define hweight64(w) (__builtin_constant_p(w) ? __const_hweight64(w) : __arch_hweight64(w))
+
+/*
+ * Interface for known constant arguments
+ */
+#define HWEIGHT8(w)  (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight8(w))
+#define HWEIGHT16(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight16(w))
+#define HWEIGHT32(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight32(w))
+#define HWEIGHT64(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight64(w))
+
+/*
+ * Type invariant interface to the compile time constant hweight functions.
+ */
+#define HWEIGHT(w)   HWEIGHT64((u64)w)
+
+#endif /* _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ */
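
The hweightN() dispatch above leans on __builtin_constant_p(): constant arguments fold to an immediate at compile time, while variables fall through to the __arch_hweightN() helpers. A standalone userspace sketch of the same trick (the my_* names are illustrative, and __builtin_popcount() stands in for the kernel's software fallback):

    #include <stdio.h>

    static inline unsigned int my_arch_hweight8(unsigned int w)
    {
    	return __builtin_popcount(w & 0xff);	/* runtime path */
    }

    #define my_const_hweight8(w)		\
    	( (!!((w) & (1ULL << 0))) +	\
    	  (!!((w) & (1ULL << 1))) +	\
    	  (!!((w) & (1ULL << 2))) +	\
    	  (!!((w) & (1ULL << 3))) +	\
    	  (!!((w) & (1ULL << 4))) +	\
    	  (!!((w) & (1ULL << 5))) +	\
    	  (!!((w) & (1ULL << 6))) +	\
    	  (!!((w) & (1ULL << 7))) )

    #define my_hweight8(w) (__builtin_constant_p(w) ?	\
    			my_const_hweight8(w) : my_arch_hweight8(w))

    int main(void)
    {
    	volatile unsigned int v = 0xa5;	/* volatile defeats constant folding */
    	printf("%u %u\n", my_hweight8(0xa5), my_hweight8(v));	/* prints: 4 4 */
    	return 0;
    }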
diff --git a/include/asm-generic/bitops/hweight.h b/include/asm-generic/bitops/hweight.h
index fbbc383771da..a94d6519c7ed 100644
--- a/include/asm-generic/bitops/hweight.h
+++ b/include/asm-generic/bitops/hweight.h
@@ -1,11 +1,7 @@
 #ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_
 #define _ASM_GENERIC_BITOPS_HWEIGHT_H_
 
-#include <asm/types.h>
-
-extern unsigned int hweight32(unsigned int w);
-extern unsigned int hweight16(unsigned int w);
-extern unsigned int hweight8(unsigned int w);
-extern unsigned long hweight64(__u64 w);
+#include <asm-generic/bitops/arch_hweight.h>
+#include <asm-generic/bitops/const_hweight.h>
 
 #endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index b926afe8c03e..3da73f5f0ae9 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -116,11 +116,12 @@ extern unsigned long acpi_realmode_flags;
 
 int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity);
 int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
+int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi);
 
 #ifdef CONFIG_X86_IO_APIC
-extern int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity);
+extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
 #else
-#define acpi_get_override_irq(bus, trigger, polarity) (-1)
+#define acpi_get_override_irq(gsi, trigger, polarity) (-1)
 #endif
 /*
  * This function undoes the effect of one call to acpi_register_gsi().
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index b796eab5ca75..fc68053378ce 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -10,6 +10,11 @@
 #define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
 #endif
 
+extern unsigned int __sw_hweight8(unsigned int w);
+extern unsigned int __sw_hweight16(unsigned int w);
+extern unsigned int __sw_hweight32(unsigned int w);
+extern unsigned long __sw_hweight64(__u64 w);
+
 /*
  * Include this here because some architectures need generic_ffs/fls in
  * scope
@@ -44,31 +49,6 @@ static inline unsigned long hweight_long(unsigned long w)
 	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
 }
 
-/*
- * Clearly slow versions of the hweightN() functions, their benefit is
- * of course compile time evaluation of constant arguments.
- */
-#define HWEIGHT8(w)					\
-      (	BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) +	\
-	(!!((w) & (1ULL << 0))) +			\
-	(!!((w) & (1ULL << 1))) +			\
-	(!!((w) & (1ULL << 2))) +			\
-	(!!((w) & (1ULL << 3))) +			\
-	(!!((w) & (1ULL << 4))) +			\
-	(!!((w) & (1ULL << 5))) +			\
-	(!!((w) & (1ULL << 6))) +			\
-	(!!((w) & (1ULL << 7)))	)
-
-#define HWEIGHT16(w) (HWEIGHT8(w)  + HWEIGHT8((w) >> 8))
-#define HWEIGHT32(w) (HWEIGHT16(w) + HWEIGHT16((w) >> 16))
-#define HWEIGHT64(w) (HWEIGHT32(w) + HWEIGHT32((w) >> 32))
-
-/*
- * Type invariant version that simply casts things to the
- * largest type.
- */
-#define HWEIGHT(w)   HWEIGHT64((u64)(w))
-
 /**
  * rol32 - rotate a 32-bit value left
  * @word: value to rotate
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 4de02b10007f..9f15150ce8d6 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -278,6 +278,27 @@ struct freq_attr {
 	ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
 };
 
+#define cpufreq_freq_attr_ro(_name)		\
+static struct freq_attr _name =			\
+__ATTR(_name, 0444, show_##_name, NULL)
+
+#define cpufreq_freq_attr_ro_perm(_name, _perm)	\
+static struct freq_attr _name =			\
+__ATTR(_name, _perm, show_##_name, NULL)
+
+#define cpufreq_freq_attr_ro_old(_name)		\
+static struct freq_attr _name##_old =		\
+__ATTR(_name, 0444, show_##_name##_old, NULL)
+
+#define cpufreq_freq_attr_rw(_name)		\
+static struct freq_attr _name =			\
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+#define cpufreq_freq_attr_rw_old(_name)		\
+static struct freq_attr _name##_old =		\
+__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old)
+
+
 struct global_attr {
 	struct attribute attr;
 	ssize_t (*show)(struct kobject *kobj,
@@ -286,6 +307,15 @@ struct global_attr {
 			const char *c, size_t count);
 };
 
+#define define_one_global_ro(_name)		\
+static struct global_attr _name =		\
+__ATTR(_name, 0444, show_##_name, NULL)
+
+#define define_one_global_rw(_name)		\
+static struct global_attr _name =		\
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+
 /*********************************************************************
  *                        CPUFREQ 2.6. INTERFACE                    *
  *********************************************************************/
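
These helpers cut the boilerplate for sysfs attributes; a hypothetical read-only attribute built with them (show_foo and foo are illustrative names, paired per the show_##_name convention):

    static ssize_t show_foo(struct cpufreq_policy *policy, char *buf)
    {
    	return sprintf(buf, "%u\n", policy->cur);	/* current frequency */
    }

    cpufreq_freq_attr_ro(foo);	/* expands to a 0444 struct freq_attr */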
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index a5740fc4d04b..a73454aec333 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -21,8 +21,7 @@ extern int number_of_cpusets; /* How many cpusets are defined in system? */
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern void cpuset_cpus_allowed_locked(struct task_struct *p,
-				       struct cpumask *mask);
+extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
@@ -69,9 +68,6 @@ struct seq_file;
 extern void cpuset_task_status_allowed(struct seq_file *m,
 					struct task_struct *task);
 
-extern void cpuset_lock(void);
-extern void cpuset_unlock(void);
-
 extern int cpuset_mem_spread_node(void);
 
 static inline int cpuset_do_page_mem_spread(void)
@@ -105,10 +101,11 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
 {
 	cpumask_copy(mask, cpu_possible_mask);
 }
-static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
-					      struct cpumask *mask)
+
+static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
 {
-	cpumask_copy(mask, cpu_possible_mask);
+	cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
+	return cpumask_any(cpu_active_mask);
 }
 
 static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
@@ -157,9 +154,6 @@ static inline void cpuset_task_status_allowed(struct seq_file *m,
 {
 }
 
-static inline void cpuset_lock(void) {}
-static inline void cpuset_unlock(void) {}
-
 static inline int cpuset_mem_spread_node(void)
 {
 	return 0;
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 30b93b2a01a4..eebb617c17d8 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -186,6 +186,8 @@ d_iput: no no no yes
 
 #define DCACHE_FSNOTIFY_PARENT_WATCHED	0x0080 /* Parent inode is watched by some fsnotify listener */
 
+#define DCACHE_CANT_MOUNT	0x0100
+
 extern spinlock_t dcache_lock;
 extern seqlock_t rename_lock;
 
@@ -358,6 +360,18 @@ static inline int d_unlinked(struct dentry *dentry)
 	return d_unhashed(dentry) && !IS_ROOT(dentry);
 }
 
+static inline int cant_mount(struct dentry *dentry)
+{
+	return (dentry->d_flags & DCACHE_CANT_MOUNT);
+}
+
+static inline void dont_mount(struct dentry *dentry)
+{
+	spin_lock(&dentry->d_lock);
+	dentry->d_flags |= DCACHE_CANT_MOUNT;
+	spin_unlock(&dentry->d_lock);
+}
+
 static inline struct dentry *dget_parent(struct dentry *dentry)
 {
 	struct dentry *ret;
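
dont_mount() and cant_mount() pair up across the VFS: paths that kill a dentry mark it, and mount code refuses marked dentries. A hedged sketch of that pattern (error and path are caller-side names, not part of this header):

    /* unlink/rmdir side: the victim is gone, forbid future mounts on it */
    if (!error)
    	dont_mount(dentry);

    /* mount side: bail out if the intended mountpoint was killed */
    if (cant_mount(path->dentry))
    	return -ENOENT;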
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
index 8c243aaa86a7..597692f1fc8d 100644
--- a/include/linux/debugobjects.h
+++ b/include/linux/debugobjects.h
@@ -20,12 +20,14 @@ struct debug_obj_descr;
  * struct debug_obj - representaion of an tracked object
  * @node:	hlist node to link the object into the tracker list
  * @state:	tracked object state
+ * @astate:	current active state
  * @object:	pointer to the real object
  * @descr:	pointer to an object type specific debug description structure
  */
 struct debug_obj {
 	struct hlist_node	node;
 	enum debug_obj_state	state;
+	unsigned int		astate;
 	void			*object;
 	struct debug_obj_descr	*descr;
 };
@@ -60,6 +62,15 @@ extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr);
 extern void debug_object_destroy	(void *addr, struct debug_obj_descr *descr);
 extern void debug_object_free		(void *addr, struct debug_obj_descr *descr);
 
+/*
+ * Active state:
+ * - Set at 0 upon initialization.
+ * - Must return to 0 before deactivation.
+ */
+extern void
+debug_object_active_state(void *addr, struct debug_obj_descr *descr,
+			  unsigned int expect, unsigned int next);
+
 extern void debug_objects_early_init(void);
 extern void debug_objects_mem_init(void);
 #else
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 01e6adea07ec..41e46330d9be 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -82,9 +82,13 @@ void clear_ftrace_function(void);
 extern void ftrace_stub(unsigned long a0, unsigned long a1);
 
 #else /* !CONFIG_FUNCTION_TRACER */
-# define register_ftrace_function(ops) do { } while (0)
-# define unregister_ftrace_function(ops) do { } while (0)
-# define clear_ftrace_function(ops) do { } while (0)
+/*
+ * (un)register_ftrace_function must be a macro since the ops parameter
+ * must not be evaluated.
+ */
+#define register_ftrace_function(ops) ({ 0; })
+#define unregister_ftrace_function(ops) ({ 0; })
+static inline void clear_ftrace_function(void) { }
 static inline void ftrace_kill(void) { }
 static inline void ftrace_stop(void) { }
 static inline void ftrace_start(void) { }
@@ -237,11 +241,13 @@ extern int skip_trace(unsigned long ip);
 extern void ftrace_disable_daemon(void);
 extern void ftrace_enable_daemon(void);
 #else
-# define skip_trace(ip)				({ 0; })
-# define ftrace_force_update()			({ 0; })
-# define ftrace_set_filter(buf, len, reset)	do { } while (0)
-# define ftrace_disable_daemon()		do { } while (0)
-# define ftrace_enable_daemon()			do { } while (0)
+static inline int skip_trace(unsigned long ip) { return 0; }
+static inline int ftrace_force_update(void) { return 0; }
+static inline void ftrace_set_filter(unsigned char *buf, int len, int reset)
+{
+}
+static inline void ftrace_disable_daemon(void) { }
+static inline void ftrace_enable_daemon(void) { }
 static inline void ftrace_release_mod(struct module *mod) {}
 static inline int register_ftrace_command(struct ftrace_func_command *cmd)
 {
@@ -314,16 +320,16 @@ static inline void __ftrace_enabled_restore(int enabled)
   extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
   extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
 #else
-# define time_hardirqs_on(a0, a1)		do { } while (0)
-# define time_hardirqs_off(a0, a1)		do { } while (0)
+  static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
+  static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
 #endif
 
 #ifdef CONFIG_PREEMPT_TRACER
   extern void trace_preempt_on(unsigned long a0, unsigned long a1);
   extern void trace_preempt_off(unsigned long a0, unsigned long a1);
 #else
-# define trace_preempt_on(a0, a1)		do { } while (0)
-# define trace_preempt_off(a0, a1)		do { } while (0)
+  static inline void trace_preempt_on(unsigned long a0, unsigned long a1) { }
+  static inline void trace_preempt_off(unsigned long a0, unsigned long a1) { }
 #endif
 
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
352 int depth; 358 int depth;
353}; 359};
354 360
361/* Type of the callback handlers for tracing function graph*/
362typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
363typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
364
355#ifdef CONFIG_FUNCTION_GRAPH_TRACER 365#ifdef CONFIG_FUNCTION_GRAPH_TRACER
356 366
357/* for init task */ 367/* for init task */
@@ -400,10 +410,6 @@ extern char __irqentry_text_end[];
 
 #define FTRACE_RETFUNC_DEPTH 50
 #define FTRACE_RETSTACK_ALLOC_SIZE 32
-/* Type of the callback handlers for tracing function graph*/
-typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
-typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
-
 extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 				trace_func_graph_ent_t entryfunc);
 
@@ -441,6 +447,13 @@ static inline void unpause_graph_tracing(void)
 static inline void ftrace_graph_init_task(struct task_struct *t) { }
 static inline void ftrace_graph_exit_task(struct task_struct *t) { }
 
+static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+			  trace_func_graph_ent_t entryfunc)
+{
+	return -1;
+}
+static inline void unregister_ftrace_graph(void) { }
+
 static inline int task_curr_ret_stack(struct task_struct *tsk)
 {
 	return -1;
@@ -492,7 +505,9 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
 	return tsk->trace & TSK_TRACE_FL_GRAPH;
 }
 
-extern int ftrace_dump_on_oops;
+enum ftrace_dump_mode;
+
+extern enum ftrace_dump_mode ftrace_dump_on_oops;
 
 #ifdef CONFIG_PREEMPT
 #define INIT_TRACE_RECURSION		.trace_recursion = 0,
@@ -504,18 +519,6 @@ extern int ftrace_dump_on_oops;
 #define INIT_TRACE_RECURSION
 #endif
 
-#ifdef CONFIG_HW_BRANCH_TRACER
-
-void trace_hw_branch(u64 from, u64 to);
-void trace_hw_branch_oops(void);
-
-#else /* CONFIG_HW_BRANCH_TRACER */
-
-static inline void trace_hw_branch(u64 from, u64 to) {}
-static inline void trace_hw_branch_oops(void) {}
-
-#endif /* CONFIG_HW_BRANCH_TRACER */
-
 #ifdef CONFIG_FTRACE_SYSCALLS
 
 unsigned long arch_syscall_addr(int nr);
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index c0f4b364c711..39e71b0a3bfd 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -58,6 +58,7 @@ struct trace_iterator {
 	/* The below is zeroed out in pipe_read */
 	struct trace_seq	seq;
 	struct trace_entry	*ent;
+	unsigned long		lost_events;
 	int			leftover;
 	int			cpu;
 	u64			ts;
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index c70d27af03f9..a2d6ea49ec56 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -9,9 +9,22 @@ enum {
 };
 
 enum {
-	HW_BREAKPOINT_R = 1,
-	HW_BREAKPOINT_W = 2,
-	HW_BREAKPOINT_X = 4,
+	HW_BREAKPOINT_EMPTY	= 0,
+	HW_BREAKPOINT_R		= 1,
+	HW_BREAKPOINT_W		= 2,
+	HW_BREAKPOINT_RW	= HW_BREAKPOINT_R | HW_BREAKPOINT_W,
+	HW_BREAKPOINT_X		= 4,
+	HW_BREAKPOINT_INVALID	= HW_BREAKPOINT_RW | HW_BREAKPOINT_X,
+};
+
+enum bp_type_idx {
+	TYPE_INST	= 0,
+#ifdef CONFIG_HAVE_MIXED_BREAKPOINTS_REGS
+	TYPE_DATA	= 0,
+#else
+	TYPE_DATA	= 1,
+#endif
+	TYPE_MAX
 };
 
 #ifdef __KERNEL__
@@ -34,6 +47,12 @@ static inline void hw_breakpoint_init(struct perf_event_attr *attr)
 	attr->sample_period = 1;
 }
 
+static inline void ptrace_breakpoint_init(struct perf_event_attr *attr)
+{
+	hw_breakpoint_init(attr);
+	attr->exclude_kernel = 1;
+}
+
 static inline unsigned long hw_breakpoint_addr(struct perf_event *bp)
 {
 	return bp->attr.bp_addr;
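
ptrace_breakpoint_init() gives ptrace-style callers a common starting point; the caller then fills in the watched address, length, and type before creating the event. A hedged sketch (addr is a caller-supplied value; the length and type constants come from this header):

    struct perf_event_attr attr;

    ptrace_breakpoint_init(&attr);	/* breakpoint type, kernel excluded */
    attr.bp_addr = addr;
    attr.bp_len  = HW_BREAKPOINT_LEN_4;
    attr.bp_type = HW_BREAKPOINT_W;	/* fire on writes */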
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index b1ed1cd8e2a8..7996fc2c9ba9 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -49,7 +49,6 @@ extern struct group_info init_groups;
 		{ .first = &init_task.pids[PIDTYPE_PGID].node },	\
 		{ .first = &init_task.pids[PIDTYPE_SID].node },		\
 	},								\
-	.rcu		= RCU_HEAD_INIT,				\
 	.level		= 0,						\
 	.numbers	= { {						\
 		.nr		= 0,					\
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 3af4ffd591b9..be22ad83689c 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -37,9 +37,9 @@ struct iommu_ops {
 	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
 	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
 	int (*map)(struct iommu_domain *domain, unsigned long iova,
-		   phys_addr_t paddr, size_t size, int prot);
-	void (*unmap)(struct iommu_domain *domain, unsigned long iova,
-		      size_t size);
+		   phys_addr_t paddr, int gfp_order, int prot);
+	int (*unmap)(struct iommu_domain *domain, unsigned long iova,
+		     int gfp_order);
 	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
 				    unsigned long iova);
 	int (*domain_has_cap)(struct iommu_domain *domain,
@@ -56,10 +56,10 @@ extern int iommu_attach_device(struct iommu_domain *domain,
 			       struct device *dev);
 extern void iommu_detach_device(struct iommu_domain *domain,
 				struct device *dev);
-extern int iommu_map_range(struct iommu_domain *domain, unsigned long iova,
-			   phys_addr_t paddr, size_t size, int prot);
-extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova,
-			      size_t size);
+extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
+		     phys_addr_t paddr, int gfp_order, int prot);
+extern int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+		       int gfp_order);
 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
 				      unsigned long iova);
 extern int iommu_domain_has_cap(struct iommu_domain *domain,
@@ -96,16 +96,16 @@ static inline void iommu_detach_device(struct iommu_domain *domain,
 {
 }
 
-static inline int iommu_map_range(struct iommu_domain *domain,
-				  unsigned long iova, phys_addr_t paddr,
-				  size_t size, int prot)
+static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
+			    phys_addr_t paddr, int gfp_order, int prot)
 {
 	return -ENODEV;
 }
 
-static inline void iommu_unmap_range(struct iommu_domain *domain,
-				     unsigned long iova, size_t size)
+static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+			      int gfp_order)
 {
+	return -ENODEV;
 }
 
 static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
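
The new interface maps 2^gfp_order pages per call instead of an arbitrary byte size. A minimal sketch of a single-page (order 0) mapping under that reading (domain, iova and paddr are caller-side values; error handling trimmed):

    int ret;

    ret = iommu_map(domain, iova, paddr, 0, IOMMU_READ | IOMMU_WRITE);
    if (ret)
    	return ret;
    /* ... use the mapping ... */
    iommu_unmap(domain, iova, 0);	/* returns the order actually unmapped */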
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index a38d6bd6fde6..fc33af911852 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -492,6 +492,13 @@ static inline void tracing_off(void) { }
 static inline void tracing_off_permanent(void) { }
 static inline int tracing_is_on(void) { return 0; }
 #endif
+
+enum ftrace_dump_mode {
+	DUMP_NONE,
+	DUMP_ALL,
+	DUMP_ORIG,
+};
+
 #ifdef CONFIG_TRACING
 extern void tracing_start(void);
 extern void tracing_stop(void);
@@ -573,7 +580,7 @@ __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
 extern int
 __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
 
-extern void ftrace_dump(void);
+extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
 #else
 static inline void
 ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
@@ -594,7 +601,7 @@ ftrace_vprintk(const char *fmt, va_list ap)
 {
 	return 0;
 }
-static inline void ftrace_dump(void) { }
+static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
 #endif /* CONFIG_TRACING */
 
 /*
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 462acaf36f3a..fb19bb92b809 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -19,7 +19,6 @@ struct anon_vma;
 struct file_ra_state;
 struct user_struct;
 struct writeback_control;
-struct rlimit;
 
 #ifndef CONFIG_DISCONTIGMEM          /* Don't use mapnrs, do it properly */
 extern unsigned long max_mapnr;
@@ -1449,9 +1448,6 @@ int vmemmap_populate_basepages(struct page *start_page,
 int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
 void vmemmap_populate_print_last(void);
 
-extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
-				size_t size);
-extern void refund_locked_memory(struct mm_struct *mm, size_t size);
 
 enum mf_flags {
 	MF_COUNT_INCREASED = 1 << 0,
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 55f1f9c9506c..007fbaafead0 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -500,4 +500,13 @@ struct mdio_device_id {
 	__u32 phy_id_mask;
 };
 
+struct zorro_device_id {
+	__u32 id;			/* Device ID or ZORRO_WILDCARD */
+	kernel_ulong_t driver_data;	/* Data private to the driver */
+};
+
+#define ZORRO_WILDCARD			(0xffffffff)	/* not official */
+
+#define ZORRO_DEVICE_MODALIAS_FMT	"zorro:i%08X"
+
 #endif /* LINUX_MOD_DEVICETABLE_H */
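
With the ID table in mod_devicetable.h, Zorro drivers can declare matchable tables that file2alias turns into zorro:i%08X module aliases. A hypothetical driver-side table (the device ID below is made up):

    static const struct zorro_device_id my_zorro_ids[] = {
    	{ 0x12345678 },		/* hypothetical device ID */
    	{ 0 }			/* terminator */
    };
    MODULE_DEVICE_TABLE(zorro, my_zorro_ids);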
diff --git a/include/linux/module.h b/include/linux/module.h
index 515d53ae6a79..6914fcad4673 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -465,8 +465,7 @@ static inline void __module_get(struct module *module)
 	if (module) {
 		preempt_disable();
 		__this_cpu_inc(module->refptr->incs);
-		trace_module_get(module, _THIS_IP_,
-				 __this_cpu_read(module->refptr->incs));
+		trace_module_get(module, _THIS_IP_);
 		preempt_enable();
 	}
 }
@@ -480,8 +479,7 @@ static inline int try_module_get(struct module *module)
 
 	if (likely(module_is_live(module))) {
 		__this_cpu_inc(module->refptr->incs);
-		trace_module_get(module, _THIS_IP_,
-				 __this_cpu_read(module->refptr->incs));
+		trace_module_get(module, _THIS_IP_);
 	} else
 		ret = 0;
 
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index c8e375440403..3fd5c82e0e18 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -203,8 +203,19 @@ struct perf_event_attr {
 				enable_on_exec :  1, /* next exec enables     */
 				task           :  1, /* trace fork/exit       */
 				watermark      :  1, /* wakeup_watermark      */
-
-				__reserved_1   : 49;
+				/*
+				 * precise_ip:
+				 *
+				 *  0 - SAMPLE_IP can have arbitrary skid
+				 *  1 - SAMPLE_IP must have constant skid
+				 *  2 - SAMPLE_IP requested to have 0 skid
+				 *  3 - SAMPLE_IP must have 0 skid
+				 *
+				 *  See also PERF_RECORD_MISC_EXACT_IP
+				 */
+				precise_ip     :  2, /* skid constraint       */
+
+				__reserved_1   : 47;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
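
From userspace, the new field is requested through the same struct before perf_event_open(); a hedged sketch (callers typically retry with a smaller precise_ip value if the PMU cannot honor the skid constraint):

    struct perf_event_attr attr = {
    	.type       = PERF_TYPE_HARDWARE,
    	.size       = sizeof(attr),
    	.config     = PERF_COUNT_HW_CPU_CYCLES,
    	.precise_ip = 2,	/* request zero skid, e.g. PEBS on x86 */
    };
    /* samples that really had zero skid carry PERF_RECORD_MISC_EXACT_IP */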
@@ -287,11 +298,24 @@ struct perf_event_mmap_page {
 	__u64	data_tail;		/* user-space written tail */
 };
 
-#define PERF_RECORD_MISC_CPUMODE_MASK		(3 << 0)
+#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
 #define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
 #define PERF_RECORD_MISC_KERNEL		(1 << 0)
 #define PERF_RECORD_MISC_USER			(2 << 0)
 #define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
+#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
+#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)
+
+/*
+ * Indicates that the content of PERF_SAMPLE_IP points to
+ * the actual instruction that triggered the event. See also
+ * perf_event_attr::precise_ip.
+ */
+#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
+/*
+ * Reserve the last bit to indicate some extended misc field
+ */
+#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)
 
 struct perf_event_header {
 	__u32	type;
@@ -439,6 +463,12 @@ enum perf_callchain_context {
 # include <asm/perf_event.h>
 #endif
 
+struct perf_guest_info_callbacks {
+	int (*is_in_guest) (void);
+	int (*is_user_mode) (void);
+	unsigned long (*get_guest_ip) (void);
+};
+
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 #include <asm/hw_breakpoint.h>
 #endif
@@ -468,6 +498,17 @@ struct perf_raw_record {
 	void				*data;
 };
 
+struct perf_branch_entry {
+	__u64				from;
+	__u64				to;
+	__u64				flags;
+};
+
+struct perf_branch_stack {
+	__u64				nr;
+	struct perf_branch_entry	entries[0];
+};
+
 struct task_struct;
 
 /**
@@ -506,6 +547,8 @@ struct hw_perf_event {
 
 struct perf_event;
 
+#define PERF_EVENT_TXN_STARTED 1
+
 /**
  * struct pmu - generic performance monitoring unit
  */
@@ -516,6 +559,16 @@ struct pmu {
 	void (*stop)			(struct perf_event *event);
 	void (*read)			(struct perf_event *event);
 	void (*unthrottle)		(struct perf_event *event);
+
+	/*
+	 * group events scheduling is treated as a transaction,
+	 * add group events as a whole and perform one schedulability test.
+	 * If test fails, roll back the whole group
+	 */
+
+	void (*start_txn)	(const struct pmu *pmu);
+	void (*cancel_txn)	(const struct pmu *pmu);
+	int  (*commit_txn)	(const struct pmu *pmu);
 };
 
 /**
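
A sketch of how a caller is expected to drive the three hooks when scheduling a group (simplified; leader and event are caller-side names, and the real logic lives in the perf core):

    pmu->start_txn(pmu);

    /* add every group member while the transaction is open */
    list_for_each_entry(event, &leader->sibling_list, group_entry)
    	event->pmu->enable(event);

    if (pmu->commit_txn(pmu)) {
    	pmu->cancel_txn(pmu);	/* schedulability test failed: roll back */
    	/* caller undoes the enables and retries later */
    }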
@@ -571,6 +624,14 @@ enum perf_group_flag {
 	PERF_GROUP_SOFTWARE = 0x1,
 };
 
+#define SWEVENT_HLIST_BITS	8
+#define SWEVENT_HLIST_SIZE	(1 << SWEVENT_HLIST_BITS)
+
+struct swevent_hlist {
+	struct hlist_head	heads[SWEVENT_HLIST_SIZE];
+	struct rcu_head		rcu_head;
+};
+
 /**
  * struct perf_event - performance event kernel representation:
  */
@@ -579,6 +640,7 @@ struct perf_event {
 	struct list_head		group_entry;
 	struct list_head		event_entry;
 	struct list_head		sibling_list;
+	struct hlist_node		hlist_entry;
 	int				nr_siblings;
 	int				group_flags;
 	struct perf_event		*group_leader;
@@ -726,6 +788,9 @@ struct perf_cpu_context {
 	int				active_oncpu;
 	int				max_pertask;
 	int				exclusive;
+	struct swevent_hlist		*swevent_hlist;
+	struct mutex			hlist_mutex;
+	int				hlist_refcount;
 
 	/*
 	 * Recursion avoidance:
@@ -769,9 +834,6 @@ extern void perf_disable(void);
 extern void perf_enable(void);
 extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
-extern int hw_perf_group_sched_in(struct perf_event *group_leader,
-	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx);
 extern void perf_event_update_userpage(struct perf_event *event);
 extern int perf_event_release_kernel(struct perf_event *event);
 extern struct perf_event *
@@ -902,6 +964,10 @@ static inline void perf_event_mmap(struct vm_area_struct *vma)
 		__perf_event_mmap(vma);
 }
 
+extern struct perf_guest_info_callbacks *perf_guest_cbs;
+extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+
 extern void perf_event_comm(struct task_struct *tsk);
 extern void perf_event_fork(struct task_struct *tsk);
 
@@ -971,6 +1037,11 @@ perf_sw_event(u32 event_id, u64 nr, int nmi,
 static inline void
 perf_bp_event(struct perf_event *event, void *data)			{ }
 
+static inline int perf_register_guest_info_callbacks
+(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
+static inline int perf_unregister_guest_info_callbacks
+(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
+
 static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
 static inline void perf_event_comm(struct task_struct *tsk)		{ }
 static inline void perf_event_fork(struct task_struct *tsk)		{ }
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 212da17d06af..5417944d3687 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -44,12 +44,14 @@ extern int platform_get_irq_byname(struct platform_device *, const char *);
 extern int platform_add_devices(struct platform_device **, int);
 
 extern struct platform_device *platform_device_register_simple(const char *, int id,
-					struct resource *, unsigned int);
+					const struct resource *, unsigned int);
 extern struct platform_device *platform_device_register_data(struct device *,
 		const char *, int, const void *, size_t);
 
 extern struct platform_device *platform_device_alloc(const char *name, int id);
-extern int platform_device_add_resources(struct platform_device *pdev, struct resource *res, unsigned int num);
+extern int platform_device_add_resources(struct platform_device *pdev,
+					 const struct resource *res,
+					 unsigned int num);
 extern int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size);
 extern int platform_device_add(struct platform_device *pdev);
 extern void platform_device_del(struct platform_device *pdev);
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index e1fb60729979..4272521e29e9 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -345,18 +345,6 @@ static inline void user_single_step_siginfo(struct task_struct *tsk,
 #define arch_ptrace_stop(code, info)		do { } while (0)
 #endif
 
-#ifndef arch_ptrace_untrace
-/*
- * Do machine-specific work before untracing child.
- *
- * This is called for a normal detach as well as from ptrace_exit()
- * when the tracing task dies.
- *
- * Called with write_lock(&tasklist_lock) held.
- */
-#define arch_ptrace_untrace(task)		do { } while (0)
-#endif
-
 extern int task_current_syscall(struct task_struct *target, long *callno,
 				unsigned long args[6], unsigned int maxargs,
 				unsigned long *sp, unsigned long *pc);
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index 5210a5c60877..fe1872e5b37e 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -110,6 +110,7 @@ struct rb_node
 struct rb_root
 {
 	struct rb_node *rb_node;
+	void (*augment_cb)(struct rb_node *node);
 };
 
 
@@ -129,7 +130,9 @@ static inline void rb_set_color(struct rb_node *rb, int color)
 	rb->rb_parent_color = (rb->rb_parent_color & ~1) | color;
 }
 
-#define RB_ROOT	(struct rb_root) { NULL, }
+#define RB_ROOT	(struct rb_root) { NULL, NULL, }
+#define RB_AUGMENT_ROOT(x)	(struct rb_root) { NULL, x}
+
 #define	rb_entry(ptr, type, member) container_of(ptr, type, member)
 
 #define RB_EMPTY_ROOT(root)	((root)->rb_node == NULL)
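
A hedged sketch of opting in to the new callback (my_augment is hypothetical; it would recompute cached per-node data, e.g. a subtree maximum, whenever rebalancing touches a node):

    static void my_augment(struct rb_node *node)
    {
    	/* recompute cached data for 'node' from its children */
    }

    static struct rb_root my_tree = RB_AUGMENT_ROOT(&my_augment);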
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index db266bbed23f..b653b4aaa8a6 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -56,8 +56,6 @@ struct rcu_head {
 };
 
 /* Exported common interfaces */
-extern void synchronize_rcu_bh(void);
-extern void synchronize_sched(void);
 extern void rcu_barrier(void);
 extern void rcu_barrier_bh(void);
 extern void rcu_barrier_sched(void);
@@ -66,8 +64,6 @@ extern int sched_expedited_torture_stats(char *page);
 
 /* Internal to kernel */
 extern void rcu_init(void);
-extern int rcu_scheduler_active;
-extern void rcu_scheduler_starting(void);
 
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
 #include <linux/rcutree.h>
@@ -83,6 +79,14 @@ extern void rcu_scheduler_starting(void);
 	(ptr)->next = NULL; (ptr)->func = NULL; \
 } while (0)
 
+static inline void init_rcu_head_on_stack(struct rcu_head *head)
+{
+}
+
+static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
+{
+}
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 extern struct lockdep_map rcu_lock_map;
@@ -106,12 +110,13 @@ extern int debug_lockdep_rcu_enabled(void);
 /**
  * rcu_read_lock_held - might we be in RCU read-side critical section?
  *
- * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
- * an RCU read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
+ * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
+ * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
  * this assumes we are in an RCU read-side critical section unless it can
  * prove otherwise.
  *
- * Check rcu_scheduler_active to prevent false positives during boot.
+ * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * and while lockdep is disabled.
  */
 static inline int rcu_read_lock_held(void)
 {
@@ -129,13 +134,15 @@ extern int rcu_read_lock_bh_held(void);
 /**
  * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section?
  *
- * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in an
- * RCU-sched read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
- * this assumes we are in an RCU-sched read-side critical section unless it
- * can prove otherwise.  Note that disabling of preemption (including
- * disabling irqs) counts as an RCU-sched read-side critical section.
+ * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
+ * RCU-sched read-side critical section.  In absence of
+ * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
+ * critical section unless it can prove otherwise.  Note that disabling
+ * of preemption (including disabling irqs) counts as an RCU-sched
+ * read-side critical section.
  *
- * Check rcu_scheduler_active to prevent false positives during boot.
+ * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * and while lockdep is disabled.
  */
 #ifdef CONFIG_PREEMPT
 static inline int rcu_read_lock_sched_held(void)
@@ -177,7 +184,7 @@ static inline int rcu_read_lock_bh_held(void)
 #ifdef CONFIG_PREEMPT
 static inline int rcu_read_lock_sched_held(void)
 {
-	return !rcu_scheduler_active || preempt_count() != 0 || irqs_disabled();
+	return preempt_count() != 0 || irqs_disabled();
 }
 #else /* #ifdef CONFIG_PREEMPT */
 static inline int rcu_read_lock_sched_held(void)
@@ -192,6 +199,15 @@ static inline int rcu_read_lock_sched_held(void)
 
 extern int rcu_my_thread_group_empty(void);
 
+#define __do_rcu_dereference_check(c)					\
+	do {								\
+		static bool __warned;					\
+		if (debug_lockdep_rcu_enabled() && !__warned && !(c)) {	\
+			__warned = true;				\
+			lockdep_rcu_dereference(__FILE__, __LINE__);	\
+		}							\
+	} while (0)
+
 /**
  * rcu_dereference_check - rcu_dereference with debug checking
  * @p: The pointer to read, prior to dereferencing
@@ -221,8 +237,7 @@ extern int rcu_my_thread_group_empty(void);
  */
 #define rcu_dereference_check(p, c) \
 	({ \
-		if (debug_lockdep_rcu_enabled() && !(c)) \
-			lockdep_rcu_dereference(__FILE__, __LINE__); \
+		__do_rcu_dereference_check(c); \
 		rcu_dereference_raw(p); \
 	})
 
@@ -239,8 +254,7 @@ extern int rcu_my_thread_group_empty(void);
  */
 #define rcu_dereference_protected(p, c) \
 	({ \
-		if (debug_lockdep_rcu_enabled() && !(c)) \
-			lockdep_rcu_dereference(__FILE__, __LINE__); \
+		__do_rcu_dereference_check(c); \
 		(p); \
 	})
 
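
Typical callers pass the lockdep condition under which the dereference is legal, so the splat fires (once per site, thanks to __warned) only when neither protection holds. A sketch (gp and my_lock are hypothetical):

    p = rcu_dereference_check(gp,
    			      rcu_read_lock_held() ||
    			      lockdep_is_held(&my_lock));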
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index a5195875480a..e2e893144a84 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -29,6 +29,10 @@
 
 void rcu_sched_qs(int cpu);
 void rcu_bh_qs(int cpu);
+static inline void rcu_note_context_switch(int cpu)
+{
+	rcu_sched_qs(cpu);
+}
 
 #define __rcu_read_lock()	preempt_disable()
 #define __rcu_read_unlock()	preempt_enable()
@@ -60,8 +64,6 @@ static inline long rcu_batches_completed_bh(void)
 	return 0;
 }
 
-extern int rcu_expedited_torture_stats(char *page);
-
 static inline void rcu_force_quiescent_state(void)
 {
 }
@@ -74,7 +76,17 @@ static inline void rcu_sched_force_quiescent_state(void)
 {
 }
 
-#define synchronize_rcu synchronize_sched
+extern void synchronize_sched(void);
+
+static inline void synchronize_rcu(void)
+{
+	synchronize_sched();
+}
+
+static inline void synchronize_rcu_bh(void)
+{
+	synchronize_sched();
+}
 
 static inline void synchronize_rcu_expedited(void)
 {
@@ -114,4 +126,17 @@ static inline int rcu_preempt_depth(void)
 	return 0;
 }
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+extern int rcu_scheduler_active __read_mostly;
+extern void rcu_scheduler_starting(void);
+
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+static inline void rcu_scheduler_starting(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
 #endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 42cc3a04779e..c0ed1c056f29 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -34,8 +34,8 @@ struct notifier_block;
 
 extern void rcu_sched_qs(int cpu);
 extern void rcu_bh_qs(int cpu);
+extern void rcu_note_context_switch(int cpu);
 extern int rcu_needs_cpu(int cpu);
-extern int rcu_expedited_torture_stats(char *page);
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
 
@@ -86,6 +86,8 @@ static inline void __rcu_read_unlock_bh(void)
 
 extern void call_rcu_sched(struct rcu_head *head,
 			   void (*func)(struct rcu_head *rcu));
+extern void synchronize_rcu_bh(void);
+extern void synchronize_sched(void);
 extern void synchronize_rcu_expedited(void);
 
 static inline void synchronize_rcu_bh_expedited(void)
@@ -120,4 +122,7 @@ static inline int rcu_blocking_is_gp(void)
 	return num_online_cpus() == 1;
 }
 
+extern void rcu_scheduler_starting(void);
+extern int rcu_scheduler_active __read_mostly;
+
 #endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 5fcc31ed5771..25b4f686d918 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -120,12 +120,16 @@ int ring_buffer_write(struct ring_buffer *buffer,
 			unsigned long length, void *data);
 
 struct ring_buffer_event *
-ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts);
+ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
+		 unsigned long *lost_events);
 struct ring_buffer_event *
-ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts);
+ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
+		    unsigned long *lost_events);
 
 struct ring_buffer_iter *
-ring_buffer_read_start(struct ring_buffer *buffer, int cpu);
+ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu);
+void ring_buffer_read_prepare_sync(void);
+void ring_buffer_read_start(struct ring_buffer_iter *iter);
 void ring_buffer_read_finish(struct ring_buffer_iter *iter);
 
 struct ring_buffer_event *
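
Splitting ring_buffer_read_start() into prepare/sync/start lets a reader prepare iterators on several CPUs and pay for one synchronization instead of one per CPU. A hedged sketch for a single CPU (buffer and cpu are caller-side values):

    struct ring_buffer_iter *iter;
    struct ring_buffer_event *event;
    u64 ts;

    iter = ring_buffer_read_prepare(buffer, cpu);
    ring_buffer_read_prepare_sync();	/* once, covering all prepared iters */
    ring_buffer_read_start(iter);

    while ((event = ring_buffer_read(iter, &ts)) != NULL)
    	;	/* consume the event */

    ring_buffer_read_finish(iter);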
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2b7b81df78b3..b55e988988b5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -99,7 +99,6 @@ struct futex_pi_state;
 struct robust_list_head;
 struct bio_list;
 struct fs_struct;
-struct bts_context;
 struct perf_event_context;
 
 /*
@@ -275,11 +274,17 @@ extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
 extern int get_nohz_load_balancer(void);
+extern int nohz_ratelimit(int cpu);
 #else
 static inline int select_nohz_load_balancer(int cpu)
 {
 	return 0;
 }
+
+static inline int nohz_ratelimit(int cpu)
+{
+	return 0;
+}
 #endif
 
 /*
@@ -954,6 +959,7 @@ struct sched_domain {
954 char *name; 959 char *name;
955#endif 960#endif
956 961
962 unsigned int span_weight;
957 /* 963 /*
958 * Span of all CPUs in this domain. 964 * Span of all CPUs in this domain.
959 * 965 *
@@ -1026,12 +1032,17 @@ struct sched_domain;
1026#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ 1032#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
1027#define WF_FORK 0x02 /* child wakeup after fork */ 1033#define WF_FORK 0x02 /* child wakeup after fork */
1028 1034
1035#define ENQUEUE_WAKEUP 1
1036#define ENQUEUE_WAKING 2
1037#define ENQUEUE_HEAD 4
1038
1039#define DEQUEUE_SLEEP 1
1040
1029struct sched_class { 1041struct sched_class {
1030 const struct sched_class *next; 1042 const struct sched_class *next;
1031 1043
1032 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup, 1044 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1033 bool head); 1045 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1034 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
1035 void (*yield_task) (struct rq *rq); 1046 void (*yield_task) (struct rq *rq);
1036 1047
1037 void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags); 1048 void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
@@ -1040,7 +1051,8 @@ struct sched_class {
1040 void (*put_prev_task) (struct rq *rq, struct task_struct *p); 1051 void (*put_prev_task) (struct rq *rq, struct task_struct *p);
1041 1052
1042#ifdef CONFIG_SMP 1053#ifdef CONFIG_SMP
1043 int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags); 1054 int (*select_task_rq)(struct rq *rq, struct task_struct *p,
1055 int sd_flag, int flags);
1044 1056
1045 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); 1057 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
1046 void (*post_schedule) (struct rq *this_rq); 1058 void (*post_schedule) (struct rq *this_rq);
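
The old (wakeup, head) and (sleep) arguments of enqueue_task/dequeue_task collapse into the ENQUEUE_*/DEQUEUE_* flag words defined above. A sketch of what a call site inside the scheduler core would look like (struct rq is private to kernel/sched.c; the wrapper names here are hypothetical):

static void activate_example(struct rq *rq, struct task_struct *p)
{
	/* was: enqueue_task(rq, p, wakeup=1, head=true) */
	p->sched_class->enqueue_task(rq, p, ENQUEUE_WAKEUP | ENQUEUE_HEAD);
}

static void deactivate_example(struct rq *rq, struct task_struct *p)
{
	/* was: dequeue_task(rq, p, sleep=1) */
	p->sched_class->dequeue_task(rq, p, DEQUEUE_SLEEP);
}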
@@ -1077,36 +1089,8 @@ struct load_weight {
1077 unsigned long weight, inv_weight; 1089 unsigned long weight, inv_weight;
1078}; 1090};
1079 1091
1080/*
1081 * CFS stats for a schedulable entity (task, task-group etc)
1082 *
1083 * Current field usage histogram:
1084 *
1085 * 4 se->block_start
1086 * 4 se->run_node
1087 * 4 se->sleep_start
1088 * 6 se->load.weight
1089 */
1090struct sched_entity {
1091 struct load_weight load; /* for load-balancing */
1092 struct rb_node run_node;
1093 struct list_head group_node;
1094 unsigned int on_rq;
1095
1096 u64 exec_start;
1097 u64 sum_exec_runtime;
1098 u64 vruntime;
1099 u64 prev_sum_exec_runtime;
1100
1101 u64 last_wakeup;
1102 u64 avg_overlap;
1103
1104 u64 nr_migrations;
1105
1106 u64 start_runtime;
1107 u64 avg_wakeup;
1108
1109#ifdef CONFIG_SCHEDSTATS 1092#ifdef CONFIG_SCHEDSTATS
1093struct sched_statistics {
1110 u64 wait_start; 1094 u64 wait_start;
1111 u64 wait_max; 1095 u64 wait_max;
1112 u64 wait_count; 1096 u64 wait_count;
@@ -1138,6 +1122,24 @@ struct sched_entity {
1138 u64 nr_wakeups_affine_attempts; 1122 u64 nr_wakeups_affine_attempts;
1139 u64 nr_wakeups_passive; 1123 u64 nr_wakeups_passive;
1140 u64 nr_wakeups_idle; 1124 u64 nr_wakeups_idle;
1125};
1126#endif
1127
1128struct sched_entity {
1129 struct load_weight load; /* for load-balancing */
1130 struct rb_node run_node;
1131 struct list_head group_node;
1132 unsigned int on_rq;
1133
1134 u64 exec_start;
1135 u64 sum_exec_runtime;
1136 u64 vruntime;
1137 u64 prev_sum_exec_runtime;
1138
1139 u64 nr_migrations;
1140
1141#ifdef CONFIG_SCHEDSTATS
1142 struct sched_statistics statistics;
1141#endif 1143#endif
1142 1144
1143#ifdef CONFIG_FAIR_GROUP_SCHED 1145#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1272,12 +1274,6 @@ struct task_struct {
1272 struct list_head ptraced; 1274 struct list_head ptraced;
1273 struct list_head ptrace_entry; 1275 struct list_head ptrace_entry;
1274 1276
1275 /*
1276 * This is the tracer handle for the ptrace BTS extension.
1277 * This field actually belongs to the ptracer task.
1278 */
1279 struct bts_context *bts;
1280
1281 /* PID/PID hash table linkage. */ 1277 /* PID/PID hash table linkage. */
1282 struct pid_link pids[PIDTYPE_MAX]; 1278 struct pid_link pids[PIDTYPE_MAX];
1283 struct list_head thread_group; 1279 struct list_head thread_group;
@@ -1846,6 +1842,7 @@ extern void sched_clock_idle_sleep_event(void);
1846extern void sched_clock_idle_wakeup_event(u64 delta_ns); 1842extern void sched_clock_idle_wakeup_event(u64 delta_ns);
1847 1843
1848#ifdef CONFIG_HOTPLUG_CPU 1844#ifdef CONFIG_HOTPLUG_CPU
1845extern void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p);
1849extern void idle_task_exit(void); 1846extern void idle_task_exit(void);
1850#else 1847#else
1851static inline void idle_task_exit(void) {} 1848static inline void idle_task_exit(void) {}
@@ -2122,10 +2119,8 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
2122extern char *get_task_comm(char *to, struct task_struct *tsk); 2119extern char *get_task_comm(char *to, struct task_struct *tsk);
2123 2120
2124#ifdef CONFIG_SMP 2121#ifdef CONFIG_SMP
2125extern void wait_task_context_switch(struct task_struct *p);
2126extern unsigned long wait_task_inactive(struct task_struct *, long match_state); 2122extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2127#else 2123#else
2128static inline void wait_task_context_switch(struct task_struct *p) {}
2129static inline unsigned long wait_task_inactive(struct task_struct *p, 2124static inline unsigned long wait_task_inactive(struct task_struct *p,
2130 long match_state) 2125 long match_state)
2131{ 2126{
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 4d5ecb222af9..4d5d2f546dbf 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -27,6 +27,8 @@
27#ifndef _LINUX_SRCU_H 27#ifndef _LINUX_SRCU_H
28#define _LINUX_SRCU_H 28#define _LINUX_SRCU_H
29 29
30#include <linux/mutex.h>
31
30struct srcu_struct_array { 32struct srcu_struct_array {
31 int c[2]; 33 int c[2];
32}; 34};
@@ -84,8 +86,8 @@ long srcu_batches_completed(struct srcu_struct *sp);
84/** 86/**
85 * srcu_read_lock_held - might we be in SRCU read-side critical section? 87 * srcu_read_lock_held - might we be in SRCU read-side critical section?
86 * 88 *
87 * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in 89 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
88 * an SRCU read-side critical section. In the absence of CONFIG_PROVE_LOCKING, 90 * read-side critical section. In the absence of CONFIG_DEBUG_LOCK_ALLOC,
89 * this assumes we are in an SRCU read-side critical section unless it can 91 * this assumes we are in an SRCU read-side critical section unless it can
90 * prove otherwise. 92 * prove otherwise.
91 */ 93 */
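
A sketch of what the reworded guarantee above is for: debug assertions on the calling context. The srcu_struct and accessor names are hypothetical; with CONFIG_DEBUG_LOCK_ALLOC enabled the WARN fires on misuse, without it the check stays permissive:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/srcu.h>

static struct srcu_struct my_srcu;	/* init_srcu_struct() at setup */
static void *my_data;

static void *get_my_data(void)
{
	WARN_ON_ONCE(!srcu_read_lock_held(&my_srcu));
	return rcu_dereference(my_data);
}

static void reader(void)
{
	int idx = srcu_read_lock(&my_srcu);
	void *p = get_my_data();

	if (p) {
		/* use p; it stays valid until srcu_read_unlock() */
	}
	srcu_read_unlock(&my_srcu, idx);
}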
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index baba3a23a814..6b524a0d02e4 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -1,13 +1,101 @@
1#ifndef _LINUX_STOP_MACHINE 1#ifndef _LINUX_STOP_MACHINE
2#define _LINUX_STOP_MACHINE 2#define _LINUX_STOP_MACHINE
3/* "Bogolock": stop the entire machine, disable interrupts. This is a 3
4 very heavy lock, which is equivalent to grabbing every spinlock
5 (and more). So the "read" side to such a lock is anything which
6 disables preemption. */
7#include <linux/cpu.h> 4#include <linux/cpu.h>
8#include <linux/cpumask.h> 5#include <linux/cpumask.h>
6#include <linux/list.h>
9#include <asm/system.h> 7#include <asm/system.h>
10 8
9/*
10 * stop_cpu[s]() is a simplistic per-cpu, maximum-priority cpu
11 * monopolization mechanism. The caller can specify a non-sleeping
12 * function to be executed on one or more cpus, preempting all
13 * other processes and monopolizing those cpus until it finishes.
14 *
15 * Resources for this mechanism are preallocated when a cpu is brought
16 * up and requests are guaranteed to be served as long as the target
17 * cpus are online.
18 */
19typedef int (*cpu_stop_fn_t)(void *arg);
20
21#ifdef CONFIG_SMP
22
23struct cpu_stop_work {
24 struct list_head list; /* cpu_stopper->works */
25 cpu_stop_fn_t fn;
26 void *arg;
27 struct cpu_stop_done *done;
28};
29
30int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
31void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
32 struct cpu_stop_work *work_buf);
33int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
34int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
35
36#else /* CONFIG_SMP */
37
38#include <linux/workqueue.h>
39
40struct cpu_stop_work {
41 struct work_struct work;
42 cpu_stop_fn_t fn;
43 void *arg;
44};
45
46static inline int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
47{
48 int ret = -ENOENT;
49 preempt_disable();
50 if (cpu == smp_processor_id())
51 ret = fn(arg);
52 preempt_enable();
53 return ret;
54}
55
56static void stop_one_cpu_nowait_workfn(struct work_struct *work)
57{
58 struct cpu_stop_work *stwork =
59 container_of(work, struct cpu_stop_work, work);
60 preempt_disable();
61 stwork->fn(stwork->arg);
62 preempt_enable();
63}
64
65static inline void stop_one_cpu_nowait(unsigned int cpu,
66 cpu_stop_fn_t fn, void *arg,
67 struct cpu_stop_work *work_buf)
68{
69 if (cpu == smp_processor_id()) {
70 INIT_WORK(&work_buf->work, stop_one_cpu_nowait_workfn);
71 work_buf->fn = fn;
72 work_buf->arg = arg;
73 schedule_work(&work_buf->work);
74 }
75}
76
77static inline int stop_cpus(const struct cpumask *cpumask,
78 cpu_stop_fn_t fn, void *arg)
79{
80 if (cpumask_test_cpu(raw_smp_processor_id(), cpumask))
81 return stop_one_cpu(raw_smp_processor_id(), fn, arg);
82 return -ENOENT;
83}
84
85static inline int try_stop_cpus(const struct cpumask *cpumask,
86 cpu_stop_fn_t fn, void *arg)
87{
88 return stop_cpus(cpumask, fn, arg);
89}
90
91#endif /* CONFIG_SMP */
92
93/*
94 * stop_machine "Bogolock": stop the entire machine, disable
95 * interrupts. This is a very heavy lock, which is equivalent to
96 * grabbing every spinlock (and more). So the "read" side to such a
97 * lock is anything which disables preemption.
98 */
11#if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP) 99#if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)
12 100
13/** 101/**
@@ -36,24 +124,7 @@ int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
36 */ 124 */
37int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus); 125int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
38 126
39/** 127#else /* CONFIG_STOP_MACHINE && CONFIG_SMP */
40 * stop_machine_create: create all stop_machine threads
41 *
42 * Description: This causes all stop_machine threads to be created before
43 * stop_machine actually gets called. This can be used by subsystems that
44 * need a non failing stop_machine infrastructure.
45 */
46int stop_machine_create(void);
47
48/**
49 * stop_machine_destroy: destroy all stop_machine threads
50 *
51 * Description: This causes all stop_machine threads which were created with
52 * stop_machine_create to be destroyed again.
53 */
54void stop_machine_destroy(void);
55
56#else
57 128
58static inline int stop_machine(int (*fn)(void *), void *data, 129static inline int stop_machine(int (*fn)(void *), void *data,
59 const struct cpumask *cpus) 130 const struct cpumask *cpus)
@@ -65,8 +136,5 @@ static inline int stop_machine(int (*fn)(void *), void *data,
65 return ret; 136 return ret;
66} 137}
67 138
68static inline int stop_machine_create(void) { return 0; } 139#endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */
69static inline void stop_machine_destroy(void) { } 140#endif /* _LINUX_STOP_MACHINE */
70
71#endif /* CONFIG_SMP */
72#endif /* _LINUX_STOP_MACHINE */
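
A minimal sketch of the new cpu_stop interface declared above; the callback and caller names are hypothetical. The callback runs preemptively over everything else on the target cpu and must not sleep:

#include <linux/stop_machine.h>

static int set_flag_on_cpu(void *arg)
{
	*(int *)arg = 1;	/* runs with the target cpu monopolized */
	return 0;
}

static int example(void)
{
	int flag = 0;

	/* returns the callback's result, or an error if cpu 0
	 * could not be stopped (e.g. it is not online) */
	return stop_one_cpu(0, set_flag_on_cpu, &flag);
}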
diff --git a/include/linux/tick.h b/include/linux/tick.h
index d2ae79e21be3..b232ccc0ee29 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -42,6 +42,7 @@ enum tick_nohz_mode {
42 * @idle_waketime: Time when the idle was interrupted 42 * @idle_waketime: Time when the idle was interrupted
43 * @idle_exittime: Time when the idle state was left 43 * @idle_exittime: Time when the idle state was left
44 * @idle_sleeptime: Sum of the time slept in idle with sched tick stopped 44 * @idle_sleeptime: Sum of the time slept in idle with sched tick stopped
45 * @iowait_sleeptime: Sum of the time slept in idle with sched tick stopped, with IO outstanding
45 * @sleep_length: Duration of the current idle sleep 46 * @sleep_length: Duration of the current idle sleep
46 * @do_timer_last: CPU was the last one doing do_timer before going idle 47 * @do_timer_last: CPU was the last one doing do_timer before going idle
47 */ 48 */
@@ -60,7 +61,7 @@ struct tick_sched {
60 ktime_t idle_waketime; 61 ktime_t idle_waketime;
61 ktime_t idle_exittime; 62 ktime_t idle_exittime;
62 ktime_t idle_sleeptime; 63 ktime_t idle_sleeptime;
63 ktime_t idle_lastupdate; 64 ktime_t iowait_sleeptime;
64 ktime_t sleep_length; 65 ktime_t sleep_length;
65 unsigned long last_jiffies; 66 unsigned long last_jiffies;
66 unsigned long next_jiffies; 67 unsigned long next_jiffies;
@@ -124,6 +125,7 @@ extern void tick_nohz_stop_sched_tick(int inidle);
124extern void tick_nohz_restart_sched_tick(void); 125extern void tick_nohz_restart_sched_tick(void);
125extern ktime_t tick_nohz_get_sleep_length(void); 126extern ktime_t tick_nohz_get_sleep_length(void);
126extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); 127extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
128extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
127# else 129# else
128static inline void tick_nohz_stop_sched_tick(int inidle) { } 130static inline void tick_nohz_stop_sched_tick(int inidle) { }
129static inline void tick_nohz_restart_sched_tick(void) { } 131static inline void tick_nohz_restart_sched_tick(void) { }
@@ -134,6 +136,7 @@ static inline ktime_t tick_nohz_get_sleep_length(void)
134 return len; 136 return len;
135} 137}
136static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; } 138static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
139static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
137# endif /* !NO_HZ */ 140# endif /* !NO_HZ */
138 141
139#endif 142#endif
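
A sketch of how the new iowait accounting above can be sampled next to the existing idle time, e.g. for a governor-style load estimate; sample_cpu_times() is a hypothetical name, and both accessors return -1 when the kernel is built without NO_HZ:

#include <linux/kernel.h>
#include <linux/tick.h>

static void sample_cpu_times(int cpu)
{
	u64 last_update;
	u64 idle_us = get_cpu_idle_time_us(cpu, &last_update);
	u64 iowait_us = get_cpu_iowait_time_us(cpu, &last_update);

	printk(KERN_DEBUG "cpu%d: idle %llu us, iowait %llu us\n",
	       cpu, (unsigned long long)idle_us,
	       (unsigned long long)iowait_us);
}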
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 78b4bd3be496..1d85f9a6a199 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -33,6 +33,65 @@ struct tracepoint {
33 * Keep in sync with vmlinux.lds.h. 33 * Keep in sync with vmlinux.lds.h.
34 */ 34 */
35 35
36/*
37 * Connect a probe to a tracepoint.
38 * Internal API, should not be used directly.
39 */
40extern int tracepoint_probe_register(const char *name, void *probe);
41
42/*
43 * Disconnect a probe from a tracepoint.
44 * Internal API, should not be used directly.
45 */
46extern int tracepoint_probe_unregister(const char *name, void *probe);
47
48extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
49extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
50extern void tracepoint_probe_update_all(void);
51
52struct tracepoint_iter {
53 struct module *module;
54 struct tracepoint *tracepoint;
55};
56
57extern void tracepoint_iter_start(struct tracepoint_iter *iter);
58extern void tracepoint_iter_next(struct tracepoint_iter *iter);
59extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
60extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
61extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
62 struct tracepoint *begin, struct tracepoint *end);
63
64/*
65 * tracepoint_synchronize_unregister must be called between the last tracepoint
66 * probe unregistration and the end of module exit to make sure there is no
67 * caller executing a probe when it is freed.
68 */
69static inline void tracepoint_synchronize_unregister(void)
70{
71 synchronize_sched();
72}
73
74#define PARAMS(args...) args
75
76#ifdef CONFIG_TRACEPOINTS
77extern void tracepoint_update_probe_range(struct tracepoint *begin,
78 struct tracepoint *end);
79#else
80static inline void tracepoint_update_probe_range(struct tracepoint *begin,
81 struct tracepoint *end)
82{ }
83#endif /* CONFIG_TRACEPOINTS */
84
85#endif /* _LINUX_TRACEPOINT_H */
86
87/*
88 * Note: we keep the TRACE_EVENT and DECLARE_TRACE outside the include
89 * file ifdef protection.
90 * This is due to the way trace events work. If a file includes two
91 * trace event headers under one "CREATE_TRACE_POINTS" the first include
92 * will override the TRACE_EVENT and break the second include.
93 */
94
36#ifndef DECLARE_TRACE 95#ifndef DECLARE_TRACE
37 96
38#define TP_PROTO(args...) args 97#define TP_PROTO(args...) args
@@ -96,9 +155,6 @@ struct tracepoint {
96#define EXPORT_TRACEPOINT_SYMBOL(name) \ 155#define EXPORT_TRACEPOINT_SYMBOL(name) \
97 EXPORT_SYMBOL(__tracepoint_##name) 156 EXPORT_SYMBOL(__tracepoint_##name)
98 157
99extern void tracepoint_update_probe_range(struct tracepoint *begin,
100 struct tracepoint *end);
101
102#else /* !CONFIG_TRACEPOINTS */ 158#else /* !CONFIG_TRACEPOINTS */
103#define DECLARE_TRACE(name, proto, args) \ 159#define DECLARE_TRACE(name, proto, args) \
104 static inline void _do_trace_##name(struct tracepoint *tp, proto) \ 160 static inline void _do_trace_##name(struct tracepoint *tp, proto) \
@@ -119,61 +175,9 @@ extern void tracepoint_update_probe_range(struct tracepoint *begin,
119#define EXPORT_TRACEPOINT_SYMBOL_GPL(name) 175#define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
120#define EXPORT_TRACEPOINT_SYMBOL(name) 176#define EXPORT_TRACEPOINT_SYMBOL(name)
121 177
122static inline void tracepoint_update_probe_range(struct tracepoint *begin,
123 struct tracepoint *end)
124{ }
125#endif /* CONFIG_TRACEPOINTS */ 178#endif /* CONFIG_TRACEPOINTS */
126#endif /* DECLARE_TRACE */ 179#endif /* DECLARE_TRACE */
127 180
128/*
129 * Connect a probe to a tracepoint.
130 * Internal API, should not be used directly.
131 */
132extern int tracepoint_probe_register(const char *name, void *probe);
133
134/*
135 * Disconnect a probe from a tracepoint.
136 * Internal API, should not be used directly.
137 */
138extern int tracepoint_probe_unregister(const char *name, void *probe);
139
140extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
141extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
142extern void tracepoint_probe_update_all(void);
143
144struct tracepoint_iter {
145 struct module *module;
146 struct tracepoint *tracepoint;
147};
148
149extern void tracepoint_iter_start(struct tracepoint_iter *iter);
150extern void tracepoint_iter_next(struct tracepoint_iter *iter);
151extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
152extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
153extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
154 struct tracepoint *begin, struct tracepoint *end);
155
156/*
157 * tracepoint_synchronize_unregister must be called between the last tracepoint
158 * probe unregistration and the end of module exit to make sure there is no
159 * caller executing a probe when it is freed.
160 */
161static inline void tracepoint_synchronize_unregister(void)
162{
163 synchronize_sched();
164}
165
166#define PARAMS(args...) args
167
168#endif /* _LINUX_TRACEPOINT_H */
169
170/*
171 * Note: we keep the TRACE_EVENT outside the include file ifdef protection.
172 * This is due to the way trace events work. If a file includes two
173 * trace event headers under one "CREATE_TRACE_POINTS" the first include
174 * will override the TRACE_EVENT and break the second include.
175 */
176
177#ifndef TRACE_EVENT 181#ifndef TRACE_EVENT
178/* 182/*
179 * For use with the TRACE_EVENT macro: 183 * For use with the TRACE_EVENT macro:
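
The relocated comment block above carries an ordering rule worth illustrating; a sketch of a module exit path, with a hypothetical tracepoint name and probe (real callers normally go through the generated unregister_trace_<name>() wrapper rather than the raw string API):

static void my_probe(int value)
{
	/* probe body; may still be running on another cpu right
	 * after the unregister call returns */
}

static void my_module_exit(void)
{
	tracepoint_probe_unregister("my_event", (void *)my_probe);
	/* wait until no cpu can still be executing my_probe() */
	tracepoint_synchronize_unregister();
}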
diff --git a/include/linux/types.h b/include/linux/types.h
index c42724f8c802..23d237a075e2 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -188,12 +188,12 @@ typedef u32 phys_addr_t;
188typedef phys_addr_t resource_size_t; 188typedef phys_addr_t resource_size_t;
189 189
190typedef struct { 190typedef struct {
191 volatile int counter; 191 int counter;
192} atomic_t; 192} atomic_t;
193 193
194#ifdef CONFIG_64BIT 194#ifdef CONFIG_64BIT
195typedef struct { 195typedef struct {
196 volatile long counter; 196 long counter;
197} atomic64_t; 197} atomic64_t;
198#endif 198#endif
199 199
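
Dropping volatile from the counter follows the accessor-side idiom: keep the stored type plain and force a fresh load only in the read accessor. A sketch of the idiom itself, with hypothetical names (my_counter_read() is not kernel API):

struct my_counter {
	int value;	/* plain storage, no qualifier on the type */
};

/* The cast makes this one access volatile, so the compiler must
 * emit a real load here while all other uses stay optimizable. */
static inline int my_counter_read(struct my_counter *c)
{
	return *(volatile int *)&c->value;
}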
diff --git a/include/linux/wait.h b/include/linux/wait.h
index a48e16b77d5e..76d96d035ea0 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -127,12 +127,26 @@ static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
127/* 127/*
128 * Used for wake-one threads: 128 * Used for wake-one threads:
129 */ 129 */
130static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
131 wait_queue_t *wait)
132{
133 wait->flags |= WQ_FLAG_EXCLUSIVE;
134 __add_wait_queue(q, wait);
135}
136
130static inline void __add_wait_queue_tail(wait_queue_head_t *head, 137static inline void __add_wait_queue_tail(wait_queue_head_t *head,
131 wait_queue_t *new) 138 wait_queue_t *new)
132{ 139{
133 list_add_tail(&new->task_list, &head->task_list); 140 list_add_tail(&new->task_list, &head->task_list);
134} 141}
135 142
143static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
144 wait_queue_t *wait)
145{
146 wait->flags |= WQ_FLAG_EXCLUSIVE;
147 __add_wait_queue_tail(q, wait);
148}
149
136static inline void __remove_wait_queue(wait_queue_head_t *head, 150static inline void __remove_wait_queue(wait_queue_head_t *head,
137 wait_queue_t *old) 151 wait_queue_t *old)
138{ 152{
@@ -404,25 +418,6 @@ do { \
404}) 418})
405 419
406/* 420/*
407 * Must be called with the spinlock in the wait_queue_head_t held.
408 */
409static inline void add_wait_queue_exclusive_locked(wait_queue_head_t *q,
410 wait_queue_t * wait)
411{
412 wait->flags |= WQ_FLAG_EXCLUSIVE;
413 __add_wait_queue_tail(q, wait);
414}
415
416/*
417 * Must be called with the spinlock in the wait_queue_head_t held.
418 */
419static inline void remove_wait_queue_locked(wait_queue_head_t *q,
420 wait_queue_t * wait)
421{
422 __remove_wait_queue(q, wait);
423}
424
425/*
426 * These are the old interfaces to sleep waiting for an event. 421 * These are the old interfaces to sleep waiting for an event.
427 * They are racy. DO NOT use them, use the wait_event* interfaces above. 422 * They are racy. DO NOT use them, use the wait_event* interfaces above.
428 * We plan to remove these interfaces. 423 * We plan to remove these interfaces.
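
A sketch of the new exclusive helpers above; like all double-underscore waitqueue primitives they leave locking to the caller. The wrapper name is hypothetical:

#include <linux/wait.h>

static void add_exclusive_waiter(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	/* sets WQ_FLAG_EXCLUSIVE and queues at the tail, so wake-one
	 * wakeups visit non-exclusive waiters at the head first */
	__add_wait_queue_tail_exclusive(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}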
diff --git a/include/linux/zorro.h b/include/linux/zorro.h
index 913bfc226dda..7bf9db525e9e 100644
--- a/include/linux/zorro.h
+++ b/include/linux/zorro.h
@@ -38,8 +38,6 @@
38typedef __u32 zorro_id; 38typedef __u32 zorro_id;
39 39
40 40
41#define ZORRO_WILDCARD (0xffffffff) /* not official */
42
43/* Include the ID list */ 41/* Include the ID list */
44#include <linux/zorro_ids.h> 42#include <linux/zorro_ids.h>
45 43
@@ -116,6 +114,7 @@ struct ConfigDev {
116 114
117#include <linux/init.h> 115#include <linux/init.h>
118#include <linux/ioport.h> 116#include <linux/ioport.h>
117#include <linux/mod_devicetable.h>
119 118
120#include <asm/zorro.h> 119#include <asm/zorro.h>
121 120
@@ -142,29 +141,10 @@ struct zorro_dev {
142 * Zorro bus 141 * Zorro bus
143 */ 142 */
144 143
145struct zorro_bus {
146 struct list_head devices; /* list of devices on this bus */
147 unsigned int num_resources; /* number of resources */
148 struct resource resources[4]; /* address space routed to this bus */
149 struct device dev;
150 char name[10];
151};
152
153extern struct zorro_bus zorro_bus; /* single Zorro bus */
154extern struct bus_type zorro_bus_type; 144extern struct bus_type zorro_bus_type;
155 145
156 146
157 /* 147 /*
158 * Zorro device IDs
159 */
160
161struct zorro_device_id {
162 zorro_id id; /* Device ID or ZORRO_WILDCARD */
163 unsigned long driver_data; /* Data private to the driver */
164};
165
166
167 /*
168 * Zorro device drivers 148 * Zorro device drivers
169 */ 149 */
170 150
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index 5acfb1eb4df9..1dfab5401511 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -65,6 +65,10 @@
65 65
66#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 66#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
67 67
68/* Make all open coded DECLARE_TRACE nops */
69#undef DECLARE_TRACE
70#define DECLARE_TRACE(name, proto, args)
71
68#ifdef CONFIG_EVENT_TRACING 72#ifdef CONFIG_EVENT_TRACING
69#include <trace/ftrace.h> 73#include <trace/ftrace.h>
70#endif 74#endif
@@ -75,6 +79,7 @@
75#undef DEFINE_EVENT 79#undef DEFINE_EVENT
76#undef DEFINE_EVENT_PRINT 80#undef DEFINE_EVENT_PRINT
77#undef TRACE_HEADER_MULTI_READ 81#undef TRACE_HEADER_MULTI_READ
82#undef DECLARE_TRACE
78 83
79/* Only undef what we defined in this file */ 84/* Only undef what we defined in this file */
80#ifdef UNDEF_TRACE_INCLUDE_FILE 85#ifdef UNDEF_TRACE_INCLUDE_FILE
diff --git a/include/trace/events/lock.h b/include/trace/events/lock.h
index 5c1dcfc16c60..2821b86de63b 100644
--- a/include/trace/events/lock.h
+++ b/include/trace/events/lock.h
@@ -35,15 +35,15 @@ TRACE_EVENT(lock_acquire,
35 __get_str(name)) 35 __get_str(name))
36); 36);
37 37
38TRACE_EVENT(lock_release, 38DECLARE_EVENT_CLASS(lock,
39 39
40 TP_PROTO(struct lockdep_map *lock, int nested, unsigned long ip), 40 TP_PROTO(struct lockdep_map *lock, unsigned long ip),
41 41
42 TP_ARGS(lock, nested, ip), 42 TP_ARGS(lock, ip),
43 43
44 TP_STRUCT__entry( 44 TP_STRUCT__entry(
45 __string(name, lock->name) 45 __string( name, lock->name )
46 __field(void *, lockdep_addr) 46 __field( void *, lockdep_addr )
47 ), 47 ),
48 48
49 TP_fast_assign( 49 TP_fast_assign(
@@ -51,51 +51,30 @@ TRACE_EVENT(lock_release,
51 __entry->lockdep_addr = lock; 51 __entry->lockdep_addr = lock;
52 ), 52 ),
53 53
54 TP_printk("%p %s", 54 TP_printk("%p %s", __entry->lockdep_addr, __get_str(name))
55 __entry->lockdep_addr, __get_str(name))
56); 55);
57 56
58#ifdef CONFIG_LOCK_STAT 57DEFINE_EVENT(lock, lock_release,
59
60TRACE_EVENT(lock_contended,
61 58
62 TP_PROTO(struct lockdep_map *lock, unsigned long ip), 59 TP_PROTO(struct lockdep_map *lock, unsigned long ip),
63 60
64 TP_ARGS(lock, ip), 61 TP_ARGS(lock, ip)
62);
65 63
66 TP_STRUCT__entry( 64#ifdef CONFIG_LOCK_STAT
67 __string(name, lock->name)
68 __field(void *, lockdep_addr)
69 ),
70 65
71 TP_fast_assign( 66DEFINE_EVENT(lock, lock_contended,
72 __assign_str(name, lock->name);
73 __entry->lockdep_addr = lock;
74 ),
75 67
76 TP_printk("%p %s", 68 TP_PROTO(struct lockdep_map *lock, unsigned long ip),
77 __entry->lockdep_addr, __get_str(name))
78);
79 69
80TRACE_EVENT(lock_acquired, 70 TP_ARGS(lock, ip)
81 TP_PROTO(struct lockdep_map *lock, unsigned long ip, s64 waittime), 71);
82 72
83 TP_ARGS(lock, ip, waittime), 73DEFINE_EVENT(lock, lock_acquired,
84 74
85 TP_STRUCT__entry( 75 TP_PROTO(struct lockdep_map *lock, unsigned long ip),
86 __string(name, lock->name)
87 __field(s64, wait_nsec)
88 __field(void *, lockdep_addr)
89 ),
90 76
91 TP_fast_assign( 77 TP_ARGS(lock, ip)
92 __assign_str(name, lock->name);
93 __entry->wait_nsec = waittime;
94 __entry->lockdep_addr = lock;
95 ),
96 TP_printk("%p %s (%llu ns)", __entry->lockdep_addr,
97 __get_str(name),
98 __entry->wait_nsec)
99); 78);
100 79
101#endif 80#endif
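
The conversion above is an instance of the general DECLARE_EVENT_CLASS/DEFINE_EVENT pattern: one class carries the TP_STRUCT__entry/TP_fast_assign/TP_printk boilerplate and each concrete event just instantiates it. A minimal sketch with hypothetical event names:

DECLARE_EVENT_CLASS(my_class,

	TP_PROTO(int value),

	TP_ARGS(value),

	TP_STRUCT__entry(
		__field(	int,	value	)
	),

	TP_fast_assign(
		__entry->value = value;
	),

	TP_printk("value=%d", __entry->value)
);

DEFINE_EVENT(my_class, my_event,

	TP_PROTO(int value),

	TP_ARGS(value)
);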
diff --git a/include/trace/events/module.h b/include/trace/events/module.h
index 4b0f48ba16a6..c7bb2f0482fe 100644
--- a/include/trace/events/module.h
+++ b/include/trace/events/module.h
@@ -51,11 +51,14 @@ TRACE_EVENT(module_free,
51 TP_printk("%s", __get_str(name)) 51 TP_printk("%s", __get_str(name))
52); 52);
53 53
54#ifdef CONFIG_MODULE_UNLOAD
55/* trace_module_get/put are only used if CONFIG_MODULE_UNLOAD is defined */
56
54DECLARE_EVENT_CLASS(module_refcnt, 57DECLARE_EVENT_CLASS(module_refcnt,
55 58
56 TP_PROTO(struct module *mod, unsigned long ip, int refcnt), 59 TP_PROTO(struct module *mod, unsigned long ip),
57 60
58 TP_ARGS(mod, ip, refcnt), 61 TP_ARGS(mod, ip),
59 62
60 TP_STRUCT__entry( 63 TP_STRUCT__entry(
61 __field( unsigned long, ip ) 64 __field( unsigned long, ip )
@@ -65,7 +68,7 @@ DECLARE_EVENT_CLASS(module_refcnt,
65 68
66 TP_fast_assign( 69 TP_fast_assign(
67 __entry->ip = ip; 70 __entry->ip = ip;
68 __entry->refcnt = refcnt; 71 __entry->refcnt = __this_cpu_read(mod->refptr->incs) + __this_cpu_read(mod->refptr->decs);
69 __assign_str(name, mod->name); 72 __assign_str(name, mod->name);
70 ), 73 ),
71 74
@@ -75,17 +78,18 @@ DECLARE_EVENT_CLASS(module_refcnt,
75 78
76DEFINE_EVENT(module_refcnt, module_get, 79DEFINE_EVENT(module_refcnt, module_get,
77 80
78 TP_PROTO(struct module *mod, unsigned long ip, int refcnt), 81 TP_PROTO(struct module *mod, unsigned long ip),
79 82
80 TP_ARGS(mod, ip, refcnt) 83 TP_ARGS(mod, ip)
81); 84);
82 85
83DEFINE_EVENT(module_refcnt, module_put, 86DEFINE_EVENT(module_refcnt, module_put,
84 87
85 TP_PROTO(struct module *mod, unsigned long ip, int refcnt), 88 TP_PROTO(struct module *mod, unsigned long ip),
86 89
87 TP_ARGS(mod, ip, refcnt) 90 TP_ARGS(mod, ip)
88); 91);
92#endif /* CONFIG_MODULE_UNLOAD */
89 93
90TRACE_EVENT(module_request, 94TRACE_EVENT(module_request,
91 95
diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h
index a8989c4547e7..188deca2f3c7 100644
--- a/include/trace/events/napi.h
+++ b/include/trace/events/napi.h
@@ -1,4 +1,7 @@
1#ifndef _TRACE_NAPI_H_ 1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM napi
3
4#if !defined(_TRACE_NAPI_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _TRACE_NAPI_H_ 5#define _TRACE_NAPI_H_
3 6
4#include <linux/netdevice.h> 7#include <linux/netdevice.h>
@@ -8,4 +11,7 @@ DECLARE_TRACE(napi_poll,
8 TP_PROTO(struct napi_struct *napi), 11 TP_PROTO(struct napi_struct *napi),
9 TP_ARGS(napi)); 12 TP_ARGS(napi));
10 13
11#endif 14#endif /* _TRACE_NAPI_H_ */
15
16/* This part must be outside protection */
17#include <trace/define_trace.h>
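
The napi.h rework above is the standard skeleton every CREATE_TRACE_POINTS-capable trace header follows; a sketch of the shape with a hypothetical system name:

#undef TRACE_SYSTEM
#define TRACE_SYSTEM mysys

#if !defined(_TRACE_MYSYS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MYSYS_H

#include <linux/tracepoint.h>

DECLARE_TRACE(mysys_event,
	TP_PROTO(int value),
	TP_ARGS(value));

#endif /* _TRACE_MYSYS_H */

/* This part must be outside protection */
#include <trace/define_trace.h>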
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index cfceb0b73e20..4f733ecea46e 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -51,15 +51,12 @@ TRACE_EVENT(sched_kthread_stop_ret,
51 51
52/* 52/*
53 * Tracepoint for waiting on task to unschedule: 53 * Tracepoint for waiting on task to unschedule:
54 *
55 * (NOTE: the 'rq' argument is not used by generic trace events,
56 * but used by the latency tracer plugin. )
57 */ 54 */
58TRACE_EVENT(sched_wait_task, 55TRACE_EVENT(sched_wait_task,
59 56
60 TP_PROTO(struct rq *rq, struct task_struct *p), 57 TP_PROTO(struct task_struct *p),
61 58
62 TP_ARGS(rq, p), 59 TP_ARGS(p),
63 60
64 TP_STRUCT__entry( 61 TP_STRUCT__entry(
65 __array( char, comm, TASK_COMM_LEN ) 62 __array( char, comm, TASK_COMM_LEN )
@@ -79,15 +76,12 @@ TRACE_EVENT(sched_wait_task,
79 76
80/* 77/*
81 * Tracepoint for waking up a task: 78 * Tracepoint for waking up a task:
82 *
83 * (NOTE: the 'rq' argument is not used by generic trace events,
84 * but used by the latency tracer plugin. )
85 */ 79 */
86DECLARE_EVENT_CLASS(sched_wakeup_template, 80DECLARE_EVENT_CLASS(sched_wakeup_template,
87 81
88 TP_PROTO(struct rq *rq, struct task_struct *p, int success), 82 TP_PROTO(struct task_struct *p, int success),
89 83
90 TP_ARGS(rq, p, success), 84 TP_ARGS(p, success),
91 85
92 TP_STRUCT__entry( 86 TP_STRUCT__entry(
93 __array( char, comm, TASK_COMM_LEN ) 87 __array( char, comm, TASK_COMM_LEN )
@@ -111,31 +105,25 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
111); 105);
112 106
113DEFINE_EVENT(sched_wakeup_template, sched_wakeup, 107DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
114 TP_PROTO(struct rq *rq, struct task_struct *p, int success), 108 TP_PROTO(struct task_struct *p, int success),
115 TP_ARGS(rq, p, success)); 109 TP_ARGS(p, success));
116 110
117/* 111/*
118 * Tracepoint for waking up a new task: 112 * Tracepoint for waking up a new task:
119 *
120 * (NOTE: the 'rq' argument is not used by generic trace events,
121 * but used by the latency tracer plugin. )
122 */ 113 */
123DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new, 114DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
124 TP_PROTO(struct rq *rq, struct task_struct *p, int success), 115 TP_PROTO(struct task_struct *p, int success),
125 TP_ARGS(rq, p, success)); 116 TP_ARGS(p, success));
126 117
127/* 118/*
128 * Tracepoint for task switches, performed by the scheduler: 119 * Tracepoint for task switches, performed by the scheduler:
129 *
130 * (NOTE: the 'rq' argument is not used by generic trace events,
131 * but used by the latency tracer plugin. )
132 */ 120 */
133TRACE_EVENT(sched_switch, 121TRACE_EVENT(sched_switch,
134 122
135 TP_PROTO(struct rq *rq, struct task_struct *prev, 123 TP_PROTO(struct task_struct *prev,
136 struct task_struct *next), 124 struct task_struct *next),
137 125
138 TP_ARGS(rq, prev, next), 126 TP_ARGS(prev, next),
139 127
140 TP_STRUCT__entry( 128 TP_STRUCT__entry(
141 __array( char, prev_comm, TASK_COMM_LEN ) 129 __array( char, prev_comm, TASK_COMM_LEN )
diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h
index a510b75ac304..814566c99d29 100644
--- a/include/trace/events/signal.h
+++ b/include/trace/events/signal.h
@@ -100,18 +100,7 @@ TRACE_EVENT(signal_deliver,
100 __entry->sa_handler, __entry->sa_flags) 100 __entry->sa_handler, __entry->sa_flags)
101); 101);
102 102
103/** 103DECLARE_EVENT_CLASS(signal_queue_overflow,
104 * signal_overflow_fail - called when the signal queue overflows
105 * @sig: signal number
106 * @group: signal to process group or not (bool)
107 * @info: pointer to struct siginfo
108 *
109 * The kernel fails to generate the 'sig' signal with 'info' siginfo
110 * because the siginfo queue has overflowed, and the signal is dropped.
111 * 'group' is not 0 if the signal will be sent to a process group.
112 * 'sig' is always one of the RT signals.
113 */
114TRACE_EVENT(signal_overflow_fail,
115 104
116 TP_PROTO(int sig, int group, struct siginfo *info), 105 TP_PROTO(int sig, int group, struct siginfo *info),
117 106
@@ -135,6 +124,24 @@ TRACE_EVENT(signal_overflow_fail,
135); 124);
136 125
137/** 126/**
127 * signal_overflow_fail - called when the signal queue overflows
128 * @sig: signal number
129 * @group: signal to process group or not (bool)
130 * @info: pointer to struct siginfo
131 *
132 * The kernel fails to generate the 'sig' signal with 'info' siginfo
133 * because the siginfo queue has overflowed, and the signal is dropped.
134 * 'group' is not 0 if the signal will be sent to a process group.
135 * 'sig' is always one of the RT signals.
136 */
137DEFINE_EVENT(signal_queue_overflow, signal_overflow_fail,
138
139 TP_PROTO(int sig, int group, struct siginfo *info),
140
141 TP_ARGS(sig, group, info)
142);
143
144/**
138 * signal_lose_info - called when siginfo is lost 145 * signal_lose_info - called when siginfo is lost
139 * @sig: signal number 146 * @sig: signal number
140 * @group: signal to process group or not (bool) 147 * @group: signal to process group or not (bool)
@@ -145,28 +152,13 @@ TRACE_EVENT(signal_overflow_fail,
145 * 'group' is not 0 if the signal will be sent to a process group. 152 * 'group' is not 0 if the signal will be sent to a process group.
146 * 'sig' is always one of the non-RT signals. 153 * 'sig' is always one of the non-RT signals.
147 */ 154 */
148TRACE_EVENT(signal_lose_info, 155DEFINE_EVENT(signal_queue_overflow, signal_lose_info,
149 156
150 TP_PROTO(int sig, int group, struct siginfo *info), 157 TP_PROTO(int sig, int group, struct siginfo *info),
151 158
152 TP_ARGS(sig, group, info), 159 TP_ARGS(sig, group, info)
153
154 TP_STRUCT__entry(
155 __field( int, sig )
156 __field( int, group )
157 __field( int, errno )
158 __field( int, code )
159 ),
160
161 TP_fast_assign(
162 __entry->sig = sig;
163 __entry->group = group;
164 TP_STORE_SIGINFO(__entry, info);
165 ),
166
167 TP_printk("sig=%d group=%d errno=%d code=%d",
168 __entry->sig, __entry->group, __entry->errno, __entry->code)
169); 160);
161
170#endif /* _TRACE_SIGNAL_H */ 162#endif /* _TRACE_SIGNAL_H */
171 163
172/* This part must be outside protection */ 164/* This part must be outside protection */
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index ea6f9d4a20e9..16253db38d73 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -154,9 +154,11 @@
154 * 154 *
155 * field = (typeof(field))entry; 155 * field = (typeof(field))entry;
156 * 156 *
157 * p = get_cpu_var(ftrace_event_seq); 157 * p = &get_cpu_var(ftrace_event_seq);
158 * trace_seq_init(p); 158 * trace_seq_init(p);
159 * ret = trace_seq_printf(s, <TP_printk> "\n"); 159 * ret = trace_seq_printf(s, "%s: ", <call>);
160 * if (ret)
161 * ret = trace_seq_printf(s, <TP_printk> "\n");
160 * put_cpu(); 162 * put_cpu();
161 * if (!ret) 163 * if (!ret)
162 * return TRACE_TYPE_PARTIAL_LINE; 164 * return TRACE_TYPE_PARTIAL_LINE;
@@ -450,38 +452,38 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
450 * 452 *
451 * static void ftrace_raw_event_<call>(proto) 453 * static void ftrace_raw_event_<call>(proto)
452 * { 454 * {
455 * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
453 * struct ring_buffer_event *event; 456 * struct ring_buffer_event *event;
454 * struct ftrace_raw_<call> *entry; <-- defined in stage 1 457 * struct ftrace_raw_<call> *entry; <-- defined in stage 1
455 * struct ring_buffer *buffer; 458 * struct ring_buffer *buffer;
456 * unsigned long irq_flags; 459 * unsigned long irq_flags;
460 * int __data_size;
457 * int pc; 461 * int pc;
458 * 462 *
459 * local_save_flags(irq_flags); 463 * local_save_flags(irq_flags);
460 * pc = preempt_count(); 464 * pc = preempt_count();
461 * 465 *
466 * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
467 *
462 * event = trace_current_buffer_lock_reserve(&buffer, 468 * event = trace_current_buffer_lock_reserve(&buffer,
463 * event_<call>.id, 469 * event_<call>.id,
464 * sizeof(struct ftrace_raw_<call>), 470 * sizeof(*entry) + __data_size,
465 * irq_flags, pc); 471 * irq_flags, pc);
466 * if (!event) 472 * if (!event)
467 * return; 473 * return;
468 * entry = ring_buffer_event_data(event); 474 * entry = ring_buffer_event_data(event);
469 * 475 *
470 * <assign>; <-- Here we assign the entries by the __field and 476 * { <assign>; } <-- Here we assign the entries by the __field and
471 * __array macros. 477 * __array macros.
472 * 478 *
473 * trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc); 479 * if (!filter_current_check_discard(buffer, event_call, entry, event))
480 * trace_current_buffer_unlock_commit(buffer,
481 * event, irq_flags, pc);
474 * } 482 * }
475 * 483 *
476 * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused) 484 * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
477 * { 485 * {
478 * int ret; 486 * return register_trace_<call>(ftrace_raw_event_<call>);
479 *
480 * ret = register_trace_<call>(ftrace_raw_event_<call>);
481 * if (!ret)
482 * pr_info("event trace: Could not activate trace point "
483 * "probe to <call>");
484 * return ret;
485 * } 487 * }
486 * 488 *
487 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused) 489 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
@@ -493,6 +495,8 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
493 * .trace = ftrace_raw_output_<call>, <-- stage 2 495 * .trace = ftrace_raw_output_<call>, <-- stage 2
494 * }; 496 * };
495 * 497 *
498 * static const char print_fmt_<call>[] = <TP_printk>;
499 *
496 * static struct ftrace_event_call __used 500 * static struct ftrace_event_call __used
497 * __attribute__((__aligned__(4))) 501 * __attribute__((__aligned__(4)))
498 * __attribute__((section("_ftrace_events"))) event_<call> = { 502 * __attribute__((section("_ftrace_events"))) event_<call> = {
@@ -501,6 +505,8 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
501 * .raw_init = trace_event_raw_init, 505 * .raw_init = trace_event_raw_init,
502 * .regfunc = ftrace_reg_event_<call>, 506 * .regfunc = ftrace_reg_event_<call>,
503 * .unregfunc = ftrace_unreg_event_<call>, 507 * .unregfunc = ftrace_unreg_event_<call>,
508 * .print_fmt = print_fmt_<call>,
509 * .define_fields = ftrace_define_fields_<call>,
504 * } 510 * }
505 * 511 *
506 */ 512 */
@@ -569,7 +575,6 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
569 return; \ 575 return; \
570 entry = ring_buffer_event_data(event); \ 576 entry = ring_buffer_event_data(event); \
571 \ 577 \
572 \
573 tstruct \ 578 tstruct \
574 \ 579 \
575 { assign; } \ 580 { assign; } \
@@ -758,13 +763,12 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
758#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 763#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
759static notrace void \ 764static notrace void \
760perf_trace_templ_##call(struct ftrace_event_call *event_call, \ 765perf_trace_templ_##call(struct ftrace_event_call *event_call, \
761 proto) \ 766 struct pt_regs *__regs, proto) \
762{ \ 767{ \
763 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ 768 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
764 struct ftrace_raw_##call *entry; \ 769 struct ftrace_raw_##call *entry; \
765 u64 __addr = 0, __count = 1; \ 770 u64 __addr = 0, __count = 1; \
766 unsigned long irq_flags; \ 771 unsigned long irq_flags; \
767 struct pt_regs *__regs; \
768 int __entry_size; \ 772 int __entry_size; \
769 int __data_size; \ 773 int __data_size; \
770 int rctx; \ 774 int rctx; \
@@ -785,20 +789,22 @@ perf_trace_templ_##call(struct ftrace_event_call *event_call, \
785 \ 789 \
786 { assign; } \ 790 { assign; } \
787 \ 791 \
788 __regs = &__get_cpu_var(perf_trace_regs); \
789 perf_fetch_caller_regs(__regs, 2); \
790 \
791 perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \ 792 perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
792 __count, irq_flags, __regs); \ 793 __count, irq_flags, __regs); \
793} 794}
794 795
795#undef DEFINE_EVENT 796#undef DEFINE_EVENT
796#define DEFINE_EVENT(template, call, proto, args) \ 797#define DEFINE_EVENT(template, call, proto, args) \
797static notrace void perf_trace_##call(proto) \ 798static notrace void perf_trace_##call(proto) \
798{ \ 799{ \
799 struct ftrace_event_call *event_call = &event_##call; \ 800 struct ftrace_event_call *event_call = &event_##call; \
800 \ 801 struct pt_regs *__regs = &get_cpu_var(perf_trace_regs); \
801 perf_trace_templ_##template(event_call, args); \ 802 \
803 perf_fetch_caller_regs(__regs, 1); \
804 \
805 perf_trace_templ_##template(event_call, __regs, args); \
806 \
807 put_cpu_var(perf_trace_regs); \
802} 808}
803 809
804#undef DEFINE_EVENT_PRINT 810#undef DEFINE_EVENT_PRINT