Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/amba/serial.h  |   2
-rw-r--r--  include/linux/clocksource.h  |   7
-rw-r--r--  include/linux/dcache.h       |  20
-rw-r--r--  include/linux/ftrace.h       |  77
-rw-r--r--  include/linux/ftrace_event.h |   9
-rw-r--r--  include/linux/genhd.h        |   1
-rw-r--r--  include/linux/init_task.h    |   2
-rw-r--r--  include/linux/interrupt.h    |   7
-rw-r--r--  include/linux/iocontext.h    |  10
-rw-r--r--  include/linux/jump_label.h   | 162
-rw-r--r--  include/linux/kernel.h       |  13
-rw-r--r--  include/linux/kmsg_dump.h    |   9
-rw-r--r--  include/linux/math64.h       |   4
-rw-r--r--  include/linux/memcontrol.h   |   5
-rw-r--r--  include/linux/netdevice.h    |   4
-rw-r--r--  include/linux/netfilter.h    |   6
-rw-r--r--  include/linux/of.h           |   8
-rw-r--r--  include/linux/percpu.h       |  29
-rw-r--r--  include/linux/perf_event.h   | 108
-rw-r--r--  include/linux/preempt.h      |   5
-rw-r--r--  include/linux/printk.h       |  10
-rw-r--r--  include/linux/rcupdate.h     |  83
-rw-r--r--  include/linux/rcutiny.h      |  10
-rw-r--r--  include/linux/rcutree.h      |  19
-rw-r--r--  include/linux/sched.h        |  57
-rw-r--r--  include/linux/srcu.h         |  15
-rw-r--r--  include/linux/static_key.h   |   1
-rw-r--r--  include/linux/tcp.h          |   3
-rw-r--r--  include/linux/timex.h        |  17
-rw-r--r--  include/linux/tracepoint.h   |  28
-rw-r--r--  include/linux/wait.h         |   5
-rw-r--r--  include/linux/workqueue.h    |   4
32 files changed, 569 insertions, 171 deletions
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h
index 514ed45c462..d117b29d106 100644
--- a/include/linux/amba/serial.h
+++ b/include/linux/amba/serial.h
@@ -23,6 +23,8 @@
 #ifndef ASM_ARM_HARDWARE_SERIAL_AMBA_H
 #define ASM_ARM_HARDWARE_SERIAL_AMBA_H
 
+#include <linux/types.h>
+
 /* -------------------------------------------------------------------------------
  *  From AMBA UART (PL010) Block Specification
  * -------------------------------------------------------------------------------
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 081147da056..fbe89e17124 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -319,13 +319,6 @@ static inline void __clocksource_updatefreq_khz(struct clocksource *cs, u32 khz)
 	__clocksource_updatefreq_scale(cs, 1000, khz);
 }
 
-static inline void
-clocksource_calc_mult_shift(struct clocksource *cs, u32 freq, u32 minsec)
-{
-	return clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
-				      NSEC_PER_SEC, minsec);
-}
-
 #ifdef CONFIG_GENERIC_TIME_VSYSCALL
 extern void
 update_vsyscall(struct timespec *ts, struct timespec *wtm,
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 4270bedd230..ff5f5256d17 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -47,26 +47,6 @@ struct dentry_stat_t {
 };
 extern struct dentry_stat_t dentry_stat;
 
-/*
- * Compare 2 name strings, return 0 if they match, otherwise non-zero.
- * The strings are both count bytes long, and count is non-zero.
- */
-static inline int dentry_cmp(const unsigned char *cs, size_t scount,
-				const unsigned char *ct, size_t tcount)
-{
-	if (scount != tcount)
-		return 1;
-
-	do {
-		if (*cs != *ct)
-			return 1;
-		cs++;
-		ct++;
-		tcount--;
-	} while (tcount);
-	return 0;
-}
-
 /* Name hashing routines. Initial hash value */
 /* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
 #define init_name_hash()		0
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 028e26f0bf0..72a6cabb4d5 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -31,16 +31,33 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
 
+/*
+ * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
+ * set in the flags member.
+ *
+ * ENABLED - set/unset when ftrace_ops is registered/unregistered
+ * GLOBAL  - set manually by ftrace_ops user to denote the ftrace_ops
+ *           is part of the global tracers sharing the same filter
+ *           via set_ftrace_* debugfs files.
+ * DYNAMIC - set when ftrace_ops is registered to denote dynamically
+ *           allocated ftrace_ops which need special care
+ * CONTROL - set manually by ftrace_ops user to denote the ftrace_ops
+ *           could be controlled by the following calls:
+ *             ftrace_function_local_enable
+ *             ftrace_function_local_disable
+ */
 enum {
 	FTRACE_OPS_FL_ENABLED	= 1 << 0,
 	FTRACE_OPS_FL_GLOBAL	= 1 << 1,
 	FTRACE_OPS_FL_DYNAMIC	= 1 << 2,
+	FTRACE_OPS_FL_CONTROL	= 1 << 3,
 };
 
 struct ftrace_ops {
 	ftrace_func_t		func;
 	struct ftrace_ops	*next;
 	unsigned long		flags;
+	int __percpu		*disabled;
 #ifdef CONFIG_DYNAMIC_FTRACE
 	struct ftrace_hash	*notrace_hash;
 	struct ftrace_hash	*filter_hash;
@@ -97,6 +114,55 @@ int register_ftrace_function(struct ftrace_ops *ops);
 int unregister_ftrace_function(struct ftrace_ops *ops);
 void clear_ftrace_function(void);
 
+/**
+ * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu
+ *
+ * This function enables tracing on current cpu by decreasing
+ * the per cpu control variable.
+ * It must be called with preemption disabled and only on ftrace_ops
+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ */
+static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
+{
+	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
+		return;
+
+	(*this_cpu_ptr(ops->disabled))--;
+}
+
+/**
+ * ftrace_function_local_disable - disable controlled ftrace_ops on current cpu
+ *
+ * This function disables tracing on current cpu by increasing
+ * the per cpu control variable.
+ * It must be called with preemption disabled and only on ftrace_ops
+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ */
+static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
+{
+	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
+		return;
+
+	(*this_cpu_ptr(ops->disabled))++;
+}
+
+/**
+ * ftrace_function_local_disabled - returns ftrace_ops disabled value
+ *                                  on current cpu
+ *
+ * This function returns value of ftrace_ops::disabled on current cpu.
+ * It must be called with preemption disabled and only on ftrace_ops
+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ */
+static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
+{
+	WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
+	return *this_cpu_ptr(ops->disabled);
+}
+
 extern void ftrace_stub(unsigned long a0, unsigned long a1);
 
 #else /* !CONFIG_FUNCTION_TRACER */
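
The CONTROL flag pairs with the per cpu helpers above: the ops is registered
once, and tracing is then toggled cheaply per cpu. A minimal usage sketch (not
part of this patch; my_trace_func/my_ops are hypothetical names, and
ops->disabled is assumed to be set up when a CONTROL ops is registered):

	static void my_trace_func(unsigned long ip, unsigned long parent_ip)
	{
		/* called for every traced function while enabled on this cpu */
	}

	static struct ftrace_ops my_ops = {
		.func	= my_trace_func,
		.flags	= FTRACE_OPS_FL_CONTROL,
	};

	/* ... after register_ftrace_function(&my_ops) succeeds ... */

	preempt_disable();			/* the helpers require this */
	ftrace_function_local_enable(&my_ops);	/* trace on this cpu only */
	/* ... */
	ftrace_function_local_disable(&my_ops);	/* and stop again */
	preempt_enable();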
@@ -178,12 +244,13 @@ struct dyn_ftrace {
 };
 
 int ftrace_force_update(void);
-void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
-		       int len, int reset);
-void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
-			int len, int reset);
+int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
+		      int len, int reset);
+int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
+		       int len, int reset);
 void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
+void ftrace_free_filter(struct ftrace_ops *ops);
 
 int register_ftrace_command(struct ftrace_func_command *cmd);
 int unregister_ftrace_command(struct ftrace_func_command *cmd);
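
With the setters now returning int, callers can detect a bad pattern or an
allocation failure; a sketch (filter string illustrative, my_ops as in the
earlier sketch):

	unsigned char buf[] = "do_exit";
	int ret;

	ret = ftrace_set_filter(&my_ops, buf, sizeof(buf) - 1, 1);
	if (ret)
		pr_warn("ftrace filter not set: %d\n", ret);
	/* pairs with ftrace_free_filter(&my_ops) on teardown */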
@@ -314,9 +381,6 @@ extern void ftrace_enable_daemon(void);
 #else
 static inline int skip_trace(unsigned long ip) { return 0; }
 static inline int ftrace_force_update(void) { return 0; }
-static inline void ftrace_set_filter(unsigned char *buf, int len, int reset)
-{
-}
 static inline void ftrace_disable_daemon(void) { }
 static inline void ftrace_enable_daemon(void) { }
 static inline void ftrace_release_mod(struct module *mod) {}
@@ -340,6 +404,9 @@ static inline int ftrace_text_reserved(void *start, void *end)
  */
 #define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
 #define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
+#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
+#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
+#define ftrace_free_filter(ops) do { } while (0)
 
 static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
 			    size_t cnt, loff_t *ppos) { return -ENODEV; }
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index c3da42dd22b..dd478fc8f9f 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -146,6 +146,10 @@ enum trace_reg {
 	TRACE_REG_UNREGISTER,
 	TRACE_REG_PERF_REGISTER,
 	TRACE_REG_PERF_UNREGISTER,
+	TRACE_REG_PERF_OPEN,
+	TRACE_REG_PERF_CLOSE,
+	TRACE_REG_PERF_ADD,
+	TRACE_REG_PERF_DEL,
 };
 
 struct ftrace_event_call;
@@ -157,7 +161,7 @@ struct ftrace_event_class {
 	void			*perf_probe;
 #endif
 	int			(*reg)(struct ftrace_event_call *event,
-				       enum trace_reg type);
+				       enum trace_reg type, void *data);
 	int			(*define_fields)(struct ftrace_event_call *);
 	struct list_head	*(*get_fields)(struct ftrace_event_call *);
 	struct list_head	fields;
@@ -165,7 +169,7 @@ struct ftrace_event_class {
 };
 
 extern int ftrace_event_reg(struct ftrace_event_call *event,
-			    enum trace_reg type);
+			    enum trace_reg type, void *data);
 
 enum {
 	TRACE_EVENT_FL_ENABLED_BIT,
@@ -241,6 +245,7 @@ enum {
 	FILTER_STATIC_STRING,
 	FILTER_DYN_STRING,
 	FILTER_PTR_STRING,
+	FILTER_TRACE_FN,
 };
 
 #define EVENT_STORAGE_SIZE 128
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index fe23ee76858..e61d3192448 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -596,6 +596,7 @@ extern char *disk_name (struct gendisk *hd, int partno, char *buf);
 
 extern int disk_expand_part_tbl(struct gendisk *disk, int target);
 extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
+extern int invalidate_partitions(struct gendisk *disk, struct block_device *bdev);
 extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
 						     int partno, sector_t start,
 						     sector_t len, int flags,
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 9c66b1ada9d..f994d51f70f 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -149,7 +149,7 @@ extern struct cred init_cred;
 	},								\
 	.rt		= {						\
 		.run_list	= LIST_HEAD_INIT(tsk.rt.run_list),	\
-		.time_slice	= HZ,					\
+		.time_slice	= RR_TIMESLICE,				\
 		.nr_cpus_allowed = NR_CPUS,				\
 	},								\
 	.tasks		= LIST_HEAD_INIT(tsk.tasks),			\
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index a64b00e286f..3f830e00511 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -20,7 +20,6 @@
 #include <linux/atomic.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
-#include <trace/events/irq.h>
 
 /*
  * These correspond to the IORESOURCE_IRQ_* defines in
@@ -456,11 +455,7 @@ asmlinkage void do_softirq(void);
 asmlinkage void __do_softirq(void);
 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
 extern void softirq_init(void);
-static inline void __raise_softirq_irqoff(unsigned int nr)
-{
-	trace_softirq_raise(nr);
-	or_softirq_pending(1UL << nr);
-}
+extern void __raise_softirq_irqoff(unsigned int nr);
 
 extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 119773eebe3..1a301806303 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -6,8 +6,11 @@
 #include <linux/workqueue.h>
 
 enum {
-	ICQ_IOPRIO_CHANGED,
-	ICQ_CGROUP_CHANGED,
+	ICQ_IOPRIO_CHANGED	= 1 << 0,
+	ICQ_CGROUP_CHANGED	= 1 << 1,
+	ICQ_EXITED		= 1 << 2,
+
+	ICQ_CHANGED_MASK	= ICQ_IOPRIO_CHANGED | ICQ_CGROUP_CHANGED,
 };
 
 /*
@@ -88,7 +91,7 @@ struct io_cq {
 		struct rcu_head		__rcu_head;
 	};
 
-	unsigned long		changed;
+	unsigned int		flags;
 };
 
 /*
@@ -139,6 +142,7 @@ struct io_context *get_task_io_context(struct task_struct *task,
 					       gfp_t gfp_flags, int node);
 void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
 void ioc_cgroup_changed(struct io_context *ioc);
+unsigned int icq_get_changed(struct io_cq *icq);
 #else
 struct io_context;
 static inline void put_io_context(struct io_context *ioc) { }
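
A sketch of the consumer pattern the new mask and accessor enable (assuming
icq_get_changed() fetches and clears the changed bits; the elevator-side
reactions are illustrative):

	unsigned int changed = icq_get_changed(icq);

	if (changed & ICQ_IOPRIO_CHANGED)
		/* re-read the task's io priority */;
	if (changed & ICQ_CGROUP_CHANGED)
		/* invalidate cached cgroup state */;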
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 5ce8b140428..c513a40510f 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -1,22 +1,69 @@
 #ifndef _LINUX_JUMP_LABEL_H
 #define _LINUX_JUMP_LABEL_H
 
+/*
+ * Jump label support
+ *
+ * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
+ * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com>
+ *
+ * Jump labels provide an interface to generate dynamic branches using
+ * self-modifying code. Assuming toolchain and architecture support, the
+ * result of an "if (static_key_false(&key))" statement is an unconditional
+ * branch (which defaults to false - and the true block is placed out of
+ * line).
+ *
+ * However, at runtime we can change the branch target using
+ * static_key_slow_{inc,dec}(). These functions act as a 'reference' count on
+ * the key object, and for as long as there are references all branches
+ * referring to that particular key will point to the (out of line) true
+ * block.
+ *
+ * Since this relies on modifying code, the static_key_slow_{inc,dec}()
+ * functions must be considered absolute slow paths (machine wide
+ * synchronization etc.). OTOH, since the affected branches are
+ * unconditional, their runtime overhead will be absolutely minimal, esp. in
+ * the default (off) case where the total effect is a single NOP of
+ * appropriate size. The on case will patch in a jump to the out-of-line
+ * block.
+ *
+ * When the control is directly exposed to userspace, it is prudent to delay
+ * the decrement to avoid high frequency code modifications which can (and
+ * do) cause significant performance degradation. Struct static_key_deferred
+ * and static_key_slow_dec_deferred() provide for this.
+ *
+ * Lacking toolchain and/or architecture support, it falls back to a simple
+ * conditional branch.
+ *
+ * struct static_key my_key = STATIC_KEY_INIT_TRUE;
+ *
+ * if (static_key_true(&my_key)) {
+ * }
+ *
+ * will result in the true case being in-line and starts the key with a
+ * single reference. Mixing static_key_true() and static_key_false() on the
+ * same key is not allowed.
+ *
+ * Not initializing the key (static data is initialized to 0s anyway) is the
+ * same as using STATIC_KEY_INIT_FALSE, and static_key_false() is
+ * equivalent to static_branch().
+ */
+
 #include <linux/types.h>
 #include <linux/compiler.h>
 #include <linux/workqueue.h>
 
 #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
 
-struct jump_label_key {
+struct static_key {
 	atomic_t enabled;
+/* Set lsb bit to 1 if branch is default true, 0 otherwise */
 	struct jump_entry *entries;
 #ifdef CONFIG_MODULES
-	struct jump_label_mod *next;
+	struct static_key_mod *next;
 #endif
 };
 
-struct jump_label_key_deferred {
-	struct jump_label_key key;
+struct static_key_deferred {
+	struct static_key key;
 	unsigned long timeout;
 	struct delayed_work work;
 };
@@ -34,13 +81,34 @@ struct module;
 
 #ifdef HAVE_JUMP_LABEL
 
-#ifdef CONFIG_MODULES
-#define JUMP_LABEL_INIT {ATOMIC_INIT(0), NULL, NULL}
-#else
-#define JUMP_LABEL_INIT {ATOMIC_INIT(0), NULL}
-#endif
-
-static __always_inline bool static_branch(struct jump_label_key *key)
+#define JUMP_LABEL_TRUE_BRANCH 1UL
+
+static
+inline struct jump_entry *jump_label_get_entries(struct static_key *key)
+{
+	return (struct jump_entry *)((unsigned long)key->entries
+						& ~JUMP_LABEL_TRUE_BRANCH);
+}
+
+static inline bool jump_label_get_branch_default(struct static_key *key)
+{
+	if ((unsigned long)key->entries & JUMP_LABEL_TRUE_BRANCH)
+		return true;
+	return false;
+}
+
+static __always_inline bool static_key_false(struct static_key *key)
+{
+	return arch_static_branch(key);
+}
+
+static __always_inline bool static_key_true(struct static_key *key)
+{
+	return !static_key_false(key);
+}
+
+/* Deprecated. Please use static_key_false() instead. */
+static __always_inline bool static_branch(struct static_key *key)
 {
 	return arch_static_branch(key);
 }
@@ -56,21 +124,23 @@ extern void arch_jump_label_transform(struct jump_entry *entry,
 extern void arch_jump_label_transform_static(struct jump_entry *entry,
 					     enum jump_label_type type);
 extern int jump_label_text_reserved(void *start, void *end);
-extern void jump_label_inc(struct jump_label_key *key);
-extern void jump_label_dec(struct jump_label_key *key);
-extern void jump_label_dec_deferred(struct jump_label_key_deferred *key);
-extern bool jump_label_enabled(struct jump_label_key *key);
+extern void static_key_slow_inc(struct static_key *key);
+extern void static_key_slow_dec(struct static_key *key);
+extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
 extern void jump_label_apply_nops(struct module *mod);
-extern void jump_label_rate_limit(struct jump_label_key_deferred *key,
-		unsigned long rl);
+extern void
+jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
+
+#define STATIC_KEY_INIT_TRUE ((struct static_key) \
+	{ .enabled = ATOMIC_INIT(1), .entries = (void *)1 })
+#define STATIC_KEY_INIT_FALSE ((struct static_key) \
+	{ .enabled = ATOMIC_INIT(0), .entries = (void *)0 })
 
 #else  /* !HAVE_JUMP_LABEL */
 
 #include <linux/atomic.h>
 
-#define JUMP_LABEL_INIT {ATOMIC_INIT(0)}
-
-struct jump_label_key {
+struct static_key {
 	atomic_t enabled;
 };
 
@@ -78,30 +148,45 @@ static __always_inline void jump_label_init(void)
 {
 }
 
-struct jump_label_key_deferred {
-	struct jump_label_key key;
+struct static_key_deferred {
+	struct static_key key;
 };
 
-static __always_inline bool static_branch(struct jump_label_key *key)
+static __always_inline bool static_key_false(struct static_key *key)
+{
+	if (unlikely(atomic_read(&key->enabled) > 0))
+		return true;
+	return false;
+}
+
+static __always_inline bool static_key_true(struct static_key *key)
 {
-	if (unlikely(atomic_read(&key->enabled)))
+	if (likely(atomic_read(&key->enabled) > 0))
 		return true;
 	return false;
 }
 
-static inline void jump_label_inc(struct jump_label_key *key)
+/* Deprecated. Please use static_key_false() instead. */
+static __always_inline bool static_branch(struct static_key *key)
+{
+	if (unlikely(atomic_read(&key->enabled) > 0))
+		return true;
+	return false;
+}
+
+static inline void static_key_slow_inc(struct static_key *key)
 {
 	atomic_inc(&key->enabled);
 }
 
-static inline void jump_label_dec(struct jump_label_key *key)
+static inline void static_key_slow_dec(struct static_key *key)
 {
 	atomic_dec(&key->enabled);
 }
 
-static inline void jump_label_dec_deferred(struct jump_label_key_deferred *key)
+static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
 {
-	jump_label_dec(&key->key);
+	static_key_slow_dec(&key->key);
 }
 
 static inline int jump_label_text_reserved(void *start, void *end)
@@ -112,23 +197,30 @@ static inline int jump_label_text_reserved(void *start, void *end)
 static inline void jump_label_lock(void) {}
 static inline void jump_label_unlock(void) {}
 
-static inline bool jump_label_enabled(struct jump_label_key *key)
-{
-	return !!atomic_read(&key->enabled);
-}
-
 static inline int jump_label_apply_nops(struct module *mod)
 {
 	return 0;
 }
 
-static inline void jump_label_rate_limit(struct jump_label_key_deferred *key,
+static inline void
+jump_label_rate_limit(struct static_key_deferred *key,
 		unsigned long rl)
 {
 }
+
+#define STATIC_KEY_INIT_TRUE ((struct static_key) \
+	{ .enabled = ATOMIC_INIT(1) })
+#define STATIC_KEY_INIT_FALSE ((struct static_key) \
+	{ .enabled = ATOMIC_INIT(0) })
+
 #endif	/* HAVE_JUMP_LABEL */
 
-#define jump_label_key_enabled	((struct jump_label_key){ .enabled = ATOMIC_INIT(1), })
-#define jump_label_key_disabled	((struct jump_label_key){ .enabled = ATOMIC_INIT(0), })
+#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
+#define jump_label_enabled static_key_enabled
+
+static inline bool static_key_enabled(struct static_key *key)
+{
+	return (atomic_read(&key->enabled) > 0);
+}
 
 #endif	/* _LINUX_JUMP_LABEL_H */
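
A minimal usage sketch of the renamed API, matching the example in the header
comment above (my_feature_key, my_hot_path() and do_slow_extra_work() are
hypothetical names):

	#include <linux/jump_label.h>

	static struct static_key my_feature_key = STATIC_KEY_INIT_FALSE;

	void my_hot_path(void)
	{
		/* compiles to a single NOP until the key is enabled */
		if (static_key_false(&my_feature_key))
			do_slow_extra_work();
	}

	/* slow path: patches every branch referring to the key */
	static_key_slow_inc(&my_feature_key);	/* enable */
	static_key_slow_dec(&my_feature_key);	/* disable once count hits 0 */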
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index e8343422240..d801acb5e68 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -85,6 +85,19 @@
 }					\
 )
 
+/*
+ * Multiplies an integer by a fraction, while avoiding unnecessary
+ * overflow or loss of precision.
+ */
+#define mult_frac(x, numer, denom)(			\
+{							\
+	typeof(x) quot = (x) / (denom);			\
+	typeof(x) rem  = (x) % (denom);			\
+	(quot * (numer)) + ((rem * (numer)) / (denom));	\
+}							\
+)
+
+
 #define _RET_IP_		(unsigned long)__builtin_return_address(0)
 #define _THIS_IP_  ({ __label__ __here; __here: (unsigned long)&&__here; })
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
index fee66317e07..35f7237ec97 100644
--- a/include/linux/kmsg_dump.h
+++ b/include/linux/kmsg_dump.h
@@ -15,13 +15,18 @@
 #include <linux/errno.h>
 #include <linux/list.h>
 
+/*
+ * Keep this list arranged in rough order of priority. Anything listed after
+ * KMSG_DUMP_OOPS will not be logged by default unless printk.always_kmsg_dump
+ * is passed to the kernel.
+ */
 enum kmsg_dump_reason {
-	KMSG_DUMP_OOPS,
 	KMSG_DUMP_PANIC,
+	KMSG_DUMP_OOPS,
+	KMSG_DUMP_EMERG,
 	KMSG_DUMP_RESTART,
 	KMSG_DUMP_HALT,
 	KMSG_DUMP_POWEROFF,
-	KMSG_DUMP_EMERG,
 };
 
 /**
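
With the reordered enum, a dumper can gate on priority with a single compare;
a sketch (callback body illustrative; the dump signature is the one existing
kmsg_dumper users implement):

	static void my_dump(struct kmsg_dumper *dumper,
			    enum kmsg_dump_reason reason,
			    const char *s1, unsigned long l1,
			    const char *s2, unsigned long l2)
	{
		/* skip the low-priority reasons listed after OOPS */
		if (reason > KMSG_DUMP_OOPS)
			return;
		/* ... persist s1/l1 and s2/l2 ... */
	}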
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 23fcdfcba81..b8ba8554472 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -6,6 +6,8 @@
 
 #if BITS_PER_LONG == 64
 
+#define div64_long(x,y) div64_s64((x),(y))
+
 /**
  * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
  *
@@ -45,6 +47,8 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
 
 #elif BITS_PER_LONG == 32
 
+#define div64_long(x,y) div_s64((x),(y))
+
 #ifndef div_u64_rem
 static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 {
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 4d34356fe64..b80de520670 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -129,7 +129,6 @@ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 extern void mem_cgroup_replace_page_cache(struct page *oldpage,
 					struct page *newpage);
 
-extern void mem_cgroup_reset_owner(struct page *page);
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 extern int do_swap_account;
 #endif
@@ -392,10 +391,6 @@ static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
 					struct page *newpage)
 {
 }
-
-static inline void mem_cgroup_reset_owner(struct page *page)
-{
-}
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f486f635e7b..3e5cb2546e4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -214,8 +214,8 @@ enum {
 #include <linux/skbuff.h>
 
 #ifdef CONFIG_RPS
-#include <linux/jump_label.h>
-extern struct jump_label_key rps_needed;
+#include <linux/static_key.h>
+extern struct static_key rps_needed;
 #endif
 
 struct neighbour;
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index b809265607d..29734be334c 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -163,13 +163,13 @@ extern struct ctl_path nf_net_ipv4_netfilter_sysctl_path[];
 extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 
 #if defined(CONFIG_JUMP_LABEL)
-#include <linux/jump_label.h>
-extern struct jump_label_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+#include <linux/static_key.h>
+extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
 {
 	if (__builtin_constant_p(pf) &&
 	    __builtin_constant_p(hook))
-		return static_branch(&nf_hooks_needed[pf][hook]);
+		return static_key_false(&nf_hooks_needed[pf][hook]);
 
 	return !list_empty(&nf_hooks[pf][hook]);
 }
diff --git a/include/linux/of.h b/include/linux/of.h
index a75a831e205..92cf6ad35e0 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -281,6 +281,14 @@ static inline struct property *of_find_property(const struct device_node *np,
 	return NULL;
 }
 
+static inline struct device_node *of_find_compatible_node(
+						struct device_node *from,
+						const char *type,
+						const char *compat)
+{
+	return NULL;
+}
+
 static inline int of_property_read_u32_array(const struct device_node *np,
 					     const char *propname,
 					     u32 *out_values, size_t sz)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 32cd1f67462..21638ae14e0 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -348,9 +348,9 @@ do { \
 #define _this_cpu_generic_to_op(pcp, val, op)				\
 do {									\
 	unsigned long flags;						\
-	local_irq_save(flags);						\
+	raw_local_irq_save(flags);					\
 	*__this_cpu_ptr(&(pcp)) op val;					\
-	local_irq_restore(flags);					\
+	raw_local_irq_restore(flags);					\
 } while (0)
 
 #ifndef this_cpu_write
@@ -449,10 +449,10 @@ do { \
 ({									\
 	typeof(pcp) ret__;						\
 	unsigned long flags;						\
-	local_irq_save(flags);						\
+	raw_local_irq_save(flags);					\
 	__this_cpu_add(pcp, val);					\
 	ret__ = __this_cpu_read(pcp);					\
-	local_irq_restore(flags);					\
+	raw_local_irq_restore(flags);					\
 	ret__;								\
 })
 
@@ -479,10 +479,10 @@ do { \
 #define _this_cpu_generic_xchg(pcp, nval)				\
 ({	typeof(pcp) ret__;						\
 	unsigned long flags;						\
-	local_irq_save(flags);						\
+	raw_local_irq_save(flags);					\
 	ret__ = __this_cpu_read(pcp);					\
 	__this_cpu_write(pcp, nval);					\
-	local_irq_restore(flags);					\
+	raw_local_irq_restore(flags);					\
 	ret__;								\
 })
 
@@ -507,11 +507,11 @@ do { \
 ({									\
 	typeof(pcp) ret__;						\
 	unsigned long flags;						\
-	local_irq_save(flags);						\
+	raw_local_irq_save(flags);					\
 	ret__ = __this_cpu_read(pcp);					\
 	if (ret__ == (oval))						\
 		__this_cpu_write(pcp, nval);				\
-	local_irq_restore(flags);					\
+	raw_local_irq_restore(flags);					\
 	ret__;								\
 })
 
@@ -544,10 +544,10 @@ do { \
 ({									\
 	int ret__;							\
 	unsigned long flags;						\
-	local_irq_save(flags);						\
+	raw_local_irq_save(flags);					\
 	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
 			oval1, oval2, nval1, nval2);			\
-	local_irq_restore(flags);					\
+	raw_local_irq_restore(flags);					\
 	ret__;								\
 })
 
@@ -718,12 +718,13 @@ do { \
 # ifndef __this_cpu_add_return_8
 #  define __this_cpu_add_return_8(pcp, val)	__this_cpu_generic_add_return(pcp, val)
 # endif
-# define __this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
+# define __this_cpu_add_return(pcp, val)	\
+	__pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
 #endif
 
-#define __this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(val))
-#define __this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
-#define __this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
+#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(val))
+#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
+#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)
 
 #define __this_cpu_generic_xchg(pcp, nval)				\
 ({	typeof(pcp) ret__;						\
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index abb2776be1b..bd9f55a5958 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -129,11 +129,40 @@ enum perf_event_sample_format {
 	PERF_SAMPLE_PERIOD			= 1U << 8,
 	PERF_SAMPLE_STREAM_ID			= 1U << 9,
 	PERF_SAMPLE_RAW				= 1U << 10,
+	PERF_SAMPLE_BRANCH_STACK		= 1U << 11,
 
-	PERF_SAMPLE_MAX = 1U << 11,		/* non-ABI */
+	PERF_SAMPLE_MAX = 1U << 12,		/* non-ABI */
 };
 
 /*
+ * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set
+ *
+ * If the user does not pass priv level information via branch_sample_type,
+ * the kernel uses the event's priv level. Branch and event priv levels do
+ * not have to match. Branch priv level is checked for permissions.
+ *
+ * The branch types can be combined, however BRANCH_ANY covers all types
+ * of branches and therefore it supersedes all the other types.
+ */
+enum perf_branch_sample_type {
+	PERF_SAMPLE_BRANCH_USER		= 1U << 0, /* user branches */
+	PERF_SAMPLE_BRANCH_KERNEL	= 1U << 1, /* kernel branches */
+	PERF_SAMPLE_BRANCH_HV		= 1U << 2, /* hypervisor branches */
+
+	PERF_SAMPLE_BRANCH_ANY		= 1U << 3, /* any branch types */
+	PERF_SAMPLE_BRANCH_ANY_CALL	= 1U << 4, /* any call branch */
+	PERF_SAMPLE_BRANCH_ANY_RETURN	= 1U << 5, /* any return branch */
+	PERF_SAMPLE_BRANCH_IND_CALL	= 1U << 6, /* indirect calls */
+
+	PERF_SAMPLE_BRANCH_MAX		= 1U << 7, /* non-ABI */
+};
+
+#define PERF_SAMPLE_BRANCH_PLM_ALL \
+	(PERF_SAMPLE_BRANCH_USER|\
+	 PERF_SAMPLE_BRANCH_KERNEL|\
+	 PERF_SAMPLE_BRANCH_HV)
+
+/*
  * The format of the data returned by read() on a perf event fd,
  * as specified by attr.read_format:
  *
@@ -163,6 +192,8 @@ enum perf_event_read_format {
 };
 
 #define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */
+#define PERF_ATTR_SIZE_VER1	72	/* add: config2 */
+#define PERF_ATTR_SIZE_VER2	80	/* add: branch_sample_type */
 
 /*
  * Hardware event_id to monitor via a performance monitoring event:
@@ -240,6 +271,7 @@ struct perf_event_attr {
 		__u64		bp_len;
 		__u64		config2; /* extension of config1 */
 	};
+	__u64	branch_sample_type; /* enum perf_branch_sample_type */
 };
 
 /*
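Requesting branch stacks from userspace then looks like this sketch (event
choice and period are illustrative):

	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= PERF_ATTR_SIZE_VER2,	/* covers branch_sample_type */
		.sample_period	= 100000,
		.sample_type	= PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK,
		/* no priv level bits set: kernel uses the event's priv level */
		.branch_sample_type = PERF_SAMPLE_BRANCH_ANY,
	};
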
@@ -291,12 +323,14 @@ struct perf_event_mmap_page {
 	__s64	offset;			/* add to hardware event value */
 	__u64	time_enabled;		/* time event active */
 	__u64	time_running;		/* time event on cpu */
+	__u32	time_mult, time_shift;
+	__u64	time_offset;
 
 	/*
 	 * Hole for extension of the self monitor capabilities
 	 */
 
-	__u64	__reserved[123];	/* align to 1k */
+	__u64	__reserved[121];	/* align to 1k */
 
 	/*
 	 * Control data for the mmap() data buffer.
@@ -456,6 +490,8 @@ enum perf_event_type {
 	 *
 	 *	{ u32			size;
 	 *	  char			data[size];}&& PERF_SAMPLE_RAW
+	 *
+	 *	{ u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
 	 * };
 	 */
 	PERF_RECORD_SAMPLE		= 9,
@@ -512,7 +548,7 @@ struct perf_guest_info_callbacks {
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
 #include <linux/irq_work.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 #include <linux/atomic.h>
 #include <asm/local.h>
 
@@ -528,12 +564,34 @@ struct perf_raw_record {
 	void				*data;
 };
 
+/*
+ * single taken branch record layout:
+ *
+ *      from: source instruction (may not always be a branch insn)
+ *        to: branch target
+ *   mispred: branch target was mispredicted
+ * predicted: branch target was predicted
+ *
+ * support for mispred, predicted is optional. In case it
+ * is not supported mispred = predicted = 0.
+ */
 struct perf_branch_entry {
 	__u64				from;
 	__u64				to;
-	__u64				flags;
+	__u64				mispred:1,  /* target mispredicted */
+					predicted:1,/* target predicted */
+					reserved:62;
 };
 
+/*
+ * branch stack layout:
+ *  nr: number of taken branches stored in entries[]
+ *
+ * Note that nr can vary from sample to sample
+ * branches (to, from) are stored from most recent
+ * to least recent, i.e., entries[0] contains the most
+ * recent branch.
+ */
 struct perf_branch_stack {
 	__u64				nr;
 	struct perf_branch_entry	entries[0];
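
A sketch of walking such a record once the PERF_SAMPLE_BRANCH_STACK payload
has been located in a sample (sample_branch_data is an assumed pointer into
the ring-buffer data; process_branch() is a placeholder consumer):

	struct perf_branch_stack *br = sample_branch_data;
	__u64 i;

	for (i = 0; i < br->nr; i++) {
		const struct perf_branch_entry *e = &br->entries[i];

		/* entries[0] is the most recent branch */
		process_branch(e->from, e->to, e->mispred);
	}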
@@ -564,7 +622,9 @@ struct hw_perf_event {
 		unsigned long	event_base;
 		int		idx;
 		int		last_cpu;
+
 		struct hw_perf_event_extra extra_reg;
+		struct hw_perf_event_extra branch_reg;
 	};
 	struct { /* software */
 		struct hrtimer	hrtimer;
@@ -616,6 +676,7 @@ struct pmu {
 	struct list_head		entry;
 
 	struct device			*dev;
+	const struct attribute_group	**attr_groups;
 	char				*name;
 	int				type;
 
@@ -681,6 +742,17 @@ struct pmu {
 	 * for each successful ->add() during the transaction.
 	 */
 	void (*cancel_txn)		(struct pmu *pmu); /* optional */
+
+	/*
+	 * Will return the value for perf_event_mmap_page::index for this event,
+	 * if no implementation is provided it will default to: event->hw.idx + 1.
+	 */
+	int (*event_idx)		(struct perf_event *event); /* optional */
+
+	/*
+	 * flush branch stack on context-switches (needed in cpu-wide mode)
+	 */
+	void (*flush_branch_stack)	(void);
 };
 
 /**
686/** 758/**
@@ -850,6 +922,9 @@ struct perf_event {
850#ifdef CONFIG_EVENT_TRACING 922#ifdef CONFIG_EVENT_TRACING
851 struct ftrace_event_call *tp_event; 923 struct ftrace_event_call *tp_event;
852 struct event_filter *filter; 924 struct event_filter *filter;
925#ifdef CONFIG_FUNCTION_TRACER
926 struct ftrace_ops ftrace_ops;
927#endif
853#endif 928#endif
854 929
855#ifdef CONFIG_CGROUP_PERF 930#ifdef CONFIG_CGROUP_PERF
@@ -911,7 +986,8 @@ struct perf_event_context {
 	u64				parent_gen;
 	u64				generation;
 	int				pin_count;
-	int				nr_cgroups;	 /* cgroup events present */
+	int				nr_cgroups;	 /* cgroup evts */
+	int				nr_branch_stack; /* branch_stack evt */
 	struct rcu_head			rcu_head;
 };
 
@@ -976,6 +1052,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr,
 extern u64 perf_event_read_value(struct perf_event *event,
 				 u64 *enabled, u64 *running);
 
+
 struct perf_sample_data {
 	u64				type;
 
@@ -995,12 +1072,14 @@ struct perf_sample_data {
 	u64				period;
 	struct perf_callchain_entry	*callchain;
 	struct perf_raw_record		*raw;
+	struct perf_branch_stack	*br_stack;
 };
 
 static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
 {
 	data->addr = addr;
 	data->raw  = NULL;
+	data->br_stack = NULL;
 }
 
 extern void perf_output_sample(struct perf_output_handle *handle,
@@ -1029,7 +1108,7 @@ static inline int is_software_event(struct perf_event *event)
 	return event->pmu->task_ctx_nr == perf_sw_context;
 }
 
-extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
+extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
 
@@ -1057,7 +1136,7 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 {
 	struct pt_regs hot_regs;
 
-	if (static_branch(&perf_swevent_enabled[event_id])) {
+	if (static_key_false(&perf_swevent_enabled[event_id])) {
 		if (!regs) {
 			perf_fetch_caller_regs(&hot_regs);
 			regs = &hot_regs;
@@ -1066,12 +1145,12 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 	}
 }
 
-extern struct jump_label_key_deferred perf_sched_events;
+extern struct static_key_deferred perf_sched_events;
 
 static inline void perf_event_task_sched_in(struct task_struct *prev,
 					    struct task_struct *task)
 {
-	if (static_branch(&perf_sched_events.key))
+	if (static_key_false(&perf_sched_events.key))
 		__perf_event_task_sched_in(prev, task);
 }
 
@@ -1080,7 +1159,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
 {
 	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
 
-	if (static_branch(&perf_sched_events.key))
+	if (static_key_false(&perf_sched_events.key))
 		__perf_event_task_sched_out(prev, next);
 }
 
@@ -1139,6 +1218,11 @@ extern void perf_bp_event(struct perf_event *event, void *data);
 # define perf_instruction_pointer(regs)	instruction_pointer(regs)
 #endif
 
+static inline bool has_branch_stack(struct perf_event *event)
+{
+	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
+}
+
 extern int perf_output_begin(struct perf_output_handle *handle,
 			     struct perf_event *event, unsigned int size);
 extern void perf_output_end(struct perf_output_handle *handle);
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 58969b2a8a8..5a710b9c578 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -48,12 +48,14 @@ do { \
 	barrier();					\
 } while (0)
 
-#define preempt_enable_no_resched() \
+#define sched_preempt_enable_no_resched() \
 do { \
 	barrier(); \
 	dec_preempt_count(); \
 } while (0)
 
+#define preempt_enable_no_resched()	sched_preempt_enable_no_resched()
+
 #define preempt_enable() \
 do { \
 	preempt_enable_no_resched(); \
@@ -92,6 +94,7 @@ do { \
 #else /* !CONFIG_PREEMPT_COUNT */
 
 #define preempt_disable()			do { } while (0)
+#define sched_preempt_enable_no_resched()	do { } while (0)
 #define preempt_enable_no_resched()		do { } while (0)
 #define preempt_enable()			do { } while (0)
 
diff --git a/include/linux/printk.h b/include/linux/printk.h
index f9abd9357a0..0525927f203 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -101,6 +101,11 @@ asmlinkage __printf(1, 2) __cold
 int printk(const char *fmt, ...);
 
 /*
+ * Special printk facility for scheduler use only, _DO_NOT_USE_ !
+ */
+__printf(1, 2) __cold int printk_sched(const char *fmt, ...);
+
+/*
  * Please don't use printk_ratelimit(), because it shares ratelimiting state
  * with all other unrelated printk_ratelimit() callsites. Instead use
  * printk_ratelimited() or plain old __ratelimit().
@@ -127,6 +132,11 @@ int printk(const char *s, ...)
 {
 	return 0;
 }
+static inline __printf(1, 2) __cold
+int printk_sched(const char *s, ...)
+{
+	return 0;
+}
 static inline int printk_ratelimit(void)
 {
 	return 0;
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 81c04f4348e..937217425c4 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -190,6 +190,33 @@ extern void rcu_idle_exit(void);
 extern void rcu_irq_enter(void);
 extern void rcu_irq_exit(void);
 
+/**
+ * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
+ * @a: Code that RCU needs to pay attention to.
+ *
+ * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
+ * in the inner idle loop, that is, between the rcu_idle_enter() and
+ * the rcu_idle_exit() -- RCU will happily ignore any such read-side
+ * critical sections. However, things like powertop need tracepoints
+ * in the inner idle loop.
+ *
+ * This macro provides the way out: RCU_NONIDLE(do_something_with_RCU())
+ * will tell RCU that it needs to pay attention, invoke its argument
+ * (in this example, a call to the do_something_with_RCU() function),
+ * and then tell RCU to go back to ignoring this CPU. It is permissible
+ * to nest RCU_NONIDLE() wrappers, but the nesting level is currently
+ * quite limited. If deeper nesting is required, it will be necessary
+ * to adjust DYNTICK_TASK_NESTING_VALUE accordingly.
+ *
+ * This macro may be used from process-level code only.
+ */
+#define RCU_NONIDLE(a) \
+	do { \
+		rcu_idle_exit(); \
+		do { a; } while (0); \
+		rcu_idle_enter(); \
+	} while (0)
+
 /*
  * Infrastructure to implement the synchronize_() primitives in
  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
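
Usage from the inner idle loop is then a one-liner; a sketch with a
hypothetical tracepoint:

	/* between rcu_idle_enter() and rcu_idle_exit() */
	RCU_NONIDLE(trace_my_idle_event(cpu, next_state));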
@@ -226,6 +253,15 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
 }
 #endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
 
+#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
+bool rcu_lockdep_current_cpu_online(void);
+#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
+static inline bool rcu_lockdep_current_cpu_online(void)
+{
+	return 1;
+}
+#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 #ifdef CONFIG_PROVE_RCU
@@ -239,13 +275,11 @@ static inline int rcu_is_cpu_idle(void)
 
 static inline void rcu_lock_acquire(struct lockdep_map *map)
 {
-	WARN_ON_ONCE(rcu_is_cpu_idle());
 	lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
 }
 
 static inline void rcu_lock_release(struct lockdep_map *map)
 {
-	WARN_ON_ONCE(rcu_is_cpu_idle());
 	lock_release(map, 1, _THIS_IP_);
 }
 
@@ -270,6 +304,9 @@ extern int debug_lockdep_rcu_enabled(void);
  * occur in the same context, for example, it is illegal to invoke
  * rcu_read_unlock() in process context if the matching rcu_read_lock()
  * was invoked from within an irq handler.
+ *
+ * Note that rcu_read_lock() is disallowed if the CPU is either idle or
+ * offline from an RCU perspective, so check for those as well.
  */
 static inline int rcu_read_lock_held(void)
 {
@@ -277,6 +314,8 @@ static inline int rcu_read_lock_held(void)
 		return 1;
 	if (rcu_is_cpu_idle())
 		return 0;
+	if (!rcu_lockdep_current_cpu_online())
+		return 0;
 	return lock_is_held(&rcu_lock_map);
 }
 
@@ -313,6 +352,9 @@ extern int rcu_read_lock_bh_held(void);
  * notice an extended quiescent state to other CPUs that started a grace
  * period. Otherwise we would delay any grace period as long as we run in
  * the idle task.
+ *
+ * Similarly, we avoid claiming an SRCU read lock held if the current
+ * CPU is offline.
  */
 #ifdef CONFIG_PREEMPT_COUNT
 static inline int rcu_read_lock_sched_held(void)
@@ -323,6 +365,8 @@ static inline int rcu_read_lock_sched_held(void)
 		return 1;
 	if (rcu_is_cpu_idle())
 		return 0;
+	if (!rcu_lockdep_current_cpu_online())
+		return 0;
 	if (debug_locks)
 		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
 	return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
@@ -381,8 +425,22 @@ extern int rcu_my_thread_group_empty(void);
 		} \
 	} while (0)
 
+#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
+static inline void rcu_preempt_sleep_check(void)
+{
+	rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
+			   "Illegal context switch in RCU read-side "
+			   "critical section");
+}
+#else /* #ifdef CONFIG_PROVE_RCU */
+static inline void rcu_preempt_sleep_check(void)
+{
+}
+#endif /* #else #ifdef CONFIG_PROVE_RCU */
+
 #define rcu_sleep_check() \
 	do { \
+		rcu_preempt_sleep_check(); \
 		rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \
 				   "Illegal context switch in RCU-bh" \
 				   " read-side critical section"); \
@@ -470,6 +528,13 @@ extern int rcu_my_thread_group_empty(void);
  * NULL. Although rcu_access_pointer() may also be used in cases where
  * update-side locks prevent the value of the pointer from changing, you
  * should instead use rcu_dereference_protected() for this use case.
+ *
+ * It is also permissible to use rcu_access_pointer() when read-side
+ * access to the pointer was removed at least one grace period ago, as
+ * is the case in the context of the RCU callback that is freeing up
+ * the data, or after a synchronize_rcu() returns. This can be useful
+ * when tearing down multi-linked structures after a grace period
+ * has elapsed.
  */
 #define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
 
@@ -659,6 +724,8 @@ static inline void rcu_read_lock(void)
     __rcu_read_lock();
     __acquire(RCU);
     rcu_lock_acquire(&rcu_lock_map);
+    rcu_lockdep_assert(!rcu_is_cpu_idle(),
+               "rcu_read_lock() used illegally while idle");
 }
 
 /*
@@ -678,6 +745,8 @@ static inline void rcu_read_lock(void)
  */
 static inline void rcu_read_unlock(void)
 {
+    rcu_lockdep_assert(!rcu_is_cpu_idle(),
+               "rcu_read_unlock() used illegally while idle");
     rcu_lock_release(&rcu_lock_map);
     __release(RCU);
     __rcu_read_unlock();
@@ -705,6 +774,8 @@ static inline void rcu_read_lock_bh(void)
     local_bh_disable();
     __acquire(RCU_BH);
     rcu_lock_acquire(&rcu_bh_lock_map);
+    rcu_lockdep_assert(!rcu_is_cpu_idle(),
+               "rcu_read_lock_bh() used illegally while idle");
 }
 
 /*
@@ -714,6 +785,8 @@ static inline void rcu_read_lock_bh(void)
  */
 static inline void rcu_read_unlock_bh(void)
 {
+    rcu_lockdep_assert(!rcu_is_cpu_idle(),
+               "rcu_read_unlock_bh() used illegally while idle");
     rcu_lock_release(&rcu_bh_lock_map);
     __release(RCU_BH);
     local_bh_enable();
@@ -737,6 +810,8 @@ static inline void rcu_read_lock_sched(void)
     preempt_disable();
     __acquire(RCU_SCHED);
     rcu_lock_acquire(&rcu_sched_lock_map);
+    rcu_lockdep_assert(!rcu_is_cpu_idle(),
+               "rcu_read_lock_sched() used illegally while idle");
 }
 
 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -753,6 +828,8 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
  */
 static inline void rcu_read_unlock_sched(void)
 {
+    rcu_lockdep_assert(!rcu_is_cpu_idle(),
+               "rcu_read_unlock_sched() used illegally while idle");
     rcu_lock_release(&rcu_sched_lock_map);
     __release(RCU_SCHED);
     preempt_enable();
@@ -841,7 +918,7 @@ void __kfree_rcu(struct rcu_head *head, unsigned long offset)
     /* See the kfree_rcu() header comment. */
     BUILD_BUG_ON(!__is_kfree_rcu_offset(offset));
 
-    call_rcu(head, (rcu_callback)offset);
+    kfree_call_rcu(head, (rcu_callback)offset);
 }
 
 /**
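
The expanded rcu_access_pointer() comment above is easiest to see in a teardown path. Below is a minimal sketch of that pattern; struct foo, its ->next link, and foo_reclaim() are hypothetical, not part of this patch:

    struct foo {
            struct foo __rcu *next;
            struct rcu_head rh;
    };

    static void foo_reclaim(struct rcu_head *rh)
    {
            struct foo *fp = container_of(rh, struct foo, rh);

            /*
             * Readers lost all paths to this element at least one grace
             * period ago, so rcu_access_pointer() is permissible here
             * even though no rcu_read_lock() is held.
             */
            kfree(rcu_access_pointer(fp->next));
            kfree(fp);
    }

This assumes the whole chain was unlinked before the grace period, so the ->next element is likewise unreachable by readers.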
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 00b7a5e493d..e93df77176d 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -27,13 +27,9 @@
 
 #include <linux/cache.h>
 
-#ifdef CONFIG_RCU_BOOST
 static inline void rcu_init(void)
 {
 }
-#else /* #ifdef CONFIG_RCU_BOOST */
-void rcu_init(void);
-#endif /* #else #ifdef CONFIG_RCU_BOOST */
 
 static inline void rcu_barrier_bh(void)
 {
@@ -83,6 +79,12 @@ static inline void synchronize_sched_expedited(void)
     synchronize_sched();
 }
 
+static inline void kfree_call_rcu(struct rcu_head *head,
+                  void (*func)(struct rcu_head *rcu))
+{
+    call_rcu(head, func);
+}
+
 #ifdef CONFIG_TINY_RCU
 
 static inline void rcu_preempt_note_context_switch(void)
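
With this wrapper, kfree_rcu() resolves the same way under Tiny and Tree RCU; only the callback routing differs. A hypothetical caller (struct foo and foo_release() are illustrative only):

    struct foo {
            int val;
            struct rcu_head rh;
    };

    static void foo_release(struct foo *fp)
    {
            /*
             * Expands to __kfree_rcu(&fp->rh, offsetof(struct foo, rh)),
             * which now invokes kfree_call_rcu(); under TINY_RCU the
             * inline above simply forwards to call_rcu().
             */
            kfree_rcu(fp, rh);
    }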
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 67458468f1a..e8ee5dd0854 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -61,6 +61,24 @@ extern void synchronize_rcu_bh(void);
 extern void synchronize_sched_expedited(void);
 extern void synchronize_rcu_expedited(void);
 
+void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
+
+/**
+ * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period
+ *
+ * Wait for an RCU-bh grace period to elapse, but use a "big hammer"
+ * approach to force the grace period to end quickly. This consumes
+ * significant time on all CPUs and is unfriendly to real-time workloads,
+ * so is thus not recommended for any sort of common-case code. In fact,
+ * if you are using synchronize_rcu_bh_expedited() in a loop, please
+ * restructure your code to batch your updates, and then use a single
+ * synchronize_rcu_bh() instead.
+ *
+ * Note that it is illegal to call this function while holding any lock
+ * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
+ * to call this function from a CPU-hotplug notifier. Failing to observe
+ * these restrictions will result in deadlock.
+ */
 static inline void synchronize_rcu_bh_expedited(void)
 {
     synchronize_sched_expedited();
@@ -83,6 +101,7 @@ extern void rcu_sched_force_quiescent_state(void);
 /* A context switch is a grace period for RCU-sched and RCU-bh. */
 static inline int rcu_blocking_is_gp(void)
 {
+    might_sleep();  /* Check for RCU read-side critical section. */
     return num_online_cpus() == 1;
 }
 
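
The new might_sleep() matters because synchronize_sched() returns early through rcu_blocking_is_gp() on uniprocessor builds, which used to hide illegal blocking. A hypothetical offender that a CONFIG_DEBUG_ATOMIC_SLEEP kernel should now flag even on one CPU:

    static void broken_updater(void)
    {
            rcu_read_lock_sched();
            /*
             * On UP this previously returned without complaint via
             * rcu_blocking_is_gp(); might_sleep() now reports the
             * illegal wait inside a read-side critical section.
             */
            synchronize_sched();
            rcu_read_unlock_sched();
    }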
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7d379a6bfd8..e074e1e54f8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -361,6 +361,7 @@ extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
+extern void schedule_preempt_disabled(void);
 extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
 
 struct nsproxy;
@@ -905,6 +906,7 @@ struct sched_group_power {
      * single CPU.
      */
     unsigned int power, power_orig;
+    unsigned long next_update;
     /*
      * Number of busy cpus in this group.
      */
@@ -1052,6 +1054,8 @@ static inline int test_sd_parent(struct sched_domain *sd, int flag)
 unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
 unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
 
+bool cpus_share_cache(int this_cpu, int that_cpu);
+
 #else /* CONFIG_SMP */
 
 struct sched_domain_attr;
@@ -1061,6 +1065,12 @@ partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
             struct sched_domain_attr *dattr_new)
 {
 }
+
+static inline bool cpus_share_cache(int this_cpu, int that_cpu)
+{
+    return true;
+}
+
 #endif /* !CONFIG_SMP */
 
 
@@ -1225,6 +1235,12 @@ struct sched_rt_entity {
 #endif
 };
 
+/*
+ * default timeslice is 100 msecs (used only for SCHED_RR tasks).
+ * Timeslices get refilled after they expire.
+ */
+#define RR_TIMESLICE        (100 * HZ / 1000)
+
 struct rcu_node;
 
 enum perf_event_task_context {
@@ -1319,6 +1335,11 @@ struct task_struct {
     unsigned sched_reset_on_fork:1;
     unsigned sched_contributes_to_load:1;
 
+#ifdef CONFIG_GENERIC_HARDIRQS
+    /* IRQ handler threads */
+    unsigned irq_thread:1;
+#endif
+
     pid_t pid;
     pid_t tgid;
 
@@ -1427,11 +1448,6 @@ struct task_struct {
                      * mempolicy */
     spinlock_t alloc_lock;
 
-#ifdef CONFIG_GENERIC_HARDIRQS
-    /* IRQ handler threads */
-    struct irqaction *irqaction;
-#endif
-
     /* Protection of the PI data structures: */
     raw_spinlock_t pi_lock;
 
@@ -1777,7 +1793,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 /*
  * Per process flags
  */
-#define PF_STARTING 0x00000002  /* being created */
 #define PF_EXITING  0x00000004  /* getting shut down */
 #define PF_EXITPIDONE   0x00000008  /* pi exit done on shut down */
 #define PF_VCPU     0x00000010  /* I'm a virtual CPU */
@@ -1864,8 +1879,7 @@ extern void task_clear_jobctl_pending(struct task_struct *task,
 #ifdef CONFIG_PREEMPT_RCU
 
 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
-#define RCU_READ_UNLOCK_BOOSTED (1 << 1) /* boosted while in RCU read-side. */
-#define RCU_READ_UNLOCK_NEED_QS (1 << 2) /* RCU core needs CPU response. */
+#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
 
 static inline void rcu_copy_process(struct task_struct *p)
 {
@@ -2049,7 +2063,7 @@ extern void sched_autogroup_fork(struct signal_struct *sig);
 extern void sched_autogroup_exit(struct signal_struct *sig);
 #ifdef CONFIG_PROC_FS
 extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
-extern int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice);
+extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
 #endif
 #else
 static inline void sched_autogroup_create_attach(struct task_struct *p) { }
@@ -2066,12 +2080,20 @@ extern unsigned int sysctl_sched_cfs_bandwidth_slice;
 extern int rt_mutex_getprio(struct task_struct *p);
 extern void rt_mutex_setprio(struct task_struct *p, int prio);
 extern void rt_mutex_adjust_pi(struct task_struct *p);
+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
+    return tsk->pi_blocked_on != NULL;
+}
 #else
 static inline int rt_mutex_getprio(struct task_struct *p)
 {
     return p->normal_prio;
 }
 # define rt_mutex_adjust_pi(p)      do { } while (0)
+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
+    return false;
+}
 #endif
 
 extern bool yield_to(struct task_struct *p, bool preempt);
@@ -2371,7 +2393,7 @@ static inline int thread_group_empty(struct task_struct *p)
  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
  * subscriptions and synchronises with wait4(). Also used in procfs. Also
  * pins the final release of task.io_context. Also protects ->cpuset and
- * ->cgroup.subsys[].
+ * ->cgroup.subsys[]. And ->vfork_done.
  *
  * Nests both inside and outside of read_lock(&tasklist_lock).
  * It must not be nested with write_lock_irq(&tasklist_lock),
@@ -2390,12 +2412,15 @@ static inline void task_unlock(struct task_struct *p)
 extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
                             unsigned long *flags);
 
-#define lock_task_sighand(tsk, flags)                   \
-({  struct sighand_struct *__ss;                    \
-    __cond_lock(&(tsk)->sighand->siglock,               \
-            (__ss = __lock_task_sighand(tsk, flags)));      \
-    __ss;                               \
-})                                  \
+static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
+                               unsigned long *flags)
+{
+    struct sighand_struct *ret;
+
+    ret = __lock_task_sighand(tsk, flags);
+    (void)__cond_lock(&tsk->sighand->siglock, ret);
+    return ret;
+}
 
 static inline void unlock_task_sighand(struct task_struct *tsk,
                        unsigned long *flags)
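
Converting lock_task_sighand() from a macro to an inline function changes nothing for callers. A hypothetical user (inspect_signals() is illustrative only):

    static int inspect_signals(struct task_struct *tsk)
    {
            struct sighand_struct *sighand;
            unsigned long flags;

            sighand = lock_task_sighand(tsk, &flags);
            if (!sighand)
                    return -ESRCH;  /* task is already exiting */
            /* ... examine state protected by ->siglock ... */
            unlock_task_sighand(tsk, &flags);
            return 0;
    }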
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index e1b005918bb..d3d5fa54f25 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -99,15 +99,18 @@ long srcu_batches_completed(struct srcu_struct *sp);
  * power mode. This way we can notice an extended quiescent state to
  * other CPUs that started a grace period. Otherwise we would delay any
  * grace period as long as we run in the idle task.
+ *
+ * Similarly, we avoid claiming an SRCU read lock held if the current
+ * CPU is offline.
  */
 static inline int srcu_read_lock_held(struct srcu_struct *sp)
 {
-    if (rcu_is_cpu_idle())
-        return 0;
-
     if (!debug_lockdep_rcu_enabled())
         return 1;
-
+    if (rcu_is_cpu_idle())
+        return 0;
+    if (!rcu_lockdep_current_cpu_online())
+        return 0;
     return lock_is_held(&sp->dep_map);
 }
 
@@ -169,6 +172,8 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
     int retval = __srcu_read_lock(sp);
 
     rcu_lock_acquire(&(sp)->dep_map);
+    rcu_lockdep_assert(!rcu_is_cpu_idle(),
+               "srcu_read_lock() used illegally while idle");
     return retval;
 }
 
@@ -182,6 +187,8 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
 static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
     __releases(sp)
 {
+    rcu_lockdep_assert(!rcu_is_cpu_idle(),
+               "srcu_read_unlock() used illegally while idle");
     rcu_lock_release(&(sp)->dep_map);
     __srcu_read_unlock(sp, idx);
 }
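
As with the RCU flavors in rcupdate.h, the point of the new assertions is that idle-context SRCU readers now fail loudly instead of being silently ineffective. A minimal reader sketch; my_srcu, shared_ptr, and use_shared() are hypothetical:

    int idx;
    struct shared *p;

    idx = srcu_read_lock(&my_srcu);         /* asserts if the CPU is idle */
    p = srcu_dereference(shared_ptr, &my_srcu);
    if (p)
            use_shared(p);
    srcu_read_unlock(&my_srcu, idx);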
diff --git a/include/linux/static_key.h b/include/linux/static_key.h
new file mode 100644
index 00000000000..27bd3f8a085
--- /dev/null
+++ b/include/linux/static_key.h
@@ -0,0 +1 @@
+#include <linux/jump_label.h>
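
This one-line header lets code pick up the struct static_key names (used by the tracepoint.h conversion below) without including jump_label.h directly. A minimal sketch of the renamed API; my_key and do_slow_path() are hypothetical:

    #include <linux/static_key.h>

    static struct static_key my_key = STATIC_KEY_INIT_FALSE;

    void hot_path(void)
    {
            /* Compiles to a no-op branch until the key is switched on. */
            if (static_key_false(&my_key))
                    do_slow_path();
    }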
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 46a85c9e1f2..3c7ffdb40dc 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -412,7 +412,8 @@ struct tcp_sock {
 
     struct tcp_sack_block recv_sack_cache[4];
 
-    struct sk_buff *highest_sack;   /* highest skb with SACK received
+    struct sk_buff *highest_sack;   /* skb just after the highest
+                     * skb with SACKed bit set
                      * (validity guaranteed only if
                      * sacked_out > 0)
                      */
diff --git a/include/linux/timex.h b/include/linux/timex.h
index aa60fe7b6ed..b75e1864ed1 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -234,23 +234,9 @@ struct timex {
 extern unsigned long tick_usec;     /* USER_HZ period (usec) */
 extern unsigned long tick_nsec;     /* ACTHZ period (nsec) */
 
-/*
- * phase-lock loop variables
- */
-extern int time_status;     /* clock synchronization status bits */
-
 extern void ntp_init(void);
 extern void ntp_clear(void);
 
-/**
- * ntp_synced - Returns 1 if the NTP status is not UNSYNC
- *
- */
-static inline int ntp_synced(void)
-{
-    return !(time_status & STA_UNSYNC);
-}
-
 /* Required to safely shift negative values */
 #define shift_right(x, s) ({    \
     __typeof__(x) __x = (x);    \
@@ -264,10 +250,9 @@ static inline int ntp_synced(void)
 #define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ)
 
 /* Returns how long ticks are at present, in ns / 2^NTP_SCALE_SHIFT. */
-extern u64 tick_length;
+extern u64 ntp_tick_length(void);
 
 extern void second_overflow(void);
-extern void update_ntp_one_tick(void);
 extern int do_adjtimex(struct timex *);
 extern void hardpps(const struct timespec *, const struct timespec *);
 
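
tick_length thus ceases to be an exported variable. A one-line sketch of a caller migrating to the accessor:

    u64 len = ntp_tick_length();    /* was: u64 len = tick_length; */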
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index df0a779c1bb..bd96ecd0e05 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -17,7 +17,7 @@
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/rcupdate.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 
 struct module;
 struct tracepoint;
@@ -29,7 +29,7 @@ struct tracepoint_func {
 
 struct tracepoint {
     const char *name;       /* Tracepoint name */
-    struct jump_label_key key;
+    struct static_key key;
     void (*regfunc)(void);
     void (*unregfunc)(void);
     struct tracepoint_func __rcu *funcs;
@@ -114,7 +114,7 @@ static inline void tracepoint_synchronize_unregister(void)
  * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just
  * "void *data", whereas the DECLARE_TRACE() will pass in "void *data, proto".
  */
-#define __DO_TRACE(tp, proto, args, cond)               \
+#define __DO_TRACE(tp, proto, args, cond, prercu, postrcu)     \
     do {                                \
         struct tracepoint_func *it_func_ptr;            \
         void *it_func;                      \
@@ -122,6 +122,7 @@ static inline void tracepoint_synchronize_unregister(void)
                                     \
         if (!(cond))                        \
             return;                     \
+        prercu;                         \
         rcu_read_lock_sched_notrace();              \
         it_func_ptr = rcu_dereference_sched((tp)->funcs);   \
         if (it_func_ptr) {                  \
@@ -132,6 +133,7 @@ static inline void tracepoint_synchronize_unregister(void)
             } while ((++it_func_ptr)->func);        \
         }                           \
         rcu_read_unlock_sched_notrace();            \
+        postrcu;                        \
     } while (0)
 
 /*
@@ -139,15 +141,25 @@ static inline void tracepoint_synchronize_unregister(void)
  * not add unwanted padding between the beginning of the section and the
  * structure. Force alignment to the same alignment as the section start.
  */
 #define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
     extern struct tracepoint __tracepoint_##name;       \
     static inline void trace_##name(proto)          \
     {                           \
+        if (static_key_false(&__tracepoint_##name.key)) \
+            __DO_TRACE(&__tracepoint_##name,        \
+                TP_PROTO(data_proto),           \
+                TP_ARGS(data_args),         \
+                TP_CONDITION(cond),,);          \
+    }                           \
+    static inline void trace_##name##_rcuidle(proto)    \
+    {                           \
         if (static_branch(&__tracepoint_##name.key))    \
             __DO_TRACE(&__tracepoint_##name,        \
                 TP_PROTO(data_proto),           \
                 TP_ARGS(data_args),         \
-                TP_CONDITION(cond));            \
+                TP_CONDITION(cond),         \
+                rcu_idle_exit(),            \
+                rcu_idle_enter());          \
     }                           \
     static inline int                       \
     register_trace_##name(void (*probe)(data_proto), void *data) \
@@ -176,7 +188,7 @@ static inline void tracepoint_synchronize_unregister(void)
     __attribute__((section("__tracepoints_strings"))) = #name; \
     struct tracepoint __tracepoint_##name           \
     __attribute__((section("__tracepoints"))) =     \
-        { __tpstrtab_##name, JUMP_LABEL_INIT, reg, unreg, NULL };\
+        { __tpstrtab_##name, STATIC_KEY_INIT_FALSE, reg, unreg, NULL };\
     static struct tracepoint * const __tracepoint_ptr_##name __used \
     __attribute__((section("__tracepoints_ptrs"))) =    \
         &__tracepoint_##name;
@@ -190,9 +202,11 @@ static inline void tracepoint_synchronize_unregister(void)
     EXPORT_SYMBOL(__tracepoint_##name)
 
 #else /* !CONFIG_TRACEPOINTS */
 #define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
     static inline void trace_##name(proto)          \
     { }                         \
+    static inline void trace_##name##_rcuidle(proto)    \
+    { }                         \
     static inline int                       \
     register_trace_##name(void (*probe)(data_proto),    \
                   void *data)               \
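
Every DECLARE_TRACE() site therefore gains an _rcuidle variant that brackets the probe with rcu_idle_exit()/rcu_idle_enter(), so RCU is watching while the probe runs. For a hypothetical tracepoint foo:

    trace_foo(arg);         /* ordinary callsite; its RCU protection is
                             * ineffective if fired from the idle loop */
    trace_foo_rcuidle(arg); /* momentarily exits RCU-idle, so it is
                             * safe to fire from the idle loop */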
diff --git a/include/linux/wait.h b/include/linux/wait.h
index a9ce45e8501..7d9a9e990ce 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -157,7 +157,7 @@ void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
 void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
 void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
             void *key);
-void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
+void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
 void __wake_up_bit(wait_queue_head_t *, void *, int);
 int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
@@ -170,7 +170,8 @@ wait_queue_head_t *bit_waitqueue(void *, int);
 #define wake_up(x)          __wake_up(x, TASK_NORMAL, 1, NULL)
 #define wake_up_nr(x, nr)       __wake_up(x, TASK_NORMAL, nr, NULL)
 #define wake_up_all(x)          __wake_up(x, TASK_NORMAL, 0, NULL)
-#define wake_up_locked(x)       __wake_up_locked((x), TASK_NORMAL)
+#define wake_up_locked(x)       __wake_up_locked((x), TASK_NORMAL, 1)
+#define wake_up_all_locked(x)       __wake_up_locked((x), TASK_NORMAL, 0)
 
 #define wake_up_interruptible(x)    __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
 #define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
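
The extra nr argument lets the locked variant wake more than one exclusive waiter; wake_up_all_locked() passes 0, meaning all of them. A hypothetical caller that already holds the waitqueue lock (wq_head, flags, and the condition flag are illustrative):

    spin_lock_irqsave(&wq_head.lock, flags);
    done = true;                    /* publish the condition under the lock */
    wake_up_all_locked(&wq_head);   /* nr_exclusive == 0: wake every waiter */
    spin_unlock_irqrestore(&wq_head.lock, flags);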
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index eb8b9f15f2e..af155450cab 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -289,12 +289,16 @@ enum {
  *
  * system_freezable_wq is equivalent to system_wq except that it's
  * freezable.
+ *
+ * system_nrt_freezable_wq is equivalent to system_nrt_wq except that
+ * it's freezable.
  */
 extern struct workqueue_struct *system_wq;
 extern struct workqueue_struct *system_long_wq;
 extern struct workqueue_struct *system_nrt_wq;
 extern struct workqueue_struct *system_unbound_wq;
 extern struct workqueue_struct *system_freezable_wq;
+extern struct workqueue_struct *system_nrt_freezable_wq;
 
 extern struct workqueue_struct *
 __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
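
Queueing to the new workqueue looks like any other system queue; only its attributes (non-reentrant and freezable) differ. For example, with a hypothetical my_work item:

    /* Never runs concurrently with itself and is frozen across suspend. */
    queue_work(system_nrt_freezable_wq, &my_work);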