Diffstat (limited to 'include')
-rw-r--r--  include/linux/completion.h     |   8
-rw-r--r--  include/linux/dynamic_debug.h  |  18
-rw-r--r--  include/linux/hrtimer.h        |  33
-rw-r--r--  include/linux/init_task.h      |  18
-rw-r--r--  include/linux/interrupt.h      |   6
-rw-r--r--  include/linux/module.h         |  11
-rw-r--r--  include/linux/mutex.h          |   4
-rw-r--r--  include/linux/rculist.h        |   5
-rw-r--r--  include/linux/rcupdate.h       |   4
-rw-r--r--  include/linux/rcutiny.h        |  13
-rw-r--r--  include/linux/rcutree.h        |   2
-rw-r--r--  include/linux/sched.h          |  47
-rw-r--r--  include/linux/sfi.h            |   8
-rw-r--r--  include/linux/timer.h          |  32
-rw-r--r--  include/linux/timerqueue.h     |  50
-rw-r--r--  include/linux/tracepoint.h     |   4
-rw-r--r--  include/linux/workqueue.h      |   8
-rw-r--r--  include/trace/define_trace.h   |  10
-rw-r--r--  include/trace/events/skb.h     |   4
19 files changed, 211 insertions(+), 74 deletions(-)
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 36d57f74cd01..51494e6b5548 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -81,10 +81,10 @@ extern int wait_for_completion_interruptible(struct completion *x);
 extern int wait_for_completion_killable(struct completion *x);
 extern unsigned long wait_for_completion_timeout(struct completion *x,
 						   unsigned long timeout);
-extern unsigned long wait_for_completion_interruptible_timeout(
+extern long wait_for_completion_interruptible_timeout(
 	struct completion *x, unsigned long timeout);
-extern unsigned long wait_for_completion_killable_timeout(
+extern long wait_for_completion_killable_timeout(
 	struct completion *x, unsigned long timeout);
 extern bool try_wait_for_completion(struct completion *x);
 extern bool completion_done(struct completion *x);
 
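The switch from unsigned long to long above matters to callers: a signed return lets an interruption (-ERESTARTSYS) be told apart from a large remaining-jiffies count. A minimal caller sketch, not part of this patch; the completion name and timeout are illustrative:

static int wait_for_device(struct completion *done)
{
	/* <0: interrupted by a signal, 0: timed out, >0: jiffies remaining */
	long ret = wait_for_completion_interruptible_timeout(done,
						msecs_to_jiffies(100));
	if (ret < 0)
		return ret;		/* e.g. -ERESTARTSYS */
	if (ret == 0)
		return -ETIMEDOUT;
	return 0;			/* completed with time to spare */
}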
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index a90b3892074a..1c70028f81f9 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -44,34 +44,24 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
 extern int ddebug_remove_module(const char *mod_name);
 
 #define dynamic_pr_debug(fmt, ...) do {				\
-	__label__ do_printk;					\
-	__label__ out;						\
 	static struct _ddebug descriptor			\
 	__used							\
 	__attribute__((section("__verbose"), aligned(8))) =	\
 	{ KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__,	\
 		_DPRINTK_FLAGS_DEFAULT };			\
-	JUMP_LABEL(&descriptor.enabled, do_printk);		\
-	goto out;						\
-do_printk:							\
-	printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__);		\
-out:	;							\
+	if (unlikely(descriptor.enabled))			\
+		printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__);	\
 	} while (0)
 
 
 #define dynamic_dev_dbg(dev, fmt, ...) do {			\
-	__label__ do_printk;					\
-	__label__ out;						\
 	static struct _ddebug descriptor			\
 	__used							\
 	__attribute__((section("__verbose"), aligned(8))) =	\
 	{ KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__,	\
 		_DPRINTK_FLAGS_DEFAULT };			\
-	JUMP_LABEL(&descriptor.enabled, do_printk);		\
-	goto out;						\
-do_printk:							\
-	dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__);	\
-out:	;							\
+	if (unlikely(descriptor.enabled))			\
+		dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
 	} while (0)
 
 #else
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index fd0c1b857d3d..330586ffffbb 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -22,7 +22,7 @@
 #include <linux/wait.h>
 #include <linux/percpu.h>
 #include <linux/timer.h>
-
+#include <linux/timerqueue.h>
 
 struct hrtimer_clock_base;
 struct hrtimer_cpu_base;
@@ -79,8 +79,8 @@ enum hrtimer_restart {
 
 /**
  * struct hrtimer - the basic hrtimer structure
- * @node:	red black tree node for time ordered insertion
- * @_expires:	the absolute expiry time in the hrtimers internal
+ * @node:	timerqueue node, which also manages node.expires,
+ *		the absolute expiry time in the hrtimers internal
  *		representation. The time is related to the clock on
  *		which the timer is based. Is setup by adding
  *		slack to the _softexpires value. For non range timers
@@ -101,8 +101,7 @@ enum hrtimer_restart {
  * The hrtimer structure must be initialized by hrtimer_init()
  */
 struct hrtimer {
-	struct rb_node			node;
-	ktime_t				_expires;
+	struct timerqueue_node		node;
 	ktime_t				_softexpires;
 	enum hrtimer_restart		(*function)(struct hrtimer *);
 	struct hrtimer_clock_base	*base;
@@ -141,8 +140,7 @@ struct hrtimer_sleeper {
 struct hrtimer_clock_base {
 	struct hrtimer_cpu_base	*cpu_base;
 	clockid_t		index;
-	struct rb_root		active;
-	struct rb_node		*first;
+	struct timerqueue_head	active;
 	ktime_t			resolution;
 	ktime_t			(*get_time)(void);
 	ktime_t			softirq_time;
@@ -158,7 +156,6 @@ struct hrtimer_clock_base {
  * @lock:		lock protecting the base and associated clock bases
  *			and timers
  * @clock_base:		array of clock bases for this cpu
- * @curr_timer:		the timer which is executing a callback right now
 * @expires_next:	absolute time of the next event which was scheduled
 *			via clock_set_next_event()
 * @hres_active:	State of high resolution mode
@@ -184,43 +181,43 @@ struct hrtimer_cpu_base {
 
 static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
 {
-	timer->_expires = time;
+	timer->node.expires = time;
 	timer->_softexpires = time;
 }
 
 static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta)
 {
 	timer->_softexpires = time;
-	timer->_expires = ktime_add_safe(time, delta);
+	timer->node.expires = ktime_add_safe(time, delta);
 }
 
 static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta)
 {
 	timer->_softexpires = time;
-	timer->_expires = ktime_add_safe(time, ns_to_ktime(delta));
+	timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta));
 }
 
 static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
 {
-	timer->_expires.tv64 = tv64;
+	timer->node.expires.tv64 = tv64;
 	timer->_softexpires.tv64 = tv64;
 }
 
 static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
 {
-	timer->_expires = ktime_add_safe(timer->_expires, time);
+	timer->node.expires = ktime_add_safe(timer->node.expires, time);
 	timer->_softexpires = ktime_add_safe(timer->_softexpires, time);
 }
 
 static inline void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns)
 {
-	timer->_expires = ktime_add_ns(timer->_expires, ns);
+	timer->node.expires = ktime_add_ns(timer->node.expires, ns);
 	timer->_softexpires = ktime_add_ns(timer->_softexpires, ns);
 }
 
 static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer)
 {
-	return timer->_expires;
+	return timer->node.expires;
 }
 
 static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
@@ -230,7 +227,7 @@ static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
 
 static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
 {
-	return timer->_expires.tv64;
+	return timer->node.expires.tv64;
 }
 static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
 {
@@ -239,12 +236,12 @@ static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
 
 static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
 {
-	return ktime_to_ns(timer->_expires);
+	return ktime_to_ns(timer->node.expires);
 }
 
 static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
 {
-	return ktime_sub(timer->_expires, timer->base->get_time());
+	return ktime_sub(timer->node.expires, timer->base->get_time());
 }
 
 #ifdef CONFIG_HIGH_RES_TIMERS
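With the expiry time folded into timer->node.expires, the inline accessors above keep their old behaviour, so hrtimer users are unaffected as long as they go through the helpers. A small sketch under that assumption (function name and values are illustrative, not from this patch):

static void arm_with_slack(struct hrtimer *timer)
{
	/* soft expiry = now + 1 ms, hard expiry = soft + 50 us of slack;
	 * these land in timer->_softexpires and timer->node.expires */
	hrtimer_set_expires_range_ns(timer,
				     ktime_add_ns(ktime_get(), 1000000),
				     50000);
}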
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 1f8c06ce0fa6..caa151fbebb7 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -12,6 +12,13 @@
 #include <linux/securebits.h>
 #include <net/net_namespace.h>
 
+#ifdef CONFIG_SMP
+# define INIT_PUSHABLE_TASKS(tsk)					\
+	.pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO),
+#else
+# define INIT_PUSHABLE_TASKS(tsk)
+#endif
+
 extern struct files_struct init_files;
 extern struct fs_struct init_fs;
 
@@ -83,6 +90,12 @@ extern struct group_info init_groups;
  */
 # define CAP_INIT_BSET  CAP_FULL_SET
 
+#ifdef CONFIG_RCU_BOOST
+#define INIT_TASK_RCU_BOOST()						\
+	.rcu_boost_mutex = NULL,
+#else
+#define INIT_TASK_RCU_BOOST()
+#endif
 #ifdef CONFIG_TREE_PREEMPT_RCU
 #define INIT_TASK_RCU_TREE_PREEMPT()					\
 	.rcu_blocked_node = NULL,
@@ -94,7 +107,8 @@ extern struct group_info init_groups;
 	.rcu_read_lock_nesting = 0,				\
 	.rcu_read_unlock_special = 0,				\
 	.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),	\
-	INIT_TASK_RCU_TREE_PREEMPT()
+	INIT_TASK_RCU_TREE_PREEMPT()				\
+	INIT_TASK_RCU_BOOST()
 #else
 #define INIT_TASK_RCU_PREEMPT(tsk)
 #endif
@@ -137,7 +151,7 @@ extern struct cred init_cred;
 		.nr_cpus_allowed = NR_CPUS,				\
 	},								\
 	.tasks		= LIST_HEAD_INIT(tsk.tasks),			\
-	.pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO), \
+	INIT_PUSHABLE_TASKS(tsk)					\
 	.ptraced	= LIST_HEAD_INIT(tsk.ptraced),			\
 	.ptrace_entry	= LIST_HEAD_INIT(tsk.ptrace_entry),		\
 	.real_parent	= &tsk,						\
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 79d0c4f6d071..55e0d4253e49 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -114,15 +114,15 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
 struct irqaction {
 	irq_handler_t handler;
 	unsigned long flags;
-	const char *name;
 	void *dev_id;
 	struct irqaction *next;
 	int irq;
-	struct proc_dir_entry *dir;
 	irq_handler_t thread_fn;
 	struct task_struct *thread;
 	unsigned long thread_flags;
-};
+	const char *name;
+	struct proc_dir_entry *dir;
+} ____cacheline_internodealigned_in_smp;
 
 extern irqreturn_t no_action(int cpl, void *dev_id);
 
diff --git a/include/linux/module.h b/include/linux/module.h
index 7575bbbdf2a2..8b17fd8c790d 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -308,6 +308,9 @@ struct module
 	/* The size of the executable code in each section.  */
 	unsigned int init_text_size, core_text_size;
 
+	/* Size of RO sections of the module (text+rodata) */
+	unsigned int init_ro_size, core_ro_size;
+
 	/* Arch-specific module values */
 	struct mod_arch_specific arch;
 
@@ -672,7 +675,6 @@ static inline int module_get_iter_tracepoints(struct tracepoint_iter *iter)
 {
 	return 0;
 }
-
 #endif /* CONFIG_MODULES */
 
 #ifdef CONFIG_SYSFS
@@ -687,6 +689,13 @@ extern int module_sysfs_initialized;
 
 #define __MODULE_STRING(x) __stringify(x)
 
+#ifdef CONFIG_DEBUG_SET_MODULE_RONX
+extern void set_all_modules_text_rw(void);
+extern void set_all_modules_text_ro(void);
+#else
+static inline void set_all_modules_text_rw(void) { }
+static inline void set_all_modules_text_ro(void) { }
+#endif
 
 #ifdef CONFIG_GENERIC_BUG
 void module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *,
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index f363bc8fdc74..94b48bd40dd7 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -160,4 +160,8 @@ extern int mutex_trylock(struct mutex *lock);
 extern void mutex_unlock(struct mutex *lock);
 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
+#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
+#define arch_mutex_cpu_relax()	cpu_relax()
+#endif
+
 #endif
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index f31ef61f1c65..2dea94fc4402 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -241,11 +241,6 @@ static inline void list_splice_init_rcu(struct list_head *list,
 #define list_first_entry_rcu(ptr, type, member) \
 	list_entry_rcu((ptr)->next, type, member)
 
-#define __list_for_each_rcu(pos, head) \
-	for (pos = rcu_dereference_raw(list_next_rcu(head)); \
-		pos != (head); \
-		pos = rcu_dereference_raw(list_next_rcu((pos)))
-
 /**
  * list_for_each_entry_rcu - iterate over rcu list of given type
  * @pos:	the type * to use as a loop cursor.
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 03cda7bed985..af5614856285 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -47,6 +47,8 @@
 extern int rcutorture_runnable; /* for sysctl */
 #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
 
+#define UINT_CMP_GE(a, b)	(UINT_MAX / 2 >= (a) - (b))
+#define UINT_CMP_LT(a, b)	(UINT_MAX / 2 < (a) - (b))
 #define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
 #define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))
 
@@ -66,7 +68,6 @@ extern void call_rcu_sched(struct rcu_head *head,
 extern void synchronize_sched(void);
 extern void rcu_barrier_bh(void);
 extern void rcu_barrier_sched(void);
-extern void synchronize_sched_expedited(void);
 extern int sched_expedited_torture_stats(char *page);
 
 static inline void __rcu_read_lock_bh(void)
@@ -118,7 +119,6 @@ static inline int rcu_preempt_depth(void)
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
 /* Internal to kernel */
-extern void rcu_init(void);
 extern void rcu_sched_qs(int cpu);
 extern void rcu_bh_qs(int cpu);
 extern void rcu_check_callbacks(int cpu, int user);
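The new UINT_CMP_GE()/UINT_CMP_LT() macros mirror the ULONG variants for 32-bit sequence counters: they compare values modulo 2^32, so the ordering survives wrap-around as long as the two counters stay within UINT_MAX/2 of each other. A hypothetical helper showing the intent (not part of this patch):

/* True once 'cur' has reached 'target', even if the counter wrapped:
 * e.g. cur = 2, target = UINT_MAX - 1 gives (cur - target) == 4,
 * which is <= UINT_MAX / 2, so the macro reports "reached". */
static inline bool seq_reached(unsigned int cur, unsigned int target)
{
	return UINT_CMP_GE(cur, target);
}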
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 13877cb93a60..30ebd7c8d874 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -27,7 +27,9 @@
 
 #include <linux/cache.h>
 
-#define rcu_init_sched()	do { } while (0)
+static inline void rcu_init(void)
+{
+}
 
 #ifdef CONFIG_TINY_RCU
 
@@ -58,6 +60,11 @@ static inline void synchronize_rcu_bh_expedited(void)
 	synchronize_sched();
 }
 
+static inline void synchronize_sched_expedited(void)
+{
+	synchronize_sched();
+}
+
 #ifdef CONFIG_TINY_RCU
 
 static inline void rcu_preempt_note_context_switch(void)
@@ -125,16 +132,12 @@ static inline void rcu_cpu_stall_reset(void)
 }
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-
 extern int rcu_scheduler_active __read_mostly;
 extern void rcu_scheduler_starting(void);
-
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-
 static inline void rcu_scheduler_starting(void)
 {
 }
-
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
 #endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 95518e628794..3a933482734a 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,6 +30,7 @@
 #ifndef __LINUX_RCUTREE_H
 #define __LINUX_RCUTREE_H
 
+extern void rcu_init(void);
 extern void rcu_note_context_switch(int cpu);
 extern int rcu_needs_cpu(int cpu);
 extern void rcu_cpu_stall_reset(void);
@@ -47,6 +48,7 @@ static inline void exit_rcu(void)
 #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
 
 extern void synchronize_rcu_bh(void);
+extern void synchronize_sched_expedited(void);
 extern void synchronize_rcu_expedited(void);
 
 static inline void synchronize_rcu_bh_expedited(void)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a99d735db3df..777cd01e240e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -513,6 +513,8 @@ struct thread_group_cputimer {
 	spinlock_t lock;
 };
 
+struct autogroup;
+
 /*
  * NOTE! "signal_struct" does not have it's own
  * locking, because a shared signal_struct always
@@ -580,6 +582,9 @@ struct signal_struct {
 
 	struct tty_struct *tty; /* NULL if no tty */
 
+#ifdef CONFIG_SCHED_AUTOGROUP
+	struct autogroup *autogroup;
+#endif
 	/*
 	 * Cumulative resource counters for dead threads in the group,
 	 * and for reaped dead child processes forked by this group.
@@ -1233,13 +1238,18 @@ struct task_struct {
 #ifdef CONFIG_TREE_PREEMPT_RCU
 	struct rcu_node *rcu_blocked_node;
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+#ifdef CONFIG_RCU_BOOST
+	struct rt_mutex *rcu_boost_mutex;
+#endif /* #ifdef CONFIG_RCU_BOOST */
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	struct sched_info sched_info;
 #endif
 
 	struct list_head tasks;
+#ifdef CONFIG_SMP
 	struct plist_node pushable_tasks;
+#endif
 
 	struct mm_struct *mm, *active_mm;
 #if defined(SPLIT_RSS_COUNTING)
@@ -1763,7 +1773,8 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #ifdef CONFIG_PREEMPT_RCU
 
 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
-#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
+#define RCU_READ_UNLOCK_BOOSTED (1 << 1) /* boosted while in RCU read-side. */
+#define RCU_READ_UNLOCK_NEED_QS (1 << 2) /* RCU core needs CPU response. */
 
 static inline void rcu_copy_process(struct task_struct *p)
 {
@@ -1771,7 +1782,10 @@ static inline void rcu_copy_process(struct task_struct *p)
 	p->rcu_read_unlock_special = 0;
 #ifdef CONFIG_TREE_PREEMPT_RCU
 	p->rcu_blocked_node = NULL;
-#endif
+#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+#ifdef CONFIG_RCU_BOOST
+	p->rcu_boost_mutex = NULL;
+#endif /* #ifdef CONFIG_RCU_BOOST */
 	INIT_LIST_HEAD(&p->rcu_node_entry);
 }
 
@@ -1876,14 +1890,11 @@ extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
 
 #ifdef CONFIG_HOTPLUG_CPU
-extern void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p);
 extern void idle_task_exit(void);
 #else
 static inline void idle_task_exit(void) {}
 #endif
 
-extern void sched_idle_next(void);
-
 #if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
 extern void wake_up_idle_cpu(int cpu);
 #else
@@ -1893,8 +1904,6 @@ static inline void wake_up_idle_cpu(int cpu) { }
 extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
-extern unsigned int sysctl_sched_shares_ratelimit;
-extern unsigned int sysctl_sched_shares_thresh;
 extern unsigned int sysctl_sched_child_runs_first;
 
 enum sched_tunable_scaling {
@@ -1910,6 +1919,7 @@ extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
 extern unsigned int sysctl_sched_time_avg;
 extern unsigned int sysctl_timer_migration;
+extern unsigned int sysctl_sched_shares_window;
 
 int sched_proc_update_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *length,
@@ -1935,6 +1945,24 @@ int sched_rt_handler(struct ctl_table *table, int write,
 
 extern unsigned int sysctl_sched_compat_yield;
 
+#ifdef CONFIG_SCHED_AUTOGROUP
+extern unsigned int sysctl_sched_autogroup_enabled;
+
+extern void sched_autogroup_create_attach(struct task_struct *p);
+extern void sched_autogroup_detach(struct task_struct *p);
+extern void sched_autogroup_fork(struct signal_struct *sig);
+extern void sched_autogroup_exit(struct signal_struct *sig);
+#ifdef CONFIG_PROC_FS
+extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
+extern int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice);
+#endif
+#else
+static inline void sched_autogroup_create_attach(struct task_struct *p) { }
+static inline void sched_autogroup_detach(struct task_struct *p) { }
+static inline void sched_autogroup_fork(struct signal_struct *sig) { }
+static inline void sched_autogroup_exit(struct signal_struct *sig) { }
+#endif
+
 #ifdef CONFIG_RT_MUTEXES
 extern int rt_mutex_getprio(struct task_struct *p);
 extern void rt_mutex_setprio(struct task_struct *p, int prio);
@@ -1953,9 +1981,10 @@ extern int task_nice(const struct task_struct *p);
 extern int can_nice(const struct task_struct *p, const int nice);
 extern int task_curr(const struct task_struct *p);
 extern int idle_cpu(int cpu);
-extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
+extern int sched_setscheduler(struct task_struct *, int,
+			      const struct sched_param *);
 extern int sched_setscheduler_nocheck(struct task_struct *, int,
-				      struct sched_param *);
+				      const struct sched_param *);
 extern struct task_struct *idle_task(int cpu);
 extern struct task_struct *curr_task(int cpu);
 extern void set_curr_task(int cpu, struct task_struct *p);
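The const qualification of sched_setscheduler() above lets in-kernel callers hand over a read-only parameter block. An illustrative caller, not taken from this patch:

static const struct sched_param fifo_param = { .sched_priority = 1 };

static int make_fifo(struct task_struct *p)
{
	/* now accepted: &fifo_param is a pointer to const */
	return sched_setscheduler(p, SCHED_FIFO, &fifo_param);
}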
diff --git a/include/linux/sfi.h b/include/linux/sfi.h
index 7f770c638e99..fe817918b30e 100644
--- a/include/linux/sfi.h
+++ b/include/linux/sfi.h
@@ -77,6 +77,8 @@
 #define SFI_OEM_ID_SIZE		6
 #define SFI_OEM_TABLE_ID_SIZE	8
 
+#define SFI_NAME_LEN		16
+
 #define SFI_SYST_SEARCH_BEGIN		0x000E0000
 #define SFI_SYST_SEARCH_END		0x000FFFFF
 
@@ -156,13 +158,13 @@ struct sfi_device_table_entry {
 	u16	addr;
 	u8	irq;
 	u32	max_freq;
-	char	name[16];
+	char	name[SFI_NAME_LEN];
 } __packed;
 
 struct sfi_gpio_table_entry {
-	char	controller_name[16];
+	char	controller_name[SFI_NAME_LEN];
 	u16	pin_no;
-	char	pin_name[16];
+	char	pin_name[SFI_NAME_LEN];
 } __packed;
 
 typedef int (*sfi_table_handler) (struct sfi_table_header *table);
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 38cf093ef62c..6abd9138beda 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -24,9 +24,9 @@ struct timer_list {
 	int slack;
 
 #ifdef CONFIG_TIMER_STATS
+	int start_pid;
 	void *start_site;
 	char start_comm[16];
-	int start_pid;
 #endif
 #ifdef CONFIG_LOCKDEP
 	struct lockdep_map lockdep_map;
@@ -48,12 +48,38 @@ extern struct tvec_base boot_tvec_bases;
 #define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn)
 #endif
 
+/*
+ * Note that all tvec_bases are 2 byte aligned and lower bit of
+ * base in timer_list is guaranteed to be zero. Use the LSB to
+ * indicate whether the timer is deferrable.
+ *
+ * A deferrable timer will work normally when the system is busy, but
+ * will not cause a CPU to come out of idle just to service it; instead,
+ * the timer will be serviced when the CPU eventually wakes up with a
+ * subsequent non-deferrable timer.
+ */
+#define TBASE_DEFERRABLE_FLAG		(0x1)
+
 #define TIMER_INITIALIZER(_function, _expires, _data) {	\
 		.entry = { .prev = TIMER_ENTRY_STATIC },	\
 		.function = (_function),			\
 		.expires = (_expires),				\
 		.data = (_data),				\
 		.base = &boot_tvec_bases,			\
+		.slack = -1,					\
+		__TIMER_LOCKDEP_MAP_INITIALIZER(		\
+			__FILE__ ":" __stringify(__LINE__))	\
+	}
+
+#define TBASE_MAKE_DEFERRED(ptr) ((struct tvec_base *)		\
+		  ((unsigned char *)(ptr) + TBASE_DEFERRABLE_FLAG))
+
+#define TIMER_DEFERRED_INITIALIZER(_function, _expires, _data) {\
+		.entry = { .prev = TIMER_ENTRY_STATIC },	\
+		.function = (_function),			\
+		.expires = (_expires),				\
+		.data = (_data),				\
+		.base = TBASE_MAKE_DEFERRED(&boot_tvec_bases),	\
 		__TIMER_LOCKDEP_MAP_INITIALIZER(		\
 			__FILE__ ":" __stringify(__LINE__))	\
 	}
@@ -248,11 +274,11 @@ static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
 
 extern void add_timer(struct timer_list *timer);
 
+extern int try_to_del_timer_sync(struct timer_list *timer);
+
 #ifdef CONFIG_SMP
-  extern int try_to_del_timer_sync(struct timer_list *timer);
   extern int del_timer_sync(struct timer_list *timer);
 #else
-# define try_to_del_timer_sync(t)	del_timer(t)
 # define del_timer_sync(t)		del_timer(t)
 #endif
 
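TIMER_DEFERRED_INITIALIZER tags the base pointer with TBASE_DEFERRABLE_FLAG, yielding a timer that only fires when the CPU is awake anyway. A sketch of a statically initialized deferrable timer (callback name and interval are illustrative, not from this patch):

static void cache_reap_fn(unsigned long data);
static struct timer_list cache_reap_timer =
	TIMER_DEFERRED_INITIALIZER(cache_reap_fn, 0, 0);

static void cache_reap_fn(unsigned long data)
{
	/* housekeeping that can wait for the next natural wakeup */
	mod_timer(&cache_reap_timer, jiffies + 10 * HZ);
}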
diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h
new file mode 100644
index 000000000000..d24aabaca474
--- /dev/null
+++ b/include/linux/timerqueue.h
@@ -0,0 +1,50 @@
+#ifndef _LINUX_TIMERQUEUE_H
+#define _LINUX_TIMERQUEUE_H
+
+#include <linux/rbtree.h>
+#include <linux/ktime.h>
+
+
+struct timerqueue_node {
+	struct rb_node node;
+	ktime_t expires;
+};
+
+struct timerqueue_head {
+	struct rb_root head;
+	struct timerqueue_node *next;
+};
+
+
+extern void timerqueue_add(struct timerqueue_head *head,
+				struct timerqueue_node *node);
+extern void timerqueue_del(struct timerqueue_head *head,
+				struct timerqueue_node *node);
+extern struct timerqueue_node *timerqueue_iterate_next(
+				struct timerqueue_node *node);
+
+/**
+ * timerqueue_getnext - Returns the timer with the earliest expiration time
+ *
+ * @head: head of timerqueue
+ *
+ * Returns a pointer to the timer node that has the
+ * earliest expiration time.
+ */
+static inline
+struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head)
+{
+	return head->next;
+}
+
+static inline void timerqueue_init(struct timerqueue_node *node)
+{
+	RB_CLEAR_NODE(&node->node);
+}
+
+static inline void timerqueue_init_head(struct timerqueue_head *head)
+{
+	head->head = RB_ROOT;
+	head->next = NULL;
+}
+#endif /* _LINUX_TIMERQUEUE_H */
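The new timerqueue layer wraps an rbtree and caches the earliest-expiring node, so timerqueue_getnext() is O(1). A minimal usage sketch, assuming only the declarations above (the function and variable names are illustrative):

static void timerqueue_example(void)
{
	struct timerqueue_head q;
	struct timerqueue_node n, *next;

	timerqueue_init_head(&q);
	timerqueue_init(&n);
	n.expires = ktime_add_ns(ktime_get(), NSEC_PER_MSEC);	/* set before adding */
	timerqueue_add(&q, &n);

	next = timerqueue_getnext(&q);	/* cached earliest-expiring node */
	timerqueue_del(&q, &n);
}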
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index d3e4f87e95c0..c6814616653b 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -32,7 +32,7 @@ struct tracepoint {
 	int state;			/* State. */
 	void (*regfunc)(void);
 	void (*unregfunc)(void);
-	struct tracepoint_func *funcs;
+	struct tracepoint_func __rcu *funcs;
 } __attribute__((aligned(32)));		/*
 					 * Aligned on 32 bytes because it is
 					 * globally visible and gcc happily
@@ -326,7 +326,7 @@ do_trace: \
  *	memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
  *	__entry->next_pid	= next->pid;
  *	__entry->next_prio	= next->prio;
- * )
+ * ),
  *
  * *
  * * Formatted output of a trace record via TP_printk().
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 0c0771f06bfa..bd257fee6031 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -127,12 +127,20 @@ struct execute_work {
 	.timer = TIMER_INITIALIZER(NULL, 0, 0),			\
 	}
 
+#define __DEFERRED_WORK_INITIALIZER(n, f) {			\
+	.work = __WORK_INITIALIZER((n).work, (f)),		\
+	.timer = TIMER_DEFERRED_INITIALIZER(NULL, 0, 0),	\
+	}
+
 #define DECLARE_WORK(n, f)					\
 	struct work_struct n = __WORK_INITIALIZER(n, f)
 
 #define DECLARE_DELAYED_WORK(n, f)				\
 	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
 
+#define DECLARE_DEFERRED_WORK(n, f)				\
+	struct delayed_work n = __DEFERRED_WORK_INITIALIZER(n, f)
+
 /*
  * initialize a work item's function pointer
  */
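DECLARE_DEFERRED_WORK pairs delayed work with the deferrable timer support added in timer.h above. A sketch with hypothetical names, not from this patch:

static void flush_stats_fn(struct work_struct *work);
static DECLARE_DEFERRED_WORK(flush_stats, flush_stats_fn);

static void flush_stats_fn(struct work_struct *work)
{
	/* re-queue; the underlying deferrable timer will not wake an idle CPU */
	schedule_delayed_work(&flush_stats, round_jiffies_relative(60 * HZ));
}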
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index b0b4eb24d592..da39b22636f7 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -21,6 +21,16 @@
 #undef CREATE_TRACE_POINTS
 
 #include <linux/stringify.h>
+/*
+ * module.h includes tracepoints, and because ftrace.h
+ * pulls in module.h:
+ *  trace/ftrace.h -> linux/ftrace_event.h -> linux/perf_event.h ->
+ *  linux/ftrace.h -> linux/module.h
+ * we must include module.h here before we play with any of
+ * the TRACE_EVENT() macros, otherwise the tracepoints included
+ * by module.h may break the build.
+ */
+#include <linux/module.h>
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
index 75ce9d500d8e..f10293c41b1e 100644
--- a/include/trace/events/skb.h
+++ b/include/trace/events/skb.h
@@ -25,9 +25,7 @@ TRACE_EVENT(kfree_skb,
 
 	TP_fast_assign(
 		__entry->skbaddr = skb;
-		if (skb) {
-			__entry->protocol = ntohs(skb->protocol);
-		}
+		__entry->protocol = ntohs(skb->protocol);
 		__entry->location = location;
 	),
 