Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/clocksource.h   |  7
-rw-r--r--  include/linux/ftrace_event.h  |  2
-rw-r--r--  include/linux/genhd.h         |  1
-rw-r--r--  include/linux/init_task.h     |  2
-rw-r--r--  include/linux/iocontext.h     | 10
-rw-r--r--  include/linux/kernel.h        | 28
-rw-r--r--  include/linux/math64.h        |  4
-rw-r--r--  include/linux/preempt.h       |  5
-rw-r--r--  include/linux/printk.h        | 10
-rw-r--r--  include/linux/rcupdate.h      | 83
-rw-r--r--  include/linux/rcutiny.h       | 10
-rw-r--r--  include/linux/rcutree.h       | 19
-rw-r--r--  include/linux/ring_buffer.h   |  3
-rw-r--r--  include/linux/sched.h         | 54
-rw-r--r--  include/linux/srcu.h          | 15
-rw-r--r--  include/linux/timex.h         | 17
-rw-r--r--  include/linux/wait.h          |  5
-rw-r--r--  include/linux/workqueue.h     |  4
18 files changed, 218 insertions, 61 deletions
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 081147da0564..fbe89e17124e 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -319,13 +319,6 @@ static inline void __clocksource_updatefreq_khz(struct clocksource *cs, u32 khz)
 	__clocksource_updatefreq_scale(cs, 1000, khz);
 }
 
-static inline void
-clocksource_calc_mult_shift(struct clocksource *cs, u32 freq, u32 minsec)
-{
-	return clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
-				      NSEC_PER_SEC, minsec);
-}
-
 #ifdef CONFIG_GENERIC_TIME_VSYSCALL
 extern void
 update_vsyscall(struct timespec *ts, struct timespec *wtm,
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index dd478fc8f9f5..5f3f3be5af09 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -144,12 +144,14 @@ struct event_filter;
 enum trace_reg {
 	TRACE_REG_REGISTER,
 	TRACE_REG_UNREGISTER,
+#ifdef CONFIG_PERF_EVENTS
 	TRACE_REG_PERF_REGISTER,
 	TRACE_REG_PERF_UNREGISTER,
 	TRACE_REG_PERF_OPEN,
 	TRACE_REG_PERF_CLOSE,
 	TRACE_REG_PERF_ADD,
 	TRACE_REG_PERF_DEL,
+#endif
 };
 
 struct ftrace_event_call;
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index fe23ee768589..e61d3192448e 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -596,6 +596,7 @@ extern char *disk_name (struct gendisk *hd, int partno, char *buf);
 
 extern int disk_expand_part_tbl(struct gendisk *disk, int target);
 extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
+extern int invalidate_partitions(struct gendisk *disk, struct block_device *bdev);
 extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
 						     int partno, sector_t start,
 						     sector_t len, int flags,
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 9c66b1ada9d7..f994d51f70f2 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -149,7 +149,7 @@ extern struct cred init_cred;
 	},								\
 	.rt		= {						\
 		.run_list	= LIST_HEAD_INIT(tsk.rt.run_list),	\
-		.time_slice	= HZ,					\
+		.time_slice	= RR_TIMESLICE,				\
 		.nr_cpus_allowed = NR_CPUS,				\
 	},								\
 	.tasks		= LIST_HEAD_INIT(tsk.tasks),			\
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 119773eebe31..1a3018063034 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -6,8 +6,11 @@
 #include <linux/workqueue.h>
 
 enum {
-	ICQ_IOPRIO_CHANGED,
-	ICQ_CGROUP_CHANGED,
+	ICQ_IOPRIO_CHANGED	= 1 << 0,
+	ICQ_CGROUP_CHANGED	= 1 << 1,
+	ICQ_EXITED		= 1 << 2,
+
+	ICQ_CHANGED_MASK	= ICQ_IOPRIO_CHANGED | ICQ_CGROUP_CHANGED,
 };
 
 /*
@@ -88,7 +91,7 @@ struct io_cq {
 		struct rcu_head		__rcu_head;
 	};
 
-	unsigned long		changed;
+	unsigned int		flags;
 };
 
 /*
@@ -139,6 +142,7 @@ struct io_context *get_task_io_context(struct task_struct *task,
 				       gfp_t gfp_flags, int node);
 void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
 void ioc_cgroup_changed(struct io_context *ioc);
+unsigned int icq_get_changed(struct io_cq *icq);
 #else
 struct io_context;
 static inline void put_io_context(struct io_context *ioc) { }
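The io_cq notification state above becomes a bit mask (ICQ_CHANGED_MASK covers the two "changed" bits) with an icq_get_changed() accessor. A minimal caller-side sketch in C, assuming icq_get_changed() returns (and clears) whichever ICQ_*_CHANGED bits were set; the handler function and messages are illustrative, not from the patch:

#include <linux/iocontext.h>
#include <linux/printk.h>

/* Sketch: fetch the pending "changed" bits for an icq and react to them. */
static void example_icq_handle_changes(struct io_cq *icq)
{
	unsigned int changed = icq_get_changed(icq);	/* assumed to clear the bits it returns */

	if (changed & ICQ_IOPRIO_CHANGED)
		pr_debug("icq %p: ioprio changed\n", icq);
	if (changed & ICQ_CGROUP_CHANGED)
		pr_debug("icq %p: cgroup changed\n", icq);
}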
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index e8343422240a..5582c7985567 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -85,6 +85,19 @@
 } \
 )
 
+/*
+ * Multiplies an integer by a fraction, while avoiding unnecessary
+ * overflow or loss of precision.
+ */
+#define mult_frac(x, numer, denom)(			\
+{							\
+	typeof(x) quot = (x) / (denom);			\
+	typeof(x) rem  = (x) % (denom);			\
+	(quot * (numer)) + ((rem * (numer)) / (denom));	\
+} \
+)
+
+
 #define _RET_IP_		(unsigned long)__builtin_return_address(0)
 #define _THIS_IP_  ({ __label__ __here; __here: (unsigned long)&&__here; })
 
@@ -414,16 +427,10 @@ extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
  * Most likely, you want to use tracing_on/tracing_off.
  */
 #ifdef CONFIG_RING_BUFFER
-void tracing_on(void);
-void tracing_off(void);
 /* trace_off_permanent stops recording with no way to bring it back */
 void tracing_off_permanent(void);
-int tracing_is_on(void);
 #else
-static inline void tracing_on(void) { }
-static inline void tracing_off(void) { }
 static inline void tracing_off_permanent(void) { }
-static inline int tracing_is_on(void) { return 0; }
 #endif
 
 enum ftrace_dump_mode {
@@ -433,6 +440,10 @@ enum ftrace_dump_mode {
 };
 
 #ifdef CONFIG_TRACING
+void tracing_on(void);
+void tracing_off(void);
+int tracing_is_on(void);
+
 extern void tracing_start(void);
 extern void tracing_stop(void);
 extern void ftrace_off_permanent(void);
@@ -517,6 +528,11 @@ static inline void tracing_start(void) { }
 static inline void tracing_stop(void) { }
 static inline void ftrace_off_permanent(void) { }
 static inline void trace_dump_stack(void) { }
+
+static inline void tracing_on(void) { }
+static inline void tracing_off(void) { }
+static inline int tracing_is_on(void) { return 0; }
+
 static inline int
 trace_printk(const char *fmt, ...)
 {
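The new mult_frac() comment is about intermediate overflow: computing (x * numer) / denom directly can overflow even when the final result fits, so the macro splits x into x/denom and x%denom first. A small worked example (values chosen purely for illustration):

#include <linux/kernel.h>

static u32 example_scale(u32 x)
{
	/*
	 * Naive (x * 3000) / 1000 overflows u32 once x exceeds ~1.43M,
	 * e.g. x = 2,000,000 gives an intermediate of 6,000,000,000.
	 * mult_frac() computes quot = x/1000 and rem = x%1000 first, so
	 * the products stay small: 2000*3000 + (0*3000)/1000 = 6,000,000.
	 */
	return mult_frac(x, 3000, 1000);
}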
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 23fcdfcba81b..b8ba85544721 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -6,6 +6,8 @@
 
 #if BITS_PER_LONG == 64
 
+#define div64_long(x,y) div64_s64((x),(y))
+
 /**
  * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
  *
@@ -45,6 +47,8 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
 
 #elif BITS_PER_LONG == 32
 
+#define div64_long(x,y) div_s64((x),(y))
+
 #ifndef div_u64_rem
 static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 {
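div64_long() divides a 64-bit signed value by a native long: it maps to div64_s64() on 64-bit kernels and div_s64() on 32-bit kernels. A minimal usage sketch (the function and variable names are illustrative):

#include <linux/math64.h>

static s64 example_average(s64 total_ns, long nr_samples)
{
	if (!nr_samples)
		return 0;
	/* expands to div64_s64() on 64-bit and div_s64() on 32-bit builds */
	return div64_long(total_ns, nr_samples);
}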
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 58969b2a8a82..5a710b9c578e 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -48,12 +48,14 @@ do { \
 	barrier(); \
 } while (0)
 
-#define preempt_enable_no_resched() \
+#define sched_preempt_enable_no_resched() \
 do { \
 	barrier(); \
 	dec_preempt_count(); \
 } while (0)
 
+#define preempt_enable_no_resched()	sched_preempt_enable_no_resched()
+
 #define preempt_enable() \
 do { \
 	preempt_enable_no_resched(); \
@@ -92,6 +94,7 @@ do { \
 #else /* !CONFIG_PREEMPT_COUNT */
 
 #define preempt_disable()		do { } while (0)
+#define sched_preempt_enable_no_resched()	do { } while (0)
 #define preempt_enable_no_resched()	do { } while (0)
 #define preempt_enable()		do { } while (0)
 
diff --git a/include/linux/printk.h b/include/linux/printk.h
index f0e22f75143f..1f77a4174ee0 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -101,6 +101,11 @@ asmlinkage __printf(1, 2) __cold
 int printk(const char *fmt, ...);
 
 /*
+ * Special printk facility for scheduler use only, _DO_NOT_USE_ !
+ */
+__printf(1, 2) __cold int printk_sched(const char *fmt, ...);
+
+/*
  * Please don't use printk_ratelimit(), because it shares ratelimiting state
  * with all other unrelated printk_ratelimit() callsites. Instead use
  * printk_ratelimited() or plain old __ratelimit().
@@ -127,6 +132,11 @@ int printk(const char *s, ...)
 {
 	return 0;
 }
+static inline __printf(1, 2) __cold
+int printk_sched(const char *s, ...)
+{
+	return 0;
+}
 static inline int printk_ratelimit(void)
 {
 	return 0;
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 81c04f4348ec..937217425c47 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -190,6 +190,33 @@ extern void rcu_idle_exit(void);
 extern void rcu_irq_enter(void);
 extern void rcu_irq_exit(void);
 
+/**
+ * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
+ * @a: Code that RCU needs to pay attention to.
+ *
+ * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
+ * in the inner idle loop, that is, between the rcu_idle_enter() and
+ * the rcu_idle_exit() -- RCU will happily ignore any such read-side
+ * critical sections.  However, things like powertop need tracepoints
+ * in the inner idle loop.
+ *
+ * This macro provides the way out:  RCU_NONIDLE(do_something_with_RCU())
+ * will tell RCU that it needs to pay attention, invoke its argument
+ * (in this example, a call to the do_something_with_RCU() function),
+ * and then tell RCU to go back to ignoring this CPU.  It is permissible
+ * to nest RCU_NONIDLE() wrappers, but the nesting level is currently
+ * quite limited.  If deeper nesting is required, it will be necessary
+ * to adjust DYNTICK_TASK_NESTING_VALUE accordingly.
+ *
+ * This macro may be used from process-level code only.
+ */
+#define RCU_NONIDLE(a) \
+	do { \
+		rcu_idle_exit(); \
+		do { a; } while (0); \
+		rcu_idle_enter(); \
+	} while (0)
+
 /*
  * Infrastructure to implement the synchronize_() primitives in
  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
@@ -226,6 +253,15 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
 }
 #endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
 
+#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
+bool rcu_lockdep_current_cpu_online(void);
+#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
+static inline bool rcu_lockdep_current_cpu_online(void)
+{
+	return 1;
+}
+#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 #ifdef CONFIG_PROVE_RCU
@@ -239,13 +275,11 @@ static inline int rcu_is_cpu_idle(void)
 
 static inline void rcu_lock_acquire(struct lockdep_map *map)
 {
-	WARN_ON_ONCE(rcu_is_cpu_idle());
 	lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
 }
 
 static inline void rcu_lock_release(struct lockdep_map *map)
 {
-	WARN_ON_ONCE(rcu_is_cpu_idle());
 	lock_release(map, 1, _THIS_IP_);
 }
 
@@ -270,6 +304,9 @@ extern int debug_lockdep_rcu_enabled(void);
  * occur in the same context, for example, it is illegal to invoke
  * rcu_read_unlock() in process context if the matching rcu_read_lock()
  * was invoked from within an irq handler.
+ *
+ * Note that rcu_read_lock() is disallowed if the CPU is either idle or
+ * offline from an RCU perspective, so check for those as well.
  */
 static inline int rcu_read_lock_held(void)
 {
@@ -277,6 +314,8 @@ static inline int rcu_read_lock_held(void)
 		return 1;
 	if (rcu_is_cpu_idle())
 		return 0;
+	if (!rcu_lockdep_current_cpu_online())
+		return 0;
 	return lock_is_held(&rcu_lock_map);
 }
 
@@ -313,6 +352,9 @@ extern int rcu_read_lock_bh_held(void);
 * notice an extended quiescent state to other CPUs that started a grace
 * period. Otherwise we would delay any grace period as long as we run in
 * the idle task.
+ *
+ * Similarly, we avoid claiming an SRCU read lock held if the current
+ * CPU is offline.
 */
 #ifdef CONFIG_PREEMPT_COUNT
 static inline int rcu_read_lock_sched_held(void)
@@ -323,6 +365,8 @@ static inline int rcu_read_lock_sched_held(void)
 		return 1;
 	if (rcu_is_cpu_idle())
 		return 0;
+	if (!rcu_lockdep_current_cpu_online())
+		return 0;
 	if (debug_locks)
 		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
 	return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
@@ -381,8 +425,22 @@ extern int rcu_my_thread_group_empty(void);
 		} \
 	} while (0)
 
+#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
+static inline void rcu_preempt_sleep_check(void)
+{
+	rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
+			   "Illegal context switch in RCU read-side "
+			   "critical section");
+}
+#else /* #ifdef CONFIG_PROVE_RCU */
+static inline void rcu_preempt_sleep_check(void)
+{
+}
+#endif /* #else #ifdef CONFIG_PROVE_RCU */
+
 #define rcu_sleep_check() \
 	do { \
+		rcu_preempt_sleep_check(); \
 		rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \
 				   "Illegal context switch in RCU-bh" \
 				   " read-side critical section"); \
@@ -470,6 +528,13 @@ extern int rcu_my_thread_group_empty(void);
 * NULL. Although rcu_access_pointer() may also be used in cases where
 * update-side locks prevent the value of the pointer from changing, you
 * should instead use rcu_dereference_protected() for this use case.
+ *
+ * It is also permissible to use rcu_access_pointer() when read-side
+ * access to the pointer was removed at least one grace period ago, as
+ * is the case in the context of the RCU callback that is freeing up
+ * the data, or after a synchronize_rcu() returns.  This can be useful
+ * when tearing down multi-linked structures after a grace period
+ * has elapsed.
 */
 #define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
 
@@ -659,6 +724,8 @@ static inline void rcu_read_lock(void)
 	__rcu_read_lock();
 	__acquire(RCU);
 	rcu_lock_acquire(&rcu_lock_map);
+	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+			   "rcu_read_lock() used illegally while idle");
 }
 
 /*
@@ -678,6 +745,8 @@ static inline void rcu_read_lock(void)
  */
 static inline void rcu_read_unlock(void)
 {
+	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+			   "rcu_read_unlock() used illegally while idle");
 	rcu_lock_release(&rcu_lock_map);
 	__release(RCU);
 	__rcu_read_unlock();
@@ -705,6 +774,8 @@ static inline void rcu_read_lock_bh(void)
 	local_bh_disable();
 	__acquire(RCU_BH);
 	rcu_lock_acquire(&rcu_bh_lock_map);
+	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+			   "rcu_read_lock_bh() used illegally while idle");
 }
 
 /*
@@ -714,6 +785,8 @@ static inline void rcu_read_lock_bh(void)
  */
 static inline void rcu_read_unlock_bh(void)
 {
+	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+			   "rcu_read_unlock_bh() used illegally while idle");
 	rcu_lock_release(&rcu_bh_lock_map);
 	__release(RCU_BH);
 	local_bh_enable();
@@ -737,6 +810,8 @@ static inline void rcu_read_lock_sched(void)
 	preempt_disable();
 	__acquire(RCU_SCHED);
 	rcu_lock_acquire(&rcu_sched_lock_map);
+	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+			   "rcu_read_lock_sched() used illegally while idle");
 }
 
 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -753,6 +828,8 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
 */
 static inline void rcu_read_unlock_sched(void)
 {
+	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+			   "rcu_read_unlock_sched() used illegally while idle");
 	rcu_lock_release(&rcu_sched_lock_map);
 	__release(RCU_SCHED);
 	preempt_enable();
@@ -841,7 +918,7 @@ void __kfree_rcu(struct rcu_head *head, unsigned long offset)
 	/* See the kfree_rcu() header comment. */
 	BUILD_BUG_ON(!__is_kfree_rcu_offset(offset));
 
-	call_rcu(head, (rcu_callback)offset);
+	kfree_call_rcu(head, (rcu_callback)offset);
 }
 
 /**
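A sketch of the usage pattern the RCU_NONIDLE() comment describes: process-level idle-loop code that must briefly run something containing RCU readers (a tracepoint, say) between rcu_idle_enter() and rcu_idle_exit(). The idle-loop shape and do_idle_tracepoint() are illustrative names, not from the patch:

#include <linux/rcupdate.h>

/* Placeholder for code containing RCU read-side critical sections. */
static void do_idle_tracepoint(void)
{
	rcu_read_lock();
	/* ... read RCU-protected state for tracing ... */
	rcu_read_unlock();
}

static void example_idle_loop_body(void)
{
	rcu_idle_enter();			/* RCU stops watching this CPU    */
	RCU_NONIDLE(do_idle_tracepoint());	/* readers briefly legal in here   */
	rcu_idle_exit();			/* RCU watches this CPU again      */
}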
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 00b7a5e493d2..e93df77176d1 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -27,13 +27,9 @@
 
 #include <linux/cache.h>
 
-#ifdef CONFIG_RCU_BOOST
 static inline void rcu_init(void)
 {
 }
-#else /* #ifdef CONFIG_RCU_BOOST */
-void rcu_init(void);
-#endif /* #else #ifdef CONFIG_RCU_BOOST */
 
 static inline void rcu_barrier_bh(void)
 {
@@ -83,6 +79,12 @@ static inline void synchronize_sched_expedited(void)
 	synchronize_sched();
 }
 
+static inline void kfree_call_rcu(struct rcu_head *head,
+				  void (*func)(struct rcu_head *rcu))
+{
+	call_rcu(head, func);
+}
+
 #ifdef CONFIG_TINY_RCU
 
 static inline void rcu_preempt_note_context_switch(void)
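With the Tiny-RCU stub above, kfree_call_rcu() simply becomes call_rcu(), so kfree_rcu() callers behave the same whether TINY_RCU or TREE_RCU is built. A caller-side sketch (the struct and field names are illustrative):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_node {
	int value;
	struct rcu_head rcu;
};

static void example_free(struct example_node *p)
{
	/* frees p after a grace period; reaches kfree_call_rcu() via __kfree_rcu() */
	kfree_rcu(p, rcu);
}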
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 67458468f1a8..e8ee5dd0854c 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -61,6 +61,24 @@ extern void synchronize_rcu_bh(void);
 extern void synchronize_sched_expedited(void);
 extern void synchronize_rcu_expedited(void);
 
+void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
+
+/**
+ * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period
+ *
+ * Wait for an RCU-bh grace period to elapse, but use a "big hammer"
+ * approach to force the grace period to end quickly.  This consumes
+ * significant time on all CPUs and is unfriendly to real-time workloads,
+ * so is thus not recommended for any sort of common-case code.  In fact,
+ * if you are using synchronize_rcu_bh_expedited() in a loop, please
+ * restructure your code to batch your updates, and then use a single
+ * synchronize_rcu_bh() instead.
+ *
+ * Note that it is illegal to call this function while holding any lock
+ * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
+ * to call this function from a CPU-hotplug notifier.  Failing to observe
+ * these restrictions will result in deadlock.
+ */
 static inline void synchronize_rcu_bh_expedited(void)
 {
 	synchronize_sched_expedited();
@@ -83,6 +101,7 @@ extern void rcu_sched_force_quiescent_state(void);
 /* A context switch is a grace period for RCU-sched and RCU-bh. */
 static inline int rcu_blocking_is_gp(void)
 {
+	might_sleep();  /* Check for RCU read-side critical section. */
 	return num_online_cpus() == 1;
 }
 
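The new synchronize_rcu_bh_expedited() comment recommends batching updates rather than forcing a grace period per update. A sketch of that restructuring, with placeholder table/entry helpers standing in for the real update and reclaim code:

#include <linux/rcupdate.h>

struct example_table { int nr; };

/* Placeholders: remove an RCU-bh-visible entry, and reclaim it later. */
static void unpublish_entry(struct example_table *t, int i) { }
static void free_entry(struct example_table *t, int i) { }

/* Anti-pattern the comment warns about: one expedited grace period per update. */
static void example_slow_teardown(struct example_table *t)
{
	int i;

	for (i = 0; i < t->nr; i++) {
		unpublish_entry(t, i);
		synchronize_rcu_bh_expedited();		/* expensive, disturbs all CPUs */
		free_entry(t, i);
	}
}

/* Recommended restructuring: batch all updates, then wait once. */
static void example_batched_teardown(struct example_table *t)
{
	int i;

	for (i = 0; i < t->nr; i++)
		unpublish_entry(t, i);			/* all updates first */

	synchronize_rcu_bh();				/* single grace period */

	for (i = 0; i < t->nr; i++)
		free_entry(t, i);
}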
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 67be0376d8e3..7be2e88f23fd 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -151,6 +151,9 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
 
 void ring_buffer_record_disable(struct ring_buffer *buffer);
 void ring_buffer_record_enable(struct ring_buffer *buffer);
+void ring_buffer_record_off(struct ring_buffer *buffer);
+void ring_buffer_record_on(struct ring_buffer *buffer);
+int ring_buffer_record_is_on(struct ring_buffer *buffer);
 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0657368bd78f..e074e1e54f85 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -361,6 +361,7 @@ extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
+extern void schedule_preempt_disabled(void);
 extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
 
 struct nsproxy;
@@ -905,6 +906,7 @@ struct sched_group_power {
 	 * single CPU.
 	 */
 	unsigned int power, power_orig;
+	unsigned long next_update;
 	/*
 	 * Number of busy cpus in this group.
 	 */
@@ -1052,6 +1054,8 @@ static inline int test_sd_parent(struct sched_domain *sd, int flag)
 unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
 unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
 
+bool cpus_share_cache(int this_cpu, int that_cpu);
+
 #else /* CONFIG_SMP */
 
 struct sched_domain_attr;
@@ -1061,6 +1065,12 @@ partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 			struct sched_domain_attr *dattr_new)
 {
 }
+
+static inline bool cpus_share_cache(int this_cpu, int that_cpu)
+{
+	return true;
+}
+
 #endif	/* !CONFIG_SMP */
 
 
@@ -1225,6 +1235,12 @@ struct sched_rt_entity {
 #endif
 };
 
+/*
+ * default timeslice is 100 msecs (used only for SCHED_RR tasks).
+ * Timeslices get refilled after they expire.
+ */
+#define RR_TIMESLICE		(100 * HZ / 1000)
+
 struct rcu_node;
 
 enum perf_event_task_context {
@@ -1319,6 +1335,11 @@ struct task_struct {
 	unsigned sched_reset_on_fork:1;
 	unsigned sched_contributes_to_load:1;
 
+#ifdef CONFIG_GENERIC_HARDIRQS
+	/* IRQ handler threads */
+	unsigned irq_thread:1;
+#endif
+
 	pid_t pid;
 	pid_t tgid;
 
@@ -1427,11 +1448,6 @@ struct task_struct {
 	 * mempolicy */
 	spinlock_t alloc_lock;
 
-#ifdef CONFIG_GENERIC_HARDIRQS
-	/* IRQ handler threads */
-	struct irqaction *irqaction;
-#endif
-
 	/* Protection of the PI data structures: */
 	raw_spinlock_t pi_lock;
 
@@ -1863,8 +1879,7 @@ extern void task_clear_jobctl_pending(struct task_struct *task,
 #ifdef CONFIG_PREEMPT_RCU
 
 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
-#define RCU_READ_UNLOCK_BOOSTED (1 << 1) /* boosted while in RCU read-side. */
-#define RCU_READ_UNLOCK_NEED_QS (1 << 2) /* RCU core needs CPU response. */
+#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
 
 static inline void rcu_copy_process(struct task_struct *p)
 {
@@ -2048,7 +2063,7 @@ extern void sched_autogroup_fork(struct signal_struct *sig);
 extern void sched_autogroup_exit(struct signal_struct *sig);
 #ifdef CONFIG_PROC_FS
 extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
-extern int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice);
+extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
 #endif
 #else
 static inline void sched_autogroup_create_attach(struct task_struct *p) { }
@@ -2065,12 +2080,20 @@ extern unsigned int sysctl_sched_cfs_bandwidth_slice;
 extern int rt_mutex_getprio(struct task_struct *p);
 extern void rt_mutex_setprio(struct task_struct *p, int prio);
 extern void rt_mutex_adjust_pi(struct task_struct *p);
+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
+	return tsk->pi_blocked_on != NULL;
+}
 #else
 static inline int rt_mutex_getprio(struct task_struct *p)
 {
 	return p->normal_prio;
 }
 # define rt_mutex_adjust_pi(p)		do { } while (0)
+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
+	return false;
+}
 #endif
 
 extern bool yield_to(struct task_struct *p, bool preempt);
@@ -2389,12 +2412,15 @@ static inline void task_unlock(struct task_struct *p)
 extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
 							unsigned long *flags);
 
-#define lock_task_sighand(tsk, flags)					\
-({	struct sighand_struct *__ss;					\
-	__cond_lock(&(tsk)->sighand->siglock,				\
-		    (__ss = __lock_task_sighand(tsk, flags)));		\
-	__ss;								\
-})									\
+static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
+						       unsigned long *flags)
+{
+	struct sighand_struct *ret;
+
+	ret = __lock_task_sighand(tsk, flags);
+	(void)__cond_lock(&tsk->sighand->siglock, ret);
+	return ret;
+}
 
 static inline void unlock_task_sighand(struct task_struct *tsk,
 						unsigned long *flags)
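lock_task_sighand() changes above from a macro to a static inline, but the calling convention is unchanged: it returns the sighand pointer (or NULL) and fills in the saved IRQ flags. A usage sketch (the work done under the lock is illustrative):

#include <linux/sched.h>

static void example_poke_signals(struct task_struct *tsk)
{
	struct sighand_struct *sighand;
	unsigned long flags;

	sighand = lock_task_sighand(tsk, &flags);
	if (!sighand)
		return;		/* task is exiting; no sighand left to lock */

	/* ... inspect or modify tsk->signal / pending signals here ... */

	unlock_task_sighand(tsk, &flags);
}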
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index e1b005918bbb..d3d5fa54f25e 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -99,15 +99,18 @@ long srcu_batches_completed(struct srcu_struct *sp);
  * power mode.  This way we can notice an extended quiescent state to
  * other CPUs that started a grace period. Otherwise we would delay any
  * grace period as long as we run in the idle task.
+ *
+ * Similarly, we avoid claiming an SRCU read lock held if the current
+ * CPU is offline.
  */
 static inline int srcu_read_lock_held(struct srcu_struct *sp)
 {
-	if (rcu_is_cpu_idle())
-		return 0;
-
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
-
+	if (rcu_is_cpu_idle())
+		return 0;
+	if (!rcu_lockdep_current_cpu_online())
+		return 0;
 	return lock_is_held(&sp->dep_map);
 }
 
@@ -169,6 +172,8 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
 	int retval = __srcu_read_lock(sp);
 
 	rcu_lock_acquire(&(sp)->dep_map);
+	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+			   "srcu_read_lock() used illegally while idle");
 	return retval;
 }
 
@@ -182,6 +187,8 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
 static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
 	__releases(sp)
 {
+	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+			   "srcu_read_unlock() used illegally while idle");
 	rcu_lock_release(&(sp)->dep_map);
 	__srcu_read_unlock(sp, idx);
 }
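The asserts added above fire if SRCU readers run from the idle loop. For reference, a conventional SRCU read-side section that satisfies them (my_srcu is an illustrative srcu_struct, assumed to be initialized elsewhere with init_srcu_struct()):

#include <linux/srcu.h>

static struct srcu_struct my_srcu;	/* assumed: init_srcu_struct(&my_srcu) at setup */

static void example_srcu_reader(void)
{
	int idx;

	idx = srcu_read_lock(&my_srcu);		/* asserts !rcu_is_cpu_idle() */
	/* ... dereference data protected by my_srcu ... */
	srcu_read_unlock(&my_srcu, idx);
}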
diff --git a/include/linux/timex.h b/include/linux/timex.h
index aa60fe7b6ed6..b75e1864ed19 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -234,23 +234,9 @@ struct timex {
 extern unsigned long tick_usec;		/* USER_HZ period (usec) */
 extern unsigned long tick_nsec;		/* ACTHZ period (nsec) */
 
-/*
- * phase-lock loop variables
- */
-extern int time_status;		/* clock synchronization status bits */
-
 extern void ntp_init(void);
 extern void ntp_clear(void);
 
-/**
- * ntp_synced - Returns 1 if the NTP status is not UNSYNC
- *
- */
-static inline int ntp_synced(void)
-{
-	return !(time_status & STA_UNSYNC);
-}
-
 /* Required to safely shift negative values */
 #define shift_right(x, s) ({	\
 	__typeof__(x) __x = (x);	\
@@ -264,10 +250,9 @@ static inline int ntp_synced(void)
 #define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ)
 
 /* Returns how long ticks are at present, in ns / 2^NTP_SCALE_SHIFT. */
-extern u64 tick_length;
+extern u64 ntp_tick_length(void);
 
 extern void second_overflow(void);
-extern void update_ntp_one_tick(void);
 extern int do_adjtimex(struct timex *);
 extern void hardpps(const struct timespec *, const struct timespec *);
 
diff --git a/include/linux/wait.h b/include/linux/wait.h
index a9ce45e8501c..7d9a9e990ce6 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -157,7 +157,7 @@ void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
 void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
 void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
 			void *key);
-void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
+void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
 void __wake_up_bit(wait_queue_head_t *, void *, int);
 int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
@@ -170,7 +170,8 @@ wait_queue_head_t *bit_waitqueue(void *, int);
 #define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
 #define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
 #define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
-#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL)
+#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
+#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)
 
 #define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
 #define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
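__wake_up_locked() now takes an nr argument, which is what makes wake_up_all_locked() possible; both expect the caller to already hold the waitqueue spinlock. A sketch of that pattern (the done flag is illustrative):

#include <linux/spinlock.h>
#include <linux/wait.h>

static void example_complete_all(wait_queue_head_t *q, bool *done)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	*done = true;			/* publish state under the same lock waiters use */
	wake_up_all_locked(q);		/* i.e. __wake_up_locked(q, TASK_NORMAL, 0)      */
	spin_unlock_irqrestore(&q->lock, flags);
}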
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index eb8b9f15f2e0..af155450cabb 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -289,12 +289,16 @@ enum {
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
+ *
+ * system_nrt_freezable_wq is equivalent to system_nrt_wq except that
+ * it's freezable.
 */
 extern struct workqueue_struct *system_wq;
 extern struct workqueue_struct *system_long_wq;
 extern struct workqueue_struct *system_nrt_wq;
 extern struct workqueue_struct *system_unbound_wq;
 extern struct workqueue_struct *system_freezable_wq;
+extern struct workqueue_struct *system_nrt_freezable_wq;
 
 extern struct workqueue_struct *
 __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
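system_nrt_freezable_wq combines the non-reentrancy of system_nrt_wq with freezability, so queued work neither runs concurrently on two CPUs nor runs while the system is freezing for suspend. A minimal sketch (the work item and function names are illustrative):

#include <linux/workqueue.h>

static void example_work_fn(struct work_struct *work)
{
	/* ... touches state that must be quiescent across suspend ... */
}

static DECLARE_WORK(example_work, example_work_fn);

static void example_kick(void)
{
	queue_work(system_nrt_freezable_wq, &example_work);
}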