Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/cpu.h              |  17
-rw-r--r--  include/linux/debug_locks.h      |   5
-rw-r--r--  include/linux/futex.h            |   6
-rw-r--r--  include/linux/hardirq.h          |   6
-rw-r--r--  include/linux/hrtimer.h          |  14
-rw-r--r--  include/linux/init_task.h        |   7
-rw-r--r--  include/linux/interrupt.h        |   1
-rw-r--r--  include/linux/jiffies.h          |   6
-rw-r--r--  include/linux/kernel.h           |   4
-rw-r--r--  include/linux/latencytop.h       |  44
-rw-r--r--  include/linux/notifier.h         |   4
-rw-r--r--  include/linux/rcuclassic.h       | 164
-rw-r--r--  include/linux/rcupdate.h         | 173
-rw-r--r--  include/linux/rcupreempt.h       |  86
-rw-r--r--  include/linux/rcupreempt_trace.h |  99
-rw-r--r--  include/linux/sched.h            |  83
-rw-r--r--  include/linux/smp_lock.h         |  14
-rw-r--r--  include/linux/stacktrace.h       |   3
-rw-r--r--  include/linux/topology.h         |   5
19 files changed, 581 insertions, 160 deletions
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 92f2029a34f3..0be8d65bc3c8 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -71,18 +71,27 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb)
 
 int cpu_up(unsigned int cpu);
 
+extern void cpu_hotplug_init(void);
+
 #else
 
 static inline int register_cpu_notifier(struct notifier_block *nb)
 {
 	return 0;
 }
+
 static inline void unregister_cpu_notifier(struct notifier_block *nb)
 {
 }
 
+static inline void cpu_hotplug_init(void)
+{
+}
+
 #endif /* CONFIG_SMP */
 extern struct sysdev_class cpu_sysdev_class;
+extern void cpu_maps_update_begin(void);
+extern void cpu_maps_update_done(void);
 
 #ifdef CONFIG_HOTPLUG_CPU
 /* Stop CPUs going up and down. */
@@ -97,8 +106,8 @@ static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
 	mutex_unlock(cpu_hp_mutex);
 }
 
-extern void lock_cpu_hotplug(void);
-extern void unlock_cpu_hotplug(void);
+extern void get_online_cpus(void);
+extern void put_online_cpus(void);
 #define hotcpu_notifier(fn, pri) {				\
 	static struct notifier_block fn##_nb =			\
 		{ .notifier_call = fn, .priority = pri };	\
@@ -115,8 +124,8 @@ static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex)
 static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
 { }
 
-#define lock_cpu_hotplug()	do { } while (0)
-#define unlock_cpu_hotplug()	do { } while (0)
+#define get_online_cpus()	do { } while (0)
+#define put_online_cpus()	do { } while (0)
 #define hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
 /* These aren't inline functions due to a GCC bug. */
 #define register_hotcpu_notifier(nb)	({ (void)(nb); 0; })
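The rename above turns the hotplug exclusion primitive into a refcount-style pair: get_online_cpus()/put_online_cpus() keep the set of online CPUs stable instead of taking a global mutex. A minimal sketch of a caller, assuming only what this header exports (the helper name is made up):

	/* Illustrative only: hold off CPU hotplug while walking the online map. */
	static int demo_count_online_cpus(void)
	{
		int cpu, n = 0;

		get_online_cpus();		/* pin the set of online CPUs */
		for_each_online_cpu(cpu)
			n++;
		put_online_cpus();		/* drop the reference */
		return n;
	}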
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 1678a5de7013..f4a5871767f5 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -47,6 +47,7 @@ struct task_struct;
 
 #ifdef CONFIG_LOCKDEP
 extern void debug_show_all_locks(void);
+extern void __debug_show_held_locks(struct task_struct *task);
 extern void debug_show_held_locks(struct task_struct *task);
 extern void debug_check_no_locks_freed(const void *from, unsigned long len);
 extern void debug_check_no_locks_held(struct task_struct *task);
@@ -55,6 +56,10 @@ static inline void debug_show_all_locks(void)
 {
 }
 
+static inline void __debug_show_held_locks(struct task_struct *task)
+{
+}
+
 static inline void debug_show_held_locks(struct task_struct *task)
 {
 }
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 92d420fe03f8..1a15f8e237a7 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -1,8 +1,12 @@
 #ifndef _LINUX_FUTEX_H
 #define _LINUX_FUTEX_H
 
-#include <linux/sched.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
 
+struct inode;
+struct mm_struct;
+struct task_struct;
 union ktime;
 
 /* Second argument to futex syscall */
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 8d302298a161..2961ec788046 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -72,11 +72,7 @@
 #define in_softirq()	(softirq_count())
 #define in_interrupt()	(irq_count())
 
-#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
-# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
-#else
-# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)
-#endif
+#define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)
 
 #ifdef CONFIG_PREEMPT
 # define PREEMPT_CHECK_OFFSET 1
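With the CONFIG_PREEMPT_BKL special case gone, in_atomic() no longer has to subtract the BKL depth via kernel_locked(); it is simply "preempt count nonzero, ignoring PREEMPT_ACTIVE". A hedged sketch of the usual caller-side pattern (names and the -EBUSY policy are illustrative, not from this patch):

	/* Refuse a sleeping slow path when called in atomic context. */
	static int demo_try_sleeping_path(struct mutex *m)
	{
		if (in_atomic() || irqs_disabled())
			return -EBUSY;		/* must not sleep here */
		mutex_lock(m);
		/* ...do blocking work... */
		mutex_unlock(m);
		return 0;
	}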
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 7a9398e19704..49067f14fac1 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -115,10 +115,8 @@ struct hrtimer {
 	enum hrtimer_restart (*function)(struct hrtimer *);
 	struct hrtimer_clock_base *base;
 	unsigned long state;
-#ifdef CONFIG_HIGH_RES_TIMERS
 	enum hrtimer_cb_mode cb_mode;
 	struct list_head cb_entry;
-#endif
 #ifdef CONFIG_TIMER_STATS
 	void *start_site;
 	char start_comm[16];
@@ -194,10 +192,10 @@ struct hrtimer_cpu_base {
 	spinlock_t lock;
 	struct lock_class_key lock_key;
 	struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
+	struct list_head cb_pending;
 #ifdef CONFIG_HIGH_RES_TIMERS
 	ktime_t expires_next;
 	int hres_active;
-	struct list_head cb_pending;
 	unsigned long nr_events;
 #endif
 };
@@ -217,6 +215,11 @@ static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
 	return timer->base->get_time();
 }
 
+static inline int hrtimer_is_hres_active(struct hrtimer *timer)
+{
+	return timer->base->cpu_base->hres_active;
+}
+
 /*
  * The resolution of the clocks. The resolution value is returned in
  * the clock_getres() system call to give application programmers an
@@ -248,6 +251,10 @@ static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
 	return timer->base->softirq_time;
 }
 
+static inline int hrtimer_is_hres_active(struct hrtimer *timer)
+{
+	return 0;
+}
 #endif
 
 extern ktime_t ktime_get(void);
@@ -310,6 +317,7 @@ extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
 
 /* Soft interrupt function to run the hrtimer queues: */
 extern void hrtimer_run_queues(void);
+extern void hrtimer_run_pending(void);
 
 /* Bootup initialization: */
 extern void __init hrtimers_init(void);
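cb_mode/cb_pending now exist unconditionally, and the new hrtimer_is_hres_active() lets a caller ask at run time whether the timer's CPU base runs in high-resolution mode (it is compile-time 0 without CONFIG_HIGH_RES_TIMERS). A hedged sketch of a callback using it; the function name and 1 ms period are made up:

	static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
	{
		if (!hrtimer_is_hres_active(t))
			return HRTIMER_NORESTART;	/* low-res: fired from the jiffy tick */

		/* high-res mode: cheap to reprogram for a short period */
		hrtimer_forward(t, hrtimer_cb_get_time(t),
				ktime_set(0, NSEC_PER_MSEC));
		return HRTIMER_RESTART;
	}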
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index cae35b6b9aec..796019b22b6f 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -132,9 +132,12 @@ extern struct group_info init_groups;
 	.cpus_allowed = CPU_MASK_ALL, \
 	.mm = NULL, \
 	.active_mm = &init_mm, \
-	.run_list = LIST_HEAD_INIT(tsk.run_list), \
+	.rt = { \
+		.run_list = LIST_HEAD_INIT(tsk.rt.run_list), \
+		.time_slice = HZ, \
+		.nr_cpus_allowed = NR_CPUS, \
+	}, \
 	.ioprio = 0, \
-	.time_slice = HZ, \
 	.tasks = LIST_HEAD_INIT(tsk.tasks), \
 	.ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children), \
 	.ptrace_list = LIST_HEAD_INIT(tsk.ptrace_list), \
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 2306920fa388..c3db4a00f1fa 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -256,6 +256,7 @@ enum
 #ifdef CONFIG_HIGH_RES_TIMERS
 	HRTIMER_SOFTIRQ,
 #endif
+	RCU_SOFTIRQ, 	/* Preferable RCU should always be the last softirq */
 };
 
 /* softirq mask and active fields moved to irq_cpustat_t in
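RCU callback processing gets its own softirq, placed last in the enum so it runs after the other softirqs in a pass. A sketch of how such a softirq would be wired up with the three-argument open_softirq() of this kernel era; the handler name and body here are placeholders, as the real registration lives in the RCU implementation, not this header:

	static void demo_rcu_process_callbacks(struct softirq_action *unused)
	{
		/* advance the callback lists and invoke callbacks whose
		 * grace period has completed */
	}

	static void __init demo_rcu_softirq_setup(void)
	{
		open_softirq(RCU_SOFTIRQ, demo_rcu_process_callbacks, NULL);
	}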
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 8b080024bbc1..7ba9e47bf061 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -29,6 +29,12 @@
 # define SHIFT_HZ	9
 #elif HZ >= 768 && HZ < 1536
 # define SHIFT_HZ	10
+#elif HZ >= 1536 && HZ < 3072
+# define SHIFT_HZ	11
+#elif HZ >= 3072 && HZ < 6144
+# define SHIFT_HZ	12
+#elif HZ >= 6144 && HZ < 12288
+# define SHIFT_HZ	13
 #else
 # error You lose.
 #endif
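SHIFT_HZ is the log2 of the power of two nearest HZ: each #elif arm covers [0.75 * 2^n, 1.5 * 2^n), so the three new arms extend coverage to HZ values up to 12288 (e.g. 2048, 4096, 8192), letting timekeeping code scale by HZ with shifts rather than divisions. An illustration, not part of the header:

	/*
	 * The bucket [1536, 3072) picks SHIFT_HZ == 11 because
	 * 2^11 == 2048 is the nearest power of two for any HZ there.
	 *
	 *	HZ == 1000  ->  SHIFT_HZ == 10	(2^10 == 1024)
	 *	HZ == 2048  ->  SHIFT_HZ == 11	(new branch above)
	 *	HZ == 8192  ->  SHIFT_HZ == 13	(new branch above)
	 */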
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 94bc99656963..a7283c9beadf 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -105,8 +105,8 @@ struct user;
  * supposed to.
  */
 #ifdef CONFIG_PREEMPT_VOLUNTARY
-extern int cond_resched(void);
-# define might_resched() cond_resched()
+extern int _cond_resched(void);
+# define might_resched() _cond_resched()
 #else
 # define might_resched() do { } while (0)
 #endif
diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h
new file mode 100644
index 000000000000..901c2d6377a8
--- /dev/null
+++ b/include/linux/latencytop.h
@@ -0,0 +1,44 @@
+/*
+ * latencytop.h: Infrastructure for displaying latency
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author: Arjan van de Ven <arjan@linux.intel.com>
+ *
+ */
+
+#ifndef _INCLUDE_GUARD_LATENCYTOP_H_
+#define _INCLUDE_GUARD_LATENCYTOP_H_
+
+#ifdef CONFIG_LATENCYTOP
+
+#define LT_SAVECOUNT		32
+#define LT_BACKTRACEDEPTH	12
+
+struct latency_record {
+	unsigned long	backtrace[LT_BACKTRACEDEPTH];
+	unsigned int	count;
+	unsigned long	time;
+	unsigned long	max;
+};
+
+
+struct task_struct;
+
+void account_scheduler_latency(struct task_struct *task, int usecs, int inter);
+
+void clear_all_latency_tracing(struct task_struct *p);
+
+#else
+
+static inline void
+account_scheduler_latency(struct task_struct *task, int usecs, int inter)
+{
+}
+
+static inline void clear_all_latency_tracing(struct task_struct *p)
+{
+}
+
+#endif
+
+#endif
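The new header only declares the hooks; the scheduler is expected to call account_scheduler_latency() when it observes a long delay, recording up to LT_SAVECOUNT backtraces per task. A hedged sketch of a plausible call site; the helper name and the nanosecond-to-microsecond shift are assumptions, not part of this header:

	static void demo_note_wait(struct task_struct *tsk, u64 delta_ns)
	{
		/* delta_ns >> 10 is roughly ns -> us; "1" flags an
		 * involuntary (interruptible) delay */
		if (delta_ns >> 10)
			account_scheduler_latency(tsk, delta_ns >> 10, 1);
	}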
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 0c40cc0b4a36..5dfbc684ce7d 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -207,9 +207,7 @@ static inline int notifier_to_errno(int ret)
 #define CPU_DOWN_PREPARE	0x0005 /* CPU (unsigned)v going down */
 #define CPU_DOWN_FAILED		0x0006 /* CPU (unsigned)v NOT going down */
 #define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */
-#define CPU_LOCK_ACQUIRE	0x0008 /* Acquire all hotcpu locks */
-#define CPU_LOCK_RELEASE	0x0009 /* Release all hotcpu locks */
-#define CPU_DYING		0x000A /* CPU (unsigned)v not running any task,
+#define CPU_DYING		0x0008 /* CPU (unsigned)v not running any task,
 					* not handling interrupts, soon dead */
 
 /* Used for CPU hotplug events occuring while tasks are frozen due to a suspend
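With hotplug exclusion now done through get_online_cpus(), the CPU_LOCK_ACQUIRE/CPU_LOCK_RELEASE events disappear and CPU_DYING is renumbered to 0x0008. A sketch of a notifier distinguishing the two teardown events; everything here is illustrative:

	static int demo_cpu_notify(struct notifier_block *nb,
				   unsigned long action, void *hcpu)
	{
		unsigned int cpu = (unsigned long)hcpu;

		switch (action) {
		case CPU_DYING:
			/* CPU quiesced but per-cpu data still live:
			 * a good moment to migrate state away */
			printk(KERN_DEBUG "cpu %u going away\n", cpu);
			break;
		case CPU_DEAD:
			/* final cleanup after the CPU is gone */
			break;
		}
		return NOTIFY_OK;
	}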
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
new file mode 100644
index 000000000000..4d6624260b4c
--- /dev/null
+++ b/include/linux/rcuclassic.h
@@ -0,0 +1,164 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion (classic version)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2001
+ *
+ * Author: Dipankar Sarma <dipankar@in.ibm.com>
+ *
+ * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
+ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
+ * Papers:
+ * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
+ * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ * 	Documentation/RCU
+ *
+ */
+
+#ifndef __LINUX_RCUCLASSIC_H
+#define __LINUX_RCUCLASSIC_H
+
+#ifdef __KERNEL__
+
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <linux/percpu.h>
+#include <linux/cpumask.h>
+#include <linux/seqlock.h>
+
+
+/* Global control variables for rcupdate callback mechanism. */
+struct rcu_ctrlblk {
+	long cur;		/* Current batch number. */
+	long completed;		/* Number of the last completed batch */
+	int next_pending;	/* Is the next batch already waiting? */
+
+	int signaled;
+
+	spinlock_t lock ____cacheline_internodealigned_in_smp;
+	cpumask_t cpumask;	/* CPUs that need to switch in order */
+				/* for current batch to proceed. */
+} ____cacheline_internodealigned_in_smp;
+
+/* Is batch a before batch b ? */
+static inline int rcu_batch_before(long a, long b)
+{
+	return (a - b) < 0;
+}
+
+/* Is batch a after batch b ? */
+static inline int rcu_batch_after(long a, long b)
+{
+	return (a - b) > 0;
+}
+
+/*
+ * Per-CPU data for Read-Copy UPdate.
+ * nxtlist - new callbacks are added here
+ * curlist - current batch for which quiescent cycle started if any
+ */
+struct rcu_data {
+	/* 1) quiescent state handling : */
+	long quiescbatch;	/* Batch # for grace period */
+	int passed_quiesc;	/* User-mode/idle loop etc. */
+	int qs_pending;		/* core waits for quiesc state */
+
+	/* 2) batch handling */
+	long batch;		/* Batch # for current RCU batch */
+	struct rcu_head *nxtlist;
+	struct rcu_head **nxttail;
+	long qlen;		/* # of queued callbacks */
+	struct rcu_head *curlist;
+	struct rcu_head **curtail;
+	struct rcu_head *donelist;
+	struct rcu_head **donetail;
+	long blimit;		/* Upper limit on a processed batch */
+	int cpu;
+	struct rcu_head barrier;
+};
+
+DECLARE_PER_CPU(struct rcu_data, rcu_data);
+DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
+
+/*
+ * Increment the quiescent state counter.
+ * The counter is a bit degenerated: We do not need to know
+ * how many quiescent states passed, just if there was at least
+ * one since the start of the grace period. Thus just a flag.
+ */
+static inline void rcu_qsctr_inc(int cpu)
+{
+	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+	rdp->passed_quiesc = 1;
+}
+static inline void rcu_bh_qsctr_inc(int cpu)
+{
+	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
+	rdp->passed_quiesc = 1;
+}
+
+extern int rcu_pending(int cpu);
+extern int rcu_needs_cpu(int cpu);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern struct lockdep_map rcu_lock_map;
+# define rcu_read_acquire() \
+			lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_)
+# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
+#else
+# define rcu_read_acquire()	do { } while (0)
+# define rcu_read_release()	do { } while (0)
+#endif
+
+#define __rcu_read_lock() \
+	do { \
+		preempt_disable(); \
+		__acquire(RCU); \
+		rcu_read_acquire(); \
+	} while (0)
+#define __rcu_read_unlock() \
+	do { \
+		rcu_read_release(); \
+		__release(RCU); \
+		preempt_enable(); \
+	} while (0)
+#define __rcu_read_lock_bh() \
+	do { \
+		local_bh_disable(); \
+		__acquire(RCU_BH); \
+		rcu_read_acquire(); \
+	} while (0)
+#define __rcu_read_unlock_bh() \
+	do { \
+		rcu_read_release(); \
+		__release(RCU_BH); \
+		local_bh_enable(); \
+	} while (0)
+
+#define __synchronize_sched() synchronize_rcu()
+
+extern void __rcu_init(void);
+extern void rcu_check_callbacks(int cpu, int user);
+extern void rcu_restart_cpu(int cpu);
+
+extern long rcu_batches_completed(void);
+extern long rcu_batches_completed_bh(void);
+
+#endif /* __KERNEL__ */
+#endif /* __LINUX_RCUCLASSIC_H */
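rcu_batch_before()/rcu_batch_after() compare batch numbers by signed difference rather than with < or >, so the ordering stays correct when the counters wrap around (relying, as the kernel does in practice, on two's-complement wraparound). A small illustration, not kernel code:

	static int demo_batch_wrap(void)
	{
		long a = LONG_MAX;	/* counter about to wrap */
		long b = a + 2;		/* wraps to a large negative value */

		/* (a - b) == -2, so a is still "before" b */
		return rcu_batch_before(a, b);	/* returns 1 */
	}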
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index cc24a01df940..d32c14de270e 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -15,7 +15,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
- * Copyright (C) IBM Corporation, 2001
+ * Copyright IBM Corporation, 2001
  *
  * Author: Dipankar Sarma <dipankar@in.ibm.com>
  *
@@ -53,96 +53,18 @@ struct rcu_head {
 	void (*func)(struct rcu_head *head);
 };
 
+#ifdef CONFIG_CLASSIC_RCU
+#include <linux/rcuclassic.h>
+#else /* #ifdef CONFIG_CLASSIC_RCU */
+#include <linux/rcupreempt.h>
+#endif /* #else #ifdef CONFIG_CLASSIC_RCU */
+
 #define RCU_HEAD_INIT	{ .next = NULL, .func = NULL }
 #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
 #define INIT_RCU_HEAD(ptr) do { \
        (ptr)->next = NULL; (ptr)->func = NULL; \
 } while (0)
 
-
-
-/* Global control variables for rcupdate callback mechanism. */
-struct rcu_ctrlblk {
-	long cur;		/* Current batch number. */
-	long completed;		/* Number of the last completed batch */
-	int next_pending;	/* Is the next batch already waiting? */
-
-	int signaled;
-
-	spinlock_t lock ____cacheline_internodealigned_in_smp;
-	cpumask_t cpumask;	/* CPUs that need to switch in order */
-				/* for current batch to proceed. */
-} ____cacheline_internodealigned_in_smp;
-
-/* Is batch a before batch b ? */
-static inline int rcu_batch_before(long a, long b)
-{
-	return (a - b) < 0;
-}
-
-/* Is batch a after batch b ? */
-static inline int rcu_batch_after(long a, long b)
-{
-	return (a - b) > 0;
-}
-
-/*
- * Per-CPU data for Read-Copy UPdate.
- * nxtlist - new callbacks are added here
- * curlist - current batch for which quiescent cycle started if any
- */
-struct rcu_data {
-	/* 1) quiescent state handling : */
-	long quiescbatch;	/* Batch # for grace period */
-	int passed_quiesc;	/* User-mode/idle loop etc. */
-	int qs_pending;		/* core waits for quiesc state */
-
-	/* 2) batch handling */
-	long batch;		/* Batch # for current RCU batch */
-	struct rcu_head *nxtlist;
-	struct rcu_head **nxttail;
-	long qlen;		/* # of queued callbacks */
-	struct rcu_head *curlist;
-	struct rcu_head **curtail;
-	struct rcu_head *donelist;
-	struct rcu_head **donetail;
-	long blimit;		/* Upper limit on a processed batch */
-	int cpu;
-	struct rcu_head barrier;
-};
-
-DECLARE_PER_CPU(struct rcu_data, rcu_data);
-DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
-
-/*
- * Increment the quiescent state counter.
- * The counter is a bit degenerated: We do not need to know
- * how many quiescent states passed, just if there was at least
- * one since the start of the grace period. Thus just a flag.
- */
-static inline void rcu_qsctr_inc(int cpu)
-{
-	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
-	rdp->passed_quiesc = 1;
-}
-static inline void rcu_bh_qsctr_inc(int cpu)
-{
-	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
-	rdp->passed_quiesc = 1;
-}
-
-extern int rcu_pending(int cpu);
-extern int rcu_needs_cpu(int cpu);
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern struct lockdep_map rcu_lock_map;
-# define rcu_read_acquire()	lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_)
-# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
-#else
-# define rcu_read_acquire()	do { } while (0)
-# define rcu_read_release()	do { } while (0)
-#endif
-
 /**
  * rcu_read_lock - mark the beginning of an RCU read-side critical section.
  *
@@ -172,24 +94,13 @@ extern struct lockdep_map rcu_lock_map;
  *
  * It is illegal to block while in an RCU read-side critical section.
  */
-#define rcu_read_lock() \
-	do { \
-		preempt_disable(); \
-		__acquire(RCU); \
-		rcu_read_acquire(); \
-	} while(0)
+#define rcu_read_lock() __rcu_read_lock()
 
 /**
  * rcu_read_unlock - marks the end of an RCU read-side critical section.
  *
  * See rcu_read_lock() for more information.
  */
-#define rcu_read_unlock() \
-	do { \
-		rcu_read_release(); \
-		__release(RCU); \
-		preempt_enable(); \
-	} while(0)
 
 /*
  * So where is rcu_write_lock()? It does not exist, as there is no
@@ -200,6 +111,7 @@ extern struct lockdep_map rcu_lock_map;
  * used as well. RCU does not care how the writers keep out of each
  * others' way, as long as they do so.
  */
+#define rcu_read_unlock() __rcu_read_unlock()
 
 /**
  * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
@@ -212,24 +124,14 @@ extern struct lockdep_map rcu_lock_map;
  * can use just rcu_read_lock().
  *
  */
-#define rcu_read_lock_bh() \
-	do { \
-		local_bh_disable(); \
-		__acquire(RCU_BH); \
-		rcu_read_acquire(); \
-	} while(0)
+#define rcu_read_lock_bh() __rcu_read_lock_bh()
 
 /*
  * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
-#define rcu_read_unlock_bh() \
-	do { \
-		rcu_read_release(); \
-		__release(RCU_BH); \
-		local_bh_enable(); \
-	} while(0)
+#define rcu_read_unlock_bh() __rcu_read_unlock_bh()
 
 /*
  * Prevent the compiler from merging or refetching accesses. The compiler
@@ -293,21 +195,52 @@ extern struct lockdep_map rcu_lock_map;
  * In "classic RCU", these two guarantees happen to be one and
  * the same, but can differ in realtime RCU implementations.
  */
-#define synchronize_sched() synchronize_rcu()
+#define synchronize_sched() __synchronize_sched()
 
-extern void rcu_init(void);
-extern void rcu_check_callbacks(int cpu, int user);
-extern void rcu_restart_cpu(int cpu);
-extern long rcu_batches_completed(void);
-extern long rcu_batches_completed_bh(void);
+/**
+ * call_rcu - Queue an RCU callback for invocation after a grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual update function to be invoked after the grace period
+ *
+ * The update function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. RCU read-side critical
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
+ * and may be nested.
+ */
+extern void call_rcu(struct rcu_head *head,
+		     void (*func)(struct rcu_head *head));
 
-/* Exported interfaces */
-extern void FASTCALL(call_rcu(struct rcu_head *head,
-			      void (*func)(struct rcu_head *head)));
-extern void FASTCALL(call_rcu_bh(struct rcu_head *head,
-				 void (*func)(struct rcu_head *head)));
+/**
+ * call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual update function to be invoked after the grace period
+ *
+ * The update function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_bh() assumes
+ * that the read-side critical sections end on completion of a softirq
+ * handler. This means that read-side critical sections in process
+ * context must not be interrupted by softirqs. This interface is to be
+ * used when most of the read-side critical sections are in softirq context.
+ * RCU read-side critical sections are delimited by :
+ *  - rcu_read_lock() and  rcu_read_unlock(), if in interrupt context.
+ *  OR
+ *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
+ *  These may be nested.
+ */
+extern void call_rcu_bh(struct rcu_head *head,
+			void (*func)(struct rcu_head *head));
+
+/* Exported common interfaces */
 extern void synchronize_rcu(void);
 extern void rcu_barrier(void);
+extern long rcu_batches_completed(void);
+extern long rcu_batches_completed_bh(void);
+
+/* Internal to kernel */
+extern void rcu_init(void);
+extern int rcu_needs_cpu(int cpu);
 
 #endif /* __KERNEL__ */
 #endif /* __LINUX_RCUPDATE_H */
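The newly documented call_rcu() follows the usual embedded-rcu_head pattern: unlink the object so new readers cannot find it, then defer the free past a grace period. A sketch under assumed names (struct and helpers are made up for illustration):

	struct demo_node {
		struct demo_node *next;
		int key;
		struct rcu_head rcu;
	};

	static void demo_free_cb(struct rcu_head *head)
	{
		kfree(container_of(head, struct demo_node, rcu));
	}

	static void demo_delete(struct demo_node *n)
	{
		/* ...unlink n under the writer-side lock first... */
		call_rcu(&n->rcu, demo_free_cb);	/* free after GP */
	}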
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
new file mode 100644
index 000000000000..ece8eb3e4151
--- /dev/null
+++ b/include/linux/rcupreempt.h
@@ -0,0 +1,86 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion (RT implementation)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2006
+ *
+ * Author: Paul McKenney <paulmck@us.ibm.com>
+ *
+ * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
+ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
+ * Papers:
+ * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
+ * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ * 	Documentation/RCU
+ *
+ */
+
+#ifndef __LINUX_RCUPREEMPT_H
+#define __LINUX_RCUPREEMPT_H
+
+#ifdef __KERNEL__
+
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <linux/percpu.h>
+#include <linux/cpumask.h>
+#include <linux/seqlock.h>
+
+#define rcu_qsctr_inc(cpu)
+#define rcu_bh_qsctr_inc(cpu)
+#define call_rcu_bh(head, rcu) call_rcu(head, rcu)
+
+extern void __rcu_read_lock(void);
+extern void __rcu_read_unlock(void);
+extern int rcu_pending(int cpu);
+extern int rcu_needs_cpu(int cpu);
+
+#define __rcu_read_lock_bh()	{ rcu_read_lock(); local_bh_disable(); }
+#define __rcu_read_unlock_bh()	{ local_bh_enable(); rcu_read_unlock(); }
+
+extern void __synchronize_sched(void);
+
+extern void __rcu_init(void);
+extern void rcu_check_callbacks(int cpu, int user);
+extern void rcu_restart_cpu(int cpu);
+extern long rcu_batches_completed(void);
+
+/*
+ * Return the number of RCU batches processed thus far. Useful for debug
+ * and statistic. The _bh variant is identifcal to straight RCU
+ */
+static inline long rcu_batches_completed_bh(void)
+{
+	return rcu_batches_completed();
+}
+
+#ifdef CONFIG_RCU_TRACE
+struct rcupreempt_trace;
+extern long *rcupreempt_flipctr(int cpu);
+extern long rcupreempt_data_completed(void);
+extern int rcupreempt_flip_flag(int cpu);
+extern int rcupreempt_mb_flag(int cpu);
+extern char *rcupreempt_try_flip_state_name(void);
+extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
+#endif
+
+struct softirq_action;
+
+#endif /* __KERNEL__ */
+#endif /* __LINUX_RCUPREEMPT_H */
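In this preemptible flavor, __rcu_read_lock()/__rcu_read_unlock() are real functions rather than preempt_disable() wrappers; conceptually the lock just bumps the per-task nesting counter added to task_struct in the sched.h hunks below, which is why readers may now be preempted. A deliberately simplified sketch of the idea only; the real implementation also samples and manages per-CPU flip counters:

	void demo_rcu_read_lock(void)
	{
		current->rcu_read_lock_nesting++;	/* plus flip-counter work */
		barrier();	/* keep the critical section inside */
	}

	void demo_rcu_read_unlock(void)
	{
		barrier();
		current->rcu_read_lock_nesting--;
	}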
diff --git a/include/linux/rcupreempt_trace.h b/include/linux/rcupreempt_trace.h
new file mode 100644
index 000000000000..21cd6b2a5c42
--- /dev/null
+++ b/include/linux/rcupreempt_trace.h
@@ -0,0 +1,99 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion (RT implementation)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2006
+ *
+ * Author: Paul McKenney <paulmck@us.ibm.com>
+ *
+ * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
+ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
+ * Papers:
+ * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
+ * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
+ *
+ * For detailed explanation of the Preemptible Read-Copy Update mechanism see -
+ * 	http://lwn.net/Articles/253651/
+ */
+
+#ifndef __LINUX_RCUPREEMPT_TRACE_H
+#define __LINUX_RCUPREEMPT_TRACE_H
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#include <asm/atomic.h>
+
+/*
+ * PREEMPT_RCU data structures.
+ */
+
+struct rcupreempt_trace {
+	long		next_length;
+	long		next_add;
+	long		wait_length;
+	long		wait_add;
+	long		done_length;
+	long		done_add;
+	long		done_remove;
+	atomic_t	done_invoked;
+	long		rcu_check_callbacks;
+	atomic_t	rcu_try_flip_1;
+	atomic_t	rcu_try_flip_e1;
+	long		rcu_try_flip_i1;
+	long		rcu_try_flip_ie1;
+	long		rcu_try_flip_g1;
+	long		rcu_try_flip_a1;
+	long		rcu_try_flip_ae1;
+	long		rcu_try_flip_a2;
+	long		rcu_try_flip_z1;
+	long		rcu_try_flip_ze1;
+	long		rcu_try_flip_z2;
+	long		rcu_try_flip_m1;
+	long		rcu_try_flip_me1;
+	long		rcu_try_flip_m2;
+};
+
+#ifdef CONFIG_RCU_TRACE
+#define RCU_TRACE(fn, arg)	fn(arg);
+#else
+#define RCU_TRACE(fn, arg)
+#endif
+
+extern void rcupreempt_trace_move2done(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_move2wait(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_1(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_e1(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_i1(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_ie1(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_g1(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_a1(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_ae1(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_a2(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_z1(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_ze1(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_z2(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_m1(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_me1(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_m2(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_check_callbacks(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_done_remove(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_invoke(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_next_add(struct rcupreempt_trace *trace);
+
+#endif /* __KERNEL__ */
+#endif /* __LINUX_RCUPREEMPT_TRACE_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d6eacda765ca..df5b24ee80b3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -78,7 +78,6 @@ struct sched_param {
 #include <linux/proportions.h>
 #include <linux/seccomp.h>
 #include <linux/rcupdate.h>
-#include <linux/futex.h>
 #include <linux/rtmutex.h>
 
 #include <linux/time.h>
@@ -88,11 +87,13 @@ struct sched_param {
 #include <linux/hrtimer.h>
 #include <linux/task_io_accounting.h>
 #include <linux/kobject.h>
+#include <linux/latencytop.h>
 
 #include <asm/processor.h>
 
 struct exec_domain;
 struct futex_pi_state;
+struct robust_list_head;
 struct bio;
 
 /*
@@ -230,6 +231,8 @@ static inline int select_nohz_load_balancer(int cpu)
 }
 #endif
 
+extern unsigned long rt_needs_cpu(int cpu);
+
 /*
  * Only dump TASK_* tasks. (0 for all tasks)
 */
@@ -257,13 +260,19 @@ extern void trap_init(void);
 extern void account_process_tick(struct task_struct *task, int user);
 extern void update_process_times(int user);
 extern void scheduler_tick(void);
+extern void hrtick_resched(void);
+
+extern void sched_show_task(struct task_struct *p);
 
 #ifdef CONFIG_DETECT_SOFTLOCKUP
 extern void softlockup_tick(void);
 extern void spawn_softlockup_task(void);
 extern void touch_softlockup_watchdog(void);
 extern void touch_all_softlockup_watchdogs(void);
-extern int softlockup_thresh;
+extern unsigned long softlockup_thresh;
+extern unsigned long sysctl_hung_task_check_count;
+extern unsigned long sysctl_hung_task_timeout_secs;
+extern unsigned long sysctl_hung_task_warnings;
 #else
 static inline void softlockup_tick(void)
 {
@@ -822,6 +831,7 @@ struct sched_class {
 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
 	void (*yield_task) (struct rq *rq);
+	int  (*select_task_rq)(struct task_struct *p, int sync);
 
 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
 
@@ -837,11 +847,25 @@ struct sched_class {
 	int (*move_one_task) (struct rq *this_rq, int this_cpu,
 			      struct rq *busiest, struct sched_domain *sd,
 			      enum cpu_idle_type idle);
+	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
+	void (*post_schedule) (struct rq *this_rq);
+	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
 #endif
 
 	void (*set_curr_task) (struct rq *rq);
-	void (*task_tick) (struct rq *rq, struct task_struct *p);
+	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
 	void (*task_new) (struct rq *rq, struct task_struct *p);
+	void (*set_cpus_allowed)(struct task_struct *p, cpumask_t *newmask);
+
+	void (*join_domain)(struct rq *rq);
+	void (*leave_domain)(struct rq *rq);
+
+	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
+			       int running);
+	void (*switched_to) (struct rq *this_rq, struct task_struct *task,
+			     int running);
+	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
+			      int oldprio, int running);
 };
 
 struct load_weight {
@@ -871,6 +895,8 @@ struct sched_entity {
 #ifdef CONFIG_SCHEDSTATS
 	u64 wait_start;
 	u64 wait_max;
+	u64 wait_count;
+	u64 wait_sum;
 
 	u64 sleep_start;
 	u64 sleep_max;
@@ -909,6 +935,21 @@ struct sched_entity {
 #endif
 };
 
+struct sched_rt_entity {
+	struct list_head run_list;
+	unsigned int time_slice;
+	unsigned long timeout;
+	int nr_cpus_allowed;
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	struct sched_rt_entity *parent;
+	/* rq on which this entity is (to be) queued: */
+	struct rt_rq *rt_rq;
+	/* rq "owned" by this entity/group: */
+	struct rt_rq *my_q;
+#endif
+};
+
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
 	void *stack;
@@ -925,9 +966,9 @@ struct task_struct {
 #endif
 
 	int prio, static_prio, normal_prio;
-	struct list_head run_list;
 	const struct sched_class *sched_class;
 	struct sched_entity se;
+	struct sched_rt_entity rt;
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 	/* list of struct preempt_notifier: */
@@ -951,7 +992,11 @@ struct task_struct {
 
 	unsigned int policy;
 	cpumask_t cpus_allowed;
-	unsigned int time_slice;
+
+#ifdef CONFIG_PREEMPT_RCU
+	int rcu_read_lock_nesting;
+	int rcu_flipctr_idx;
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	struct sched_info sched_info;
@@ -1041,6 +1086,11 @@ struct task_struct {
 /* ipc stuff */
 	struct sysv_sem sysvsem;
 #endif
+#ifdef CONFIG_DETECT_SOFTLOCKUP
+/* hung task detection */
+	unsigned long last_switch_timestamp;
+	unsigned long last_switch_count;
+#endif
 /* CPU-specific state of this task */
 	struct thread_struct thread;
 /* filesystem information */
@@ -1173,6 +1223,10 @@ struct task_struct {
 	int make_it_fail;
 #endif
 	struct prop_local_single dirties;
+#ifdef CONFIG_LATENCYTOP
+	int latency_record_count;
+	struct latency_record latency_record[LT_SAVECOUNT];
+#endif
 };
 
 /*
@@ -1453,6 +1507,12 @@ extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
+extern unsigned int sysctl_sched_rt_period;
+extern unsigned int sysctl_sched_rt_ratio;
+#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
+extern unsigned int sysctl_sched_min_bal_int_shares;
+extern unsigned int sysctl_sched_max_bal_int_shares;
+#endif
 
 int sched_nr_latency_handler(struct ctl_table *table, int write,
 		struct file *file, void __user *buffer, size_t *length,
@@ -1845,7 +1905,18 @@ static inline int need_resched(void)
  * cond_resched_lock() will drop the spinlock before scheduling,
  * cond_resched_softirq() will enable bhs before scheduling.
  */
-extern int cond_resched(void);
+#ifdef CONFIG_PREEMPT
+static inline int cond_resched(void)
+{
+	return 0;
+}
+#else
+extern int _cond_resched(void);
+static inline int cond_resched(void)
+{
+	return _cond_resched();
+}
+#endif
 extern int cond_resched_lock(spinlock_t * lock);
 extern int cond_resched_softirq(void);
 
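Together with the kernel.h hunk above, this makes cond_resched() a compile-time 0 on CONFIG_PREEMPT kernels (which reschedule on their own) while !PREEMPT kernels still call the out-of-line _cond_resched(). A sketch of the typical caller, unchanged by the patch:

	/* Long loop in process context; the call vanishes under
	 * CONFIG_PREEMPT and yields the CPU otherwise. */
	static void demo_long_loop(unsigned long iters)
	{
		unsigned long i;

		for (i = 0; i < iters; i++) {
			/* ...one bounded unit of work... */
			cond_resched();
		}
	}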
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h
index 58962c51dee1..aab3a4cff4e1 100644
--- a/include/linux/smp_lock.h
+++ b/include/linux/smp_lock.h
@@ -17,22 +17,10 @@ extern void __lockfunc __release_kernel_lock(void);
 		__release_kernel_lock();	\
 } while (0)
 
-/*
- * Non-SMP kernels will never block on the kernel lock,
- * so we are better off returning a constant zero from
- * reacquire_kernel_lock() so that the compiler can see
- * it at compile-time.
- */
-#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_BKL)
-# define return_value_on_smp return
-#else
-# define return_value_on_smp
-#endif
-
 static inline int reacquire_kernel_lock(struct task_struct *task)
 {
 	if (unlikely(task->lock_depth >= 0))
-		return_value_on_smp __reacquire_kernel_lock();
+		return __reacquire_kernel_lock();
 	return 0;
 }
 
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index e7fa657d0c49..5da9794b2d78 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -9,10 +9,13 @@ struct stack_trace {
 };
 
 extern void save_stack_trace(struct stack_trace *trace);
+extern void save_stack_trace_tsk(struct task_struct *tsk,
+				struct stack_trace *trace);
 
 extern void print_stack_trace(struct stack_trace *trace, int spaces);
 #else
 # define save_stack_trace(trace)		do { } while (0)
+# define save_stack_trace_tsk(tsk, trace)	do { } while (0)
 # define print_stack_trace(trace, spaces)	do { } while (0)
 #endif
 
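save_stack_trace_tsk() extends the API so callers like latencytop and the hung-task detector can capture a stack for a task other than the current one. A sketch of a caller; the depth of 16 is an arbitrary choice for the example:

	static void demo_show_task_stack(struct task_struct *tsk)
	{
		unsigned long entries[16];
		struct stack_trace trace = {
			.entries	= entries,
			.max_entries	= ARRAY_SIZE(entries),
			.skip		= 0,
		};

		save_stack_trace_tsk(tsk, &trace);	/* fill entries[] */
		print_stack_trace(&trace, 0);		/* dump to the log */
	}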
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 47729f18bfdf..2352f46160d3 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2002, IBM Corp.
  *
- * All rights reserved.          
+ * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -103,6 +103,7 @@
 	.forkexec_idx = 0, \
 	.flags = SD_LOAD_BALANCE \
 		| SD_BALANCE_NEWIDLE \
+		| SD_BALANCE_FORK \
 		| SD_BALANCE_EXEC \
 		| SD_WAKE_AFFINE \
 		| SD_WAKE_IDLE \
@@ -134,6 +135,7 @@
 	.forkexec_idx = 1, \
 	.flags = SD_LOAD_BALANCE \
 		| SD_BALANCE_NEWIDLE \
+		| SD_BALANCE_FORK \
 		| SD_BALANCE_EXEC \
 		| SD_WAKE_AFFINE \
 		| SD_WAKE_IDLE \
@@ -165,6 +167,7 @@
 	.forkexec_idx = 1, \
 	.flags = SD_LOAD_BALANCE \
 		| SD_BALANCE_NEWIDLE \
+		| SD_BALANCE_FORK \
 		| SD_BALANCE_EXEC \
 		| SD_WAKE_AFFINE \
 		| BALANCE_FOR_PKG_POWER,\