Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/bug.h   |   7
-rw-r--r--  include/linux/bottom_half.h |   1
-rw-r--r--  include/linux/debug_locks.h |   2
-rw-r--r--  include/linux/futex.h       |   5
-rw-r--r--  include/linux/hardirq.h     |  13
-rw-r--r--  include/linux/kernel.h      |  11
-rw-r--r--  include/linux/lockdep.h     |  43
-rw-r--r--  include/linux/mutex.h       |   2
-rw-r--r--  include/linux/rcuclassic.h  |   2
-rw-r--r--  include/linux/rcupdate.h    |  10
-rw-r--r--  include/linux/rcutree.h     | 329
-rw-r--r--  include/linux/swiotlb.h     |  22
-rw-r--r--  include/linux/uaccess.h     |   2
13 files changed, 427 insertions(+), 22 deletions(-)
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 4c794d73fb84..8af276361bf2 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -41,15 +41,14 @@ struct bug_entry {
 
 #ifndef __WARN
 #ifndef __ASSEMBLY__
-extern void warn_on_slowpath(const char *file, const int line);
 extern void warn_slowpath(const char *file, const int line,
		const char *fmt, ...) __attribute__((format(printf, 3, 4)));
 #define WANT_WARN_ON_SLOWPATH
 #endif
-#define __WARN()		warn_on_slowpath(__FILE__, __LINE__)
+#define __WARN()		warn_slowpath(__FILE__, __LINE__, NULL)
 #define __WARN_printf(arg...)	warn_slowpath(__FILE__, __LINE__, arg)
 #else
 #define __WARN_printf(arg...)	do { printk(arg); __WARN(); } while (0)
 #endif
 
 #ifndef WARN_ON
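
With warn_on_slowpath() gone, both __WARN() and __WARN_printf() funnel
into warn_slowpath(), where a NULL fmt simply means "no extra message".
A minimal caller sketch (hypothetical function, on a configuration that
uses this generic __WARN() path):

	static int check_len(size_t len)
	{
		/* On a true condition this ends up calling
		 * warn_slowpath(__FILE__, __LINE__, NULL): */
		WARN_ON(len > PAGE_SIZE);
		return len > PAGE_SIZE ? -EINVAL : 0;
	}
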
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index 777dbf695d44..27b1bcffe408 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -2,7 +2,6 @@
 #define _LINUX_BH_H
 
 extern void local_bh_disable(void);
-extern void __local_bh_enable(void);
 extern void _local_bh_enable(void);
 extern void local_bh_enable(void);
 extern void local_bh_enable_ip(unsigned long ip);
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 4aaa4afb1cb9..096476f1fb35 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -17,7 +17,7 @@ extern int debug_locks_off(void);
 ({								\
	int __ret = 0;						\
								\
-	if (unlikely(c)) {					\
+	if (!oops_in_progress && unlikely(c)) {			\
		if (debug_locks_off() && !debug_locks_silent)	\
			WARN_ON(1);				\
		__ret = 1;					\
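
The new !oops_in_progress test keeps lock-debugging warnings from piling
on top of an oops that is already being printed. A typical caller looks
roughly like this (sketch, using the common lock->magic sanity pattern):

	static void debug_check_mutex(struct mutex *lock)
	{
		/* Warn (unless an oops is in flight) and bail out if
		 * the mutex was never properly initialized: */
		if (DEBUG_LOCKS_WARN_ON(lock->magic != lock))
			return;
	}
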
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 586ab56a3ec3..3bf5bb5a34f9 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -25,7 +25,8 @@ union ktime;
 #define FUTEX_WAKE_BITSET	10
 
 #define FUTEX_PRIVATE_FLAG	128
-#define FUTEX_CMD_MASK		~FUTEX_PRIVATE_FLAG
+#define FUTEX_CLOCK_REALTIME	256
+#define FUTEX_CMD_MASK		~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
 
 #define FUTEX_WAIT_PRIVATE	(FUTEX_WAIT | FUTEX_PRIVATE_FLAG)
 #define FUTEX_WAKE_PRIVATE	(FUTEX_WAKE | FUTEX_PRIVATE_FLAG)
@@ -164,6 +165,8 @@ union futex_key {
	} both;
 };
 
+#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
+
 #ifdef CONFIG_FUTEX
 extern void exit_robust_list(struct task_struct *curr);
 extern void exit_pi_state_list(struct task_struct *curr);
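
FUTEX_CLOCK_REALTIME is an operation flag like FUTEX_PRIVATE_FLAG, which
is why FUTEX_CMD_MASK must now strip both bits before the command code
is compared. A sketch of how the syscall entry can decode it (not the
exact do_futex() logic):

	int cmd = op & FUTEX_CMD_MASK;

	if (op & FUTEX_CLOCK_REALTIME) {
		/* An absolute CLOCK_REALTIME timeout only makes sense
		 * for a waiting operation such as FUTEX_WAIT_BITSET: */
		if (cmd != FUTEX_WAIT_BITSET)
			return -ENOSYS;
	}
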
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 89a56d79e4c6..f83288347dda 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -119,13 +119,17 @@ static inline void account_system_vtime(struct task_struct *tsk)
 }
 #endif
 
-#if defined(CONFIG_PREEMPT_RCU) && defined(CONFIG_NO_HZ)
+#if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU)
 extern void rcu_irq_enter(void);
 extern void rcu_irq_exit(void);
+extern void rcu_nmi_enter(void);
+extern void rcu_nmi_exit(void);
 #else
 # define rcu_irq_enter() do { } while (0)
 # define rcu_irq_exit() do { } while (0)
-#endif /* CONFIG_PREEMPT_RCU */
+# define rcu_nmi_enter() do { } while (0)
+# define rcu_nmi_exit() do { } while (0)
+#endif /* #if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) */
 
 /*
  * It is safe to do non-atomic ops on ->hardirq_context,
@@ -135,7 +139,6 @@ extern void rcu_irq_exit(void);
  */
 #define __irq_enter()					\
	do {						\
-		rcu_irq_enter();			\
		account_system_vtime(current);		\
		add_preempt_count(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
@@ -154,7 +157,6 @@ extern void irq_enter(void);
		trace_hardirq_exit();			\
		account_system_vtime(current);		\
		sub_preempt_count(HARDIRQ_OFFSET);	\
-		rcu_irq_exit();				\
	} while (0)
 
 /*
@@ -166,11 +168,14 @@ extern void irq_exit(void);
	do {						\
		ftrace_nmi_enter();			\
		lockdep_off();				\
+		rcu_nmi_enter();			\
		__irq_enter();				\
	} while (0)
+
 #define nmi_exit()					\
	do {						\
		__irq_exit();				\
+		rcu_nmi_exit();				\
		lockdep_on();				\
		ftrace_nmi_exit();			\
	} while (0)
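
rcu_irq_enter()/rcu_irq_exit() leave the low-level __irq_enter()/
__irq_exit() macros (the out-of-line irq_enter()/irq_exit() paths are
presumably expected to do that bookkeeping now), while NMIs get explicit
rcu_nmi_enter()/rcu_nmi_exit() hooks so tree RCU can see NMIs that hit a
dynticks-idle CPU. The bracketing an architecture's NMI path inherits
from these macros (sketch; the handler name is hypothetical):

	void arch_handle_nmi(struct pt_regs *regs)
	{
		nmi_enter();	/* lockdep_off, rcu_nmi_enter, __irq_enter */
		/* ... RCU read-side critical sections are now safe even
		 * if this CPU was in dynticks-idle state ... */
		nmi_exit();	/* __irq_exit, rcu_nmi_exit, lockdep_on */
	}
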
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 6002ae76785c..ca9ff6411dfa 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -141,6 +141,15 @@ extern int _cond_resched(void);
	(__x < 0) ? -__x : __x;		\
 })
 
+#ifdef CONFIG_PROVE_LOCKING
+void might_fault(void);
+#else
+static inline void might_fault(void)
+{
+	might_sleep();
+}
+#endif
+
 extern struct atomic_notifier_head panic_notifier_list;
 extern long (*panic_blink)(long time);
 NORET_TYPE void panic(const char * fmt, ...)
@@ -188,6 +197,8 @@ extern unsigned long long memparse(const char *ptr, char **retptr);
 extern int core_kernel_text(unsigned long addr);
 extern int __kernel_text_address(unsigned long addr);
 extern int kernel_text_address(unsigned long addr);
+extern int func_ptr_is_kernel_text(void *ptr);
+
 struct pid;
 extern struct pid *session_of_pgrp(struct pid *pgrp);
 
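
might_fault() gives fault-capable user-copy paths a single annotation
point: under CONFIG_PROVE_LOCKING it becomes a real function (so lockdep
can check the calling context), otherwise it decays to plain
might_sleep(). A hypothetical helper showing where the call belongs:

	static inline int copy_flags_to_user(u32 __user *dst, u32 flags)
	{
		might_fault();	/* may sleep and fault in user memory */
		return put_user(flags, dst);
	}
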
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 29aec6e10020..37a0361f4685 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -73,6 +73,8 @@ struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
 };
 
+#define LOCKSTAT_POINTS		4
+
 /*
  * The lock-class itself:
  */
@@ -119,7 +121,8 @@ struct lock_class {
	int				name_version;
 
 #ifdef CONFIG_LOCK_STAT
-	unsigned long			contention_point[4];
+	unsigned long			contention_point[LOCKSTAT_POINTS];
+	unsigned long			contending_point[LOCKSTAT_POINTS];
 #endif
 };
 
@@ -144,6 +147,7 @@ enum bounce_type {
 
 struct lock_class_stats {
	unsigned long			contention_point[4];
+	unsigned long			contending_point[4];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
@@ -165,6 +169,7 @@ struct lockdep_map {
	const char			*name;
 #ifdef CONFIG_LOCK_STAT
	int				cpu;
+	unsigned long			ip;
 #endif
 };
 
@@ -309,8 +314,15 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
 
-extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass,
-			      unsigned long ip);
+extern void lock_set_class(struct lockdep_map *lock, const char *name,
+			   struct lock_class_key *key, unsigned int subclass,
+			   unsigned long ip);
+
+static inline void lock_set_subclass(struct lockdep_map *lock,
+		unsigned int subclass, unsigned long ip)
+{
+	lock_set_class(lock, lock->name, lock->key, subclass, ip);
+}
 
 # define INIT_LOCKDEP				.lockdep_recursion = 0,
 
@@ -328,6 +340,7 @@ static inline void lockdep_on(void)
 
 # define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
 # define lock_release(l, n, i)			do { } while (0)
+# define lock_set_class(l, n, k, s, i)		do { } while (0)
 # define lock_set_subclass(l, s, i)		do { } while (0)
 # define lockdep_init()				do { } while (0)
 # define lockdep_info()				do { } while (0)
@@ -356,7 +369,7 @@ struct lock_class_key { };
 #ifdef CONFIG_LOCK_STAT
 
 extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
-extern void lock_acquired(struct lockdep_map *lock);
+extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);
 
 #define LOCK_CONTENDED(_lock, try, lock)			\
 do {								\
@@ -364,13 +377,13 @@ do {								\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
-	lock_acquired(&(_lock)->dep_map);			\
+	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
 } while (0)
 
 #else /* CONFIG_LOCK_STAT */
 
 #define lock_contended(lockdep_map, ip) do {} while (0)
-#define lock_acquired(lockdep_map) do {} while (0)
+#define lock_acquired(lockdep_map, ip) do {} while (0)
 
 #define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)
@@ -481,4 +494,22 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 # define lock_map_release(l)			do { } while (0)
 #endif
 
+#ifdef CONFIG_PROVE_LOCKING
+# define might_lock(lock)						\
+do {									\
+	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
+	lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_);	\
+	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
+} while (0)
+# define might_lock_read(lock)						\
+do {									\
+	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
+	lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_);	\
+	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
+} while (0)
+#else
+# define might_lock(lock) do { } while (0)
+# define might_lock_read(lock) do { } while (0)
+#endif
+
 #endif /* __LINUX_LOCKDEP_H */
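
might_lock()/might_lock_read() let a function declare "I may take this
lock" so lockdep records the dependency on every call, not only on the
occasional path that really acquires it. A sketch with a hypothetical
lock and lookup helper:

	static DEFINE_MUTEX(cache_lock);

	static void *cache_lookup(unsigned long key)
	{
		/*
		 * The fast path below may skip cache_lock entirely, but
		 * every caller must be in a context where taking it would
		 * be legal; might_lock() makes lockdep verify that always.
		 */
		might_lock(&cache_lock);
		/* ... lockless fast path, else mutex_lock(&cache_lock) ... */
		return NULL;
	}
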
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index bc6da10ceee0..7a0e5c4f8072 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -144,6 +144,8 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
 /*
  * NOTE: mutex_trylock() follows the spin_trylock() convention,
  *       not the down_trylock() convention!
+ *
+ * Returns 1 if the mutex has been acquired successfully, and 0 on contention.
  */
 extern int mutex_trylock(struct mutex *lock);
 extern void mutex_unlock(struct mutex *lock);
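
Spelling the documented convention out (sketch; io_mutex and
do_quick_io() are illustrative):

	static DEFINE_MUTEX(io_mutex);

	static void try_quick_io(void)
	{
		if (mutex_trylock(&io_mutex)) {
			/* 1 == acquired, as with spin_trylock(). */
			do_quick_io();
			mutex_unlock(&io_mutex);
		}
		/* 0 == contention; note that down_trylock() uses the
		 * opposite convention (0 == success). */
	}
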
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index 5f89b62e6983..301dda829e37 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -41,7 +41,7 @@
 #include <linux/seqlock.h>
 
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-#define RCU_SECONDS_TILL_STALL_CHECK	( 3 * HZ) /* for rcp->jiffies_stall */
+#define RCU_SECONDS_TILL_STALL_CHECK	(10 * HZ) /* for rcp->jiffies_stall */
 #define RCU_SECONDS_TILL_STALL_RECHECK	(30 * HZ) /* for rcp->jiffies_stall */
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 895dc9c1088c..1168fbcea8d4 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -52,11 +52,15 @@ struct rcu_head {
	void (*func)(struct rcu_head *head);
 };
 
-#ifdef CONFIG_CLASSIC_RCU
+#if defined(CONFIG_CLASSIC_RCU)
 #include <linux/rcuclassic.h>
-#else /* #ifdef CONFIG_CLASSIC_RCU */
+#elif defined(CONFIG_TREE_RCU)
+#include <linux/rcutree.h>
+#elif defined(CONFIG_PREEMPT_RCU)
 #include <linux/rcupreempt.h>
-#endif /* #else #ifdef CONFIG_CLASSIC_RCU */
+#else
+#error "Unknown RCU implementation specified to kernel configuration"
+#endif /* #else #if defined(CONFIG_CLASSIC_RCU) */
 
 #define RCU_HEAD_INIT	{ .next = NULL, .func = NULL }
 #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
new file mode 100644
index 000000000000..d4368b7975c3
--- /dev/null
+++ b/include/linux/rcutree.h
@@ -0,0 +1,329 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion (tree-based version)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2008
+ *
+ * Author: Dipankar Sarma <dipankar@in.ibm.com>
+ *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical algorithm
+ *
+ * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
+ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ *	Documentation/RCU
+ */
+
+#ifndef __LINUX_RCUTREE_H
+#define __LINUX_RCUTREE_H
+
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <linux/percpu.h>
+#include <linux/cpumask.h>
+#include <linux/seqlock.h>
+
+/*
+ * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT.
+ * In theory, it should be possible to add more levels straightforwardly.
+ * In practice, this has not been tested, so there is probably some
+ * bug somewhere.
+ */
+#define MAX_RCU_LVLS 3
+#define RCU_FANOUT	(CONFIG_RCU_FANOUT)
+#define RCU_FANOUT_SQ	(RCU_FANOUT * RCU_FANOUT)
+#define RCU_FANOUT_CUBE	(RCU_FANOUT_SQ * RCU_FANOUT)
+
+#if NR_CPUS <= RCU_FANOUT
+# define NUM_RCU_LVLS	1
+# define NUM_RCU_LVL_0	1
+# define NUM_RCU_LVL_1	(NR_CPUS)
+# define NUM_RCU_LVL_2	0
+# define NUM_RCU_LVL_3	0
+#elif NR_CPUS <= RCU_FANOUT_SQ
+# define NUM_RCU_LVLS	2
+# define NUM_RCU_LVL_0	1
+# define NUM_RCU_LVL_1	(((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT)
+# define NUM_RCU_LVL_2	(NR_CPUS)
+# define NUM_RCU_LVL_3	0
+#elif NR_CPUS <= RCU_FANOUT_CUBE
+# define NUM_RCU_LVLS	3
+# define NUM_RCU_LVL_0	1
+# define NUM_RCU_LVL_1	(((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ)
+# define NUM_RCU_LVL_2	(((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT))
+# define NUM_RCU_LVL_3	NR_CPUS
+#else
+# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
+#endif /* #if (NR_CPUS) <= RCU_FANOUT */
+
+#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
+#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
+
+/*
+ * Dynticks per-CPU state.
+ */
+struct rcu_dynticks {
+	int dynticks_nesting;	/* Track nesting level, sort of. */
+	int dynticks;		/* Even value for dynticks-idle, else odd. */
+	int dynticks_nmi;	/* Even value for either dynticks-idle or */
+				/*  not in nmi handler, else odd.  So this */
+				/*  remains even for nmi from irq handler. */
+};
+
+/*
+ * Definition for node within the RCU grace-period-detection hierarchy.
+ */
+struct rcu_node {
+	spinlock_t lock;
+	unsigned long qsmask;	/* CPUs or groups that need to switch in */
+				/*  order for current grace period to proceed.*/
+	unsigned long qsmaskinit;
+				/* Per-GP initialization for qsmask. */
+	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
+	int	grplo;		/* lowest-numbered CPU or group here. */
+	int	grphi;		/* highest-numbered CPU or group here. */
+	u8	grpnum;		/* CPU/group number for next level up. */
+	u8	level;		/* root is at level 0. */
+	struct rcu_node *parent;
+} ____cacheline_internodealigned_in_smp;
+
+/* Index values for nxttail array in struct rcu_data. */
+#define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
+#define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */
+#define RCU_NEXT_READY_TAIL	2	/* Also RCU_NEXT head. */
+#define RCU_NEXT_TAIL		3
+#define RCU_NEXT_SIZE		4
+
+/* Per-CPU data for read-copy update. */
+struct rcu_data {
+	/* 1) quiescent-state and grace-period handling : */
+	long		completed;	/* Track rsp->completed gp number */
+					/*  in order to detect GP end. */
+	long		gpnum;		/* Highest gp number that this CPU */
+					/*  is aware of having started. */
+	long		passed_quiesc_completed;
+					/* Value of completed at time of qs. */
+	bool		passed_quiesc;	/* User-mode/idle loop etc. */
+	bool		qs_pending;	/* Core waits for quiesc state. */
+	bool		beenonline;	/* CPU online at least once. */
+	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
+	unsigned long	grpmask;	/* Mask to apply to leaf qsmask. */
+
+	/* 2) batch handling */
+	/*
+	 * If nxtlist is not NULL, it is partitioned as follows.
+	 * Any of the partitions might be empty, in which case the
+	 * pointer to that partition will be equal to the pointer for
+	 * the following partition.  When the list is empty, all of
+	 * the nxttail elements point to nxtlist, which is NULL.
+	 *
+	 * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]):
+	 *	Entries that might have arrived after current GP ended
+	 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
+	 *	Entries known to have arrived before current GP ended
+	 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
+	 *	Entries that batch # <= ->completed - 1: waiting for current GP
+	 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
+	 *	Entries that batch # <= ->completed
+	 *	The grace period for these entries has completed, and
+	 *	the other grace-period-completed entries may be moved
+	 *	here temporarily in rcu_process_callbacks().
+	 */
+	struct rcu_head *nxtlist;
+	struct rcu_head **nxttail[RCU_NEXT_SIZE];
+	long		qlen;	 /* # of queued callbacks */
+	long		blimit;	 /* Upper limit on a processed batch */
+
+#ifdef CONFIG_NO_HZ
+	/* 3) dynticks interface. */
+	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
+	int dynticks_snap;		/* Per-GP tracking for dynticks. */
+	int dynticks_nmi_snap;		/* Per-GP tracking for dynticks_nmi. */
+#endif /* #ifdef CONFIG_NO_HZ */
+
+	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
+#ifdef CONFIG_NO_HZ
+	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
+#endif /* #ifdef CONFIG_NO_HZ */
+	unsigned long offline_fqs;	/* Kicked due to being offline. */
+	unsigned long resched_ipi;	/* Sent a resched IPI. */
+
+	/* 5) state to allow this CPU to force_quiescent_state on others */
+	long n_rcu_pending;		/* rcu_pending() calls since boot. */
+	long n_rcu_pending_force_qs;	/* when to force quiescent states. */
+
+	int cpu;
+};
+
+/* Values for signaled field in struct rcu_state. */
+#define RCU_GP_INIT		0	/* Grace period being initialized. */
+#define RCU_SAVE_DYNTICK	1	/* Need to scan dyntick state. */
+#define RCU_FORCE_QS		2	/* Need to force quiescent state. */
+#ifdef CONFIG_NO_HZ
+#define RCU_SIGNAL_INIT		RCU_SAVE_DYNTICK
+#else /* #ifdef CONFIG_NO_HZ */
+#define RCU_SIGNAL_INIT		RCU_FORCE_QS
+#endif /* #else #ifdef CONFIG_NO_HZ */
+
+#define RCU_JIFFIES_TILL_FORCE_QS	 3	/* for rsp->jiffies_force_qs */
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+#define RCU_SECONDS_TILL_STALL_CHECK	(10 * HZ)  /* for rsp->jiffies_stall */
+#define RCU_SECONDS_TILL_STALL_RECHECK	(30 * HZ)  /* for rsp->jiffies_stall */
+#define RCU_STALL_RAT_DELAY		2	   /* Allow other CPUs time */
+						   /*  to take at least one */
+						   /*  scheduling clock irq */
+						   /*  before ratting on them. */
+
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+
+/*
+ * RCU global state, including node hierarchy.  This hierarchy is
+ * represented in "heap" form in a dense array.  The root (first level)
+ * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
+ * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
+ * and the third level in ->node[m+1] and following (->node[m+1] referenced
+ * by ->level[2]).  The number of levels is determined by the number of
+ * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
+ * consisting of a single rcu_node.
+ */
+struct rcu_state {
+	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
+	struct rcu_node *level[NUM_RCU_LVLS];	/* Hierarchy levels. */
+	u32 levelcnt[MAX_RCU_LVLS + 1];		/* # nodes in each level. */
+	u8 levelspread[NUM_RCU_LVLS];		/* kids/node in each level. */
+	struct rcu_data *rda[NR_CPUS];		/* array of rdp pointers. */
+
+	/* The following fields are guarded by the root rcu_node's lock. */
+
+	u8	signaled ____cacheline_internodealigned_in_smp;
+						/* Force QS state. */
+	long	gpnum;				/* Current gp number. */
+	long	completed;			/* # of last completed gp. */
+	spinlock_t onofflock;			/* exclude on/offline and */
+						/*  starting new GP. */
+	spinlock_t fqslock;			/* Only one task forcing */
+						/*  quiescent states. */
+	unsigned long jiffies_force_qs;		/* Time at which to invoke */
+						/*  force_quiescent_state(). */
+	unsigned long n_force_qs;		/* Number of calls to */
+						/*  force_quiescent_state(). */
+	unsigned long n_force_qs_lh;		/* ~Number of calls leaving */
+						/*  due to lock unavailable. */
+	unsigned long n_force_qs_ngp;		/* Number of calls leaving */
+						/*  due to no GP active. */
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+	unsigned long gp_start;			/* Time at which GP started, */
+						/*  but in jiffies. */
+	unsigned long jiffies_stall;		/* Time at which to check */
+						/*  for CPU stalls. */
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+#ifdef CONFIG_NO_HZ
+	long dynticks_completed;		/* Value of completed @ snap. */
+#endif /* #ifdef CONFIG_NO_HZ */
+};
+
+extern struct rcu_state rcu_state;
+DECLARE_PER_CPU(struct rcu_data, rcu_data);
+
+extern struct rcu_state rcu_bh_state;
+DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
+
+/*
+ * Increment the quiescent state counter.
+ * The counter is a bit degenerated: We do not need to know
+ * how many quiescent states passed, just if there was at least
+ * one since the start of the grace period. Thus just a flag.
+ */
+static inline void rcu_qsctr_inc(int cpu)
+{
+	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+	rdp->passed_quiesc = 1;
+	rdp->passed_quiesc_completed = rdp->completed;
+}
+static inline void rcu_bh_qsctr_inc(int cpu)
+{
+	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
+	rdp->passed_quiesc = 1;
+	rdp->passed_quiesc_completed = rdp->completed;
+}
+
+extern int rcu_pending(int cpu);
+extern int rcu_needs_cpu(int cpu);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern struct lockdep_map rcu_lock_map;
+# define rcu_read_acquire()	\
+		lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
+# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
+#else
+# define rcu_read_acquire()	do { } while (0)
+# define rcu_read_release()	do { } while (0)
+#endif
+
+static inline void __rcu_read_lock(void)
+{
+	preempt_disable();
+	__acquire(RCU);
+	rcu_read_acquire();
+}
+static inline void __rcu_read_unlock(void)
+{
+	rcu_read_release();
+	__release(RCU);
+	preempt_enable();
+}
+static inline void __rcu_read_lock_bh(void)
+{
+	local_bh_disable();
+	__acquire(RCU_BH);
+	rcu_read_acquire();
+}
+static inline void __rcu_read_unlock_bh(void)
+{
+	rcu_read_release();
+	__release(RCU_BH);
+	local_bh_enable();
+}
+
+#define __synchronize_sched() synchronize_rcu()
+
+#define call_rcu_sched(head, func) call_rcu(head, func)
+
+static inline void rcu_init_sched(void)
+{
+}
+
+extern void __rcu_init(void);
+extern void rcu_check_callbacks(int cpu, int user);
+extern void rcu_restart_cpu(int cpu);
+
+extern long rcu_batches_completed(void);
+extern long rcu_batches_completed_bh(void);
+
+#ifdef CONFIG_NO_HZ
+void rcu_enter_nohz(void);
+void rcu_exit_nohz(void);
+#else /* CONFIG_NO_HZ */
+static inline void rcu_enter_nohz(void)
+{
+}
+static inline void rcu_exit_nohz(void)
+{
+}
+#endif /* CONFIG_NO_HZ */
+
+#endif /* __LINUX_RCUTREE_H */
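
To make the fanout geometry above concrete, a worked example (the
numbers assume CONFIG_RCU_FANOUT=64 and NR_CPUS=4096; other
configurations scale the same way):

	NR_CPUS (4096) <= RCU_FANOUT_SQ (64 * 64 = 4096), so:
	NUM_RCU_LVLS  = 2
	NUM_RCU_LVL_0 = 1                       (the root rcu_node)
	NUM_RCU_LVL_1 = (4096 + 63) / 64 = 64   (leaf rcu_nodes)
	NUM_RCU_LVL_2 = 4096                    (the CPUs themselves)
	RCU_SUM       = 1 + 64 + 4096 = 4161
	NUM_RCU_NODES = RCU_SUM - NR_CPUS = 65  rcu_node structures

so each leaf covers 64 CPUs, and a CPU reporting a quiescent state
contends on its 64-CPU leaf's lock rather than on one global lock shared
by all 4096 CPUs.
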
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index b18ec5533e8c..325af1de0351 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -7,9 +7,31 @@ struct device;
 struct dma_attrs;
 struct scatterlist;
 
+/*
+ * Maximum allowable number of contiguous slabs to map,
+ * must be a power of 2.  What is the appropriate value ?
+ * The complexity of {map,unmap}_single is linearly dependent on this value.
+ */
+#define IO_TLB_SEGSIZE	128
+
+
+/*
+ * log of the size of each IO TLB slab.  The number of slabs is command line
+ * controllable.
+ */
+#define IO_TLB_SHIFT 11
+
 extern void
 swiotlb_init(void);
 
+extern void *swiotlb_alloc_boot(size_t bytes, unsigned long nslabs);
+extern void *swiotlb_alloc(unsigned order, unsigned long nslabs);
+
+extern dma_addr_t swiotlb_phys_to_bus(phys_addr_t address);
+extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address);
+
+extern int swiotlb_arch_range_needs_mapping(void *ptr, size_t size);
+
 extern void
 *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			dma_addr_t *dma_handle, gfp_t flags);
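
The two newly exposed constants pin down the bounce-buffer granularity,
and the limits follow directly from them:

	slab size          = 1 << IO_TLB_SHIFT  = 2048 bytes
	max contiguous map = IO_TLB_SEGSIZE slabs
	                   = 128 * 2048 bytes   = 256 KB

so a single streaming mapping larger than 256 KB cannot be bounced
through the IO TLB in one piece.
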
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index fec6decfb983..6b58367d145e 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -78,7 +78,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
								\
		set_fs(KERNEL_DS);				\
		pagefault_disable();				\
-		ret = __get_user(retval, (__force typeof(retval) __user *)(addr));	\
+		ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval));		\
		pagefault_enable();				\
		set_fs(old_fs);					\
		ret;						\
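
The macro patched here is probe_kernel_address(). Replacing __get_user()
with __copy_from_user_inatomic() presumably sidesteps the might_fault()/
might_sleep() annotations that user-copy primitives grow elsewhere in
this series, which would trigger falsely inside the pagefault_disable()
section. Typical use (sketch; ptr and val are illustrative):

	unsigned long val;

	/* Safely read a possibly-bogus kernel pointer: */
	if (probe_kernel_address(ptr, val))
		return;		/* -EFAULT: address was not readable */
	printk(KERN_INFO "value at %p is %lx\n", ptr, val);
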