Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/bottom_half.h     1
-rw-r--r--   include/linux/hardirq.h        14
-rw-r--r--   include/linux/lockdep.h        12
-rw-r--r--   include/linux/rcupdate.h       10
-rw-r--r--   include/linux/rcutree.h       329
-rw-r--r--   include/linux/swiotlb.h        22
6 files changed, 376 insertions, 12 deletions
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index 777dbf695d44..27b1bcffe408 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -2,7 +2,6 @@
 #define _LINUX_BH_H
 
 extern void local_bh_disable(void);
-extern void __local_bh_enable(void);
 extern void _local_bh_enable(void);
 extern void local_bh_enable(void);
 extern void local_bh_enable_ip(unsigned long ip);
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 181006cc94a0..9b70b9231693 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -118,13 +118,17 @@ static inline void account_system_vtime(struct task_struct *tsk)
 }
 #endif
 
-#if defined(CONFIG_PREEMPT_RCU) && defined(CONFIG_NO_HZ)
+#if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU)
 extern void rcu_irq_enter(void);
 extern void rcu_irq_exit(void);
+extern void rcu_nmi_enter(void);
+extern void rcu_nmi_exit(void);
 #else
 # define rcu_irq_enter() do { } while (0)
 # define rcu_irq_exit() do { } while (0)
-#endif /* CONFIG_PREEMPT_RCU */
+# define rcu_nmi_enter() do { } while (0)
+# define rcu_nmi_exit() do { } while (0)
+#endif /* #if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) */
 
 /*
  * It is safe to do non-atomic ops on ->hardirq_context,
@@ -134,7 +138,6 @@ extern void rcu_irq_exit(void);
  */
 #define __irq_enter()                                  \
         do {                                           \
-                rcu_irq_enter();                       \
                 account_system_vtime(current);         \
                 add_preempt_count(HARDIRQ_OFFSET);     \
                 trace_hardirq_enter();                 \
@@ -153,7 +156,6 @@ extern void irq_enter(void);
                 trace_hardirq_exit();                  \
                 account_system_vtime(current);         \
                 sub_preempt_count(HARDIRQ_OFFSET);     \
-                rcu_irq_exit();                        \
         } while (0)
 
 /*
@@ -161,7 +163,7 @@ extern void irq_enter(void);
  */
 extern void irq_exit(void);
 
-#define nmi_enter()             do { lockdep_off(); __irq_enter(); } while (0)
-#define nmi_exit()              do { __irq_exit(); lockdep_on(); } while (0)
+#define nmi_enter()             do { lockdep_off(); rcu_nmi_enter(); __irq_enter(); } while (0)
+#define nmi_exit()              do { __irq_exit(); rcu_nmi_exit(); lockdep_on(); } while (0)
 
 #endif /* LINUX_HARDIRQ_H */
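The hardirq.h hunks above move RCU's interrupt accounting out of __irq_enter()/__irq_exit() and instead have nmi_enter()/nmi_exit() bracket NMI handlers with rcu_nmi_enter()/rcu_nmi_exit(). The implementation of those hooks is outside this diff (which is limited to include/linux), but the dynticks_nmi field added in the new rcutree.h below documents the protocol they maintain: the per-CPU counter stays even while the CPU is outside NMI handlers (or is not dynticks-idle) and is odd while inside one. A rough, illustrative sketch of that even/odd bookkeeping, not the code added by this patch:

/* Sketch only: names and structure invented to illustrate the counter protocol. */
struct rcu_dynticks_sketch {
        int dynticks;           /* even = CPU is dynticks-idle */
        int dynticks_nmi;       /* even = CPU not in an NMI handler */
};

static void sketch_rcu_nmi_enter(struct rcu_dynticks_sketch *rdtp)
{
        if (rdtp->dynticks & 0x1)
                return;         /* CPU not idle: irq/process accounting already covers us */
        rdtp->dynticks_nmi++;   /* now odd: grace periods must wait for this CPU */
        /* the real code would need a memory barrier here */
}

static void sketch_rcu_nmi_exit(struct rcu_dynticks_sketch *rdtp)
{
        if (rdtp->dynticks & 0x1)
                return;
        /* the real code would need a memory barrier here */
        rdtp->dynticks_nmi++;   /* back to even: the CPU may again be ignored */
}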
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 8956daf64abd..37a0361f4685 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -314,8 +314,15 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 extern void lock_release(struct lockdep_map *lock, int nested,
                          unsigned long ip);
 
-extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass,
-                              unsigned long ip);
+extern void lock_set_class(struct lockdep_map *lock, const char *name,
+                           struct lock_class_key *key, unsigned int subclass,
+                           unsigned long ip);
+
+static inline void lock_set_subclass(struct lockdep_map *lock,
+               unsigned int subclass, unsigned long ip)
+{
+       lock_set_class(lock, lock->name, lock->key, subclass, ip);
+}
 
 # define INIT_LOCKDEP                          .lockdep_recursion = 0,
 
@@ -333,6 +340,7 @@ static inline void lockdep_on(void)
 
 # define lock_acquire(l, s, t, r, c, n, i)     do { } while (0)
 # define lock_release(l, n, i)                 do { } while (0)
+# define lock_set_class(l, n, k, s, i)         do { } while (0)
 # define lock_set_subclass(l, s, i)            do { } while (0)
 # define lockdep_init()                        do { } while (0)
 # define lockdep_info()                        do { } while (0)
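The lockdep change generalizes lock_set_subclass(): lock_set_class() re-annotates a lock that is currently held with a different lockdep class (name, key, subclass), and lock_set_subclass() becomes a wrapper that reuses the lock's existing name and key. A minimal, hypothetical usage sketch assuming CONFIG_DEBUG_LOCK_ALLOC; the key, class name, and function below are invented for illustration and are not from this patch:

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/lockdep.h>

static struct lock_class_key demo_nested_key;  /* example-only key */

static void demo_reclass(spinlock_t *lock)
{
        spin_lock(lock);

        /* Pretend the held lock now belongs to a different class... */
        lock_set_class(&lock->dep_map, "demo-nested", &demo_nested_key, 0, _THIS_IP_);

        /* ...and restore its original class/subclass before unlocking,
         * the same way lock_set_subclass() is used elsewhere in the kernel. */
        lock_set_subclass(&lock->dep_map, 0, _THIS_IP_);

        spin_unlock(lock);
}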
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 86f1f5e43e33..bfd289aff576 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -52,11 +52,15 @@ struct rcu_head {
        void (*func)(struct rcu_head *head);
 };
 
-#ifdef CONFIG_CLASSIC_RCU
+#if defined(CONFIG_CLASSIC_RCU)
 #include <linux/rcuclassic.h>
-#else /* #ifdef CONFIG_CLASSIC_RCU */
+#elif defined(CONFIG_TREE_RCU)
+#include <linux/rcutree.h>
+#elif defined(CONFIG_PREEMPT_RCU)
 #include <linux/rcupreempt.h>
-#endif /* #else #ifdef CONFIG_CLASSIC_RCU */
+#else
+#error "Unknown RCU implementation specified to kernel configuration"
+#endif /* #else #if defined(CONFIG_CLASSIC_RCU) */
 
 #define RCU_HEAD_INIT   { .next = NULL, .func = NULL }
 #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
new file mode 100644
index 000000000000..d4368b7975c3
--- /dev/null
+++ b/include/linux/rcutree.h
@@ -0,0 +1,329 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion (tree-based version)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2008
+ *
+ * Author: Dipankar Sarma <dipankar@in.ibm.com>
+ *         Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical algorithm
+ *
+ * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
+ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ *         Documentation/RCU
+ */
+
+#ifndef __LINUX_RCUTREE_H
+#define __LINUX_RCUTREE_H
+
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <linux/percpu.h>
+#include <linux/cpumask.h>
+#include <linux/seqlock.h>
+
+/*
+ * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT.
+ * In theory, it should be possible to add more levels straightforwardly.
+ * In practice, this has not been tested, so there is probably some
+ * bug somewhere.
+ */
+#define MAX_RCU_LVLS 3
+#define RCU_FANOUT        (CONFIG_RCU_FANOUT)
+#define RCU_FANOUT_SQ     (RCU_FANOUT * RCU_FANOUT)
+#define RCU_FANOUT_CUBE   (RCU_FANOUT_SQ * RCU_FANOUT)
+
+#if NR_CPUS <= RCU_FANOUT
+# define NUM_RCU_LVLS     1
+# define NUM_RCU_LVL_0    1
+# define NUM_RCU_LVL_1    (NR_CPUS)
+# define NUM_RCU_LVL_2    0
+# define NUM_RCU_LVL_3    0
+#elif NR_CPUS <= RCU_FANOUT_SQ
+# define NUM_RCU_LVLS     2
+# define NUM_RCU_LVL_0    1
+# define NUM_RCU_LVL_1    (((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT)
+# define NUM_RCU_LVL_2    (NR_CPUS)
+# define NUM_RCU_LVL_3    0
+#elif NR_CPUS <= RCU_FANOUT_CUBE
+# define NUM_RCU_LVLS     3
+# define NUM_RCU_LVL_0    1
+# define NUM_RCU_LVL_1    (((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ)
+# define NUM_RCU_LVL_2    (((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT))
+# define NUM_RCU_LVL_3    NR_CPUS
+#else
+# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
+#endif /* #if (NR_CPUS) <= RCU_FANOUT */
+
+#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
+#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
+
+/*
+ * Dynticks per-CPU state.
+ */
+struct rcu_dynticks {
+        int dynticks_nesting;   /* Track nesting level, sort of. */
+        int dynticks;           /* Even value for dynticks-idle, else odd. */
+        int dynticks_nmi;       /* Even value for either dynticks-idle or */
+                                /*  not in nmi handler, else odd.  So this */
+                                /*  remains even for nmi from irq handler. */
+};
+
+/*
+ * Definition for node within the RCU grace-period-detection hierarchy.
+ */
+struct rcu_node {
+        spinlock_t lock;
+        unsigned long qsmask;   /* CPUs or groups that need to switch in */
+                                /*  order for current grace period to proceed.*/
+        unsigned long qsmaskinit;
+                                /* Per-GP initialization for qsmask. */
+        unsigned long grpmask;  /* Mask to apply to parent qsmask. */
+        int     grplo;          /* lowest-numbered CPU or group here. */
+        int     grphi;          /* highest-numbered CPU or group here. */
+        u8      grpnum;         /* CPU/group number for next level up. */
+        u8      level;          /* root is at level 0. */
+        struct rcu_node *parent;
+} ____cacheline_internodealigned_in_smp;
+
+/* Index values for nxttail array in struct rcu_data. */
+#define RCU_DONE_TAIL           0       /* Also RCU_WAIT head. */
+#define RCU_WAIT_TAIL           1       /* Also RCU_NEXT_READY head. */
+#define RCU_NEXT_READY_TAIL     2       /* Also RCU_NEXT head. */
+#define RCU_NEXT_TAIL           3
+#define RCU_NEXT_SIZE           4
+
+/* Per-CPU data for read-copy update. */
+struct rcu_data {
+        /* 1) quiescent-state and grace-period handling : */
+        long            completed;      /* Track rsp->completed gp number */
+                                        /*  in order to detect GP end. */
+        long            gpnum;          /* Highest gp number that this CPU */
+                                        /*  is aware of having started. */
+        long            passed_quiesc_completed;
+                                        /* Value of completed at time of qs. */
+        bool            passed_quiesc;  /* User-mode/idle loop etc. */
+        bool            qs_pending;     /* Core waits for quiesc state. */
+        bool            beenonline;     /* CPU online at least once. */
+        struct rcu_node *mynode;        /* This CPU's leaf of hierarchy */
+        unsigned long   grpmask;        /* Mask to apply to leaf qsmask. */
+
+        /* 2) batch handling */
+        /*
+         * If nxtlist is not NULL, it is partitioned as follows.
+         * Any of the partitions might be empty, in which case the
+         * pointer to that partition will be equal to the pointer for
+         * the following partition.  When the list is empty, all of
+         * the nxttail elements point to nxtlist, which is NULL.
+         *
+         * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]):
+         *      Entries that might have arrived after current GP ended
+         * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
+         *      Entries known to have arrived before current GP ended
+         * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
+         *      Entries that batch # <= ->completed - 1: waiting for current GP
+         * [nxtlist, *nxttail[RCU_DONE_TAIL]):
+         *      Entries that batch # <= ->completed
+         *      The grace period for these entries has completed, and
+         *      the other grace-period-completed entries may be moved
+         *      here temporarily in rcu_process_callbacks().
+         */
+        struct rcu_head *nxtlist;
+        struct rcu_head **nxttail[RCU_NEXT_SIZE];
+        long            qlen;           /* # of queued callbacks */
+        long            blimit;         /* Upper limit on a processed batch */
+
+#ifdef CONFIG_NO_HZ
+        /* 3) dynticks interface. */
+        struct rcu_dynticks *dynticks;  /* Shared per-CPU dynticks state. */
+        int dynticks_snap;              /* Per-GP tracking for dynticks. */
+        int dynticks_nmi_snap;          /* Per-GP tracking for dynticks_nmi. */
+#endif /* #ifdef CONFIG_NO_HZ */
+
+        /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
+#ifdef CONFIG_NO_HZ
+        unsigned long dynticks_fqs;     /* Kicked due to dynticks idle. */
+#endif /* #ifdef CONFIG_NO_HZ */
+        unsigned long offline_fqs;      /* Kicked due to being offline. */
+        unsigned long resched_ipi;      /* Sent a resched IPI. */
+
+        /* 5) state to allow this CPU to force_quiescent_state on others */
+        long n_rcu_pending;             /* rcu_pending() calls since boot. */
+        long n_rcu_pending_force_qs;    /* when to force quiescent states. */
+
+        int cpu;
+};
+
+/* Values for signaled field in struct rcu_state. */
+#define RCU_GP_INIT             0       /* Grace period being initialized. */
+#define RCU_SAVE_DYNTICK        1       /* Need to scan dyntick state. */
+#define RCU_FORCE_QS            2       /* Need to force quiescent state. */
+#ifdef CONFIG_NO_HZ
+#define RCU_SIGNAL_INIT         RCU_SAVE_DYNTICK
+#else /* #ifdef CONFIG_NO_HZ */
+#define RCU_SIGNAL_INIT         RCU_FORCE_QS
+#endif /* #else #ifdef CONFIG_NO_HZ */
+
+#define RCU_JIFFIES_TILL_FORCE_QS       3       /* for rsp->jiffies_force_qs */
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+#define RCU_SECONDS_TILL_STALL_CHECK    (10 * HZ)  /* for rsp->jiffies_stall */
+#define RCU_SECONDS_TILL_STALL_RECHECK  (30 * HZ)  /* for rsp->jiffies_stall */
+#define RCU_STALL_RAT_DELAY             2       /* Allow other CPUs time */
+                                                /*  to take at least one */
+                                                /*  scheduling clock irq */
+                                                /*  before ratting on them. */
+
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+
+/*
+ * RCU global state, including node hierarchy.  This hierarchy is
+ * represented in "heap" form in a dense array.  The root (first level)
+ * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
+ * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
+ * and the third level in ->node[m+1] and following (->node[m+1] referenced
+ * by ->level[2]).  The number of levels is determined by the number of
+ * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
+ * consisting of a single rcu_node.
+ */
+struct rcu_state {
+        struct rcu_node node[NUM_RCU_NODES];    /* Hierarchy. */
+        struct rcu_node *level[NUM_RCU_LVLS];   /* Hierarchy levels. */
+        u32 levelcnt[MAX_RCU_LVLS + 1];         /* # nodes in each level. */
+        u8 levelspread[NUM_RCU_LVLS];           /* kids/node in each level. */
+        struct rcu_data *rda[NR_CPUS];          /* array of rdp pointers. */
+
+        /* The following fields are guarded by the root rcu_node's lock. */
+
+        u8 signaled ____cacheline_internodealigned_in_smp;
+                                                /* Force QS state. */
+        long gpnum;                             /* Current gp number. */
+        long completed;                         /* # of last completed gp. */
+        spinlock_t onofflock;                   /* exclude on/offline and */
+                                                /*  starting new GP. */
+        spinlock_t fqslock;                     /* Only one task forcing */
+                                                /*  quiescent states. */
+        unsigned long jiffies_force_qs;         /* Time at which to invoke */
+                                                /*  force_quiescent_state(). */
+        unsigned long n_force_qs;               /* Number of calls to */
+                                                /*  force_quiescent_state(). */
+        unsigned long n_force_qs_lh;            /* ~Number of calls leaving */
+                                                /*  due to lock unavailable. */
+        unsigned long n_force_qs_ngp;           /* Number of calls leaving */
+                                                /*  due to no GP active. */
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+        unsigned long gp_start;                 /* Time at which GP started, */
+                                                /*  but in jiffies. */
+        unsigned long jiffies_stall;            /* Time at which to check */
+                                                /*  for CPU stalls. */
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+#ifdef CONFIG_NO_HZ
+        long dynticks_completed;                /* Value of completed @ snap. */
+#endif /* #ifdef CONFIG_NO_HZ */
+};
+
+extern struct rcu_state rcu_state;
+DECLARE_PER_CPU(struct rcu_data, rcu_data);
+
+extern struct rcu_state rcu_bh_state;
+DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
+
+/*
+ * Increment the quiescent state counter.
+ * The counter is a bit degenerated: We do not need to know
+ * how many quiescent states passed, just if there was at least
+ * one since the start of the grace period. Thus just a flag.
+ */
+static inline void rcu_qsctr_inc(int cpu)
+{
+        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+        rdp->passed_quiesc = 1;
+        rdp->passed_quiesc_completed = rdp->completed;
+}
+static inline void rcu_bh_qsctr_inc(int cpu)
+{
+        struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
+        rdp->passed_quiesc = 1;
+        rdp->passed_quiesc_completed = rdp->completed;
+}
+
+extern int rcu_pending(int cpu);
+extern int rcu_needs_cpu(int cpu);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern struct lockdep_map rcu_lock_map;
+# define rcu_read_acquire()     \
+                        lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
+# define rcu_read_release()     lock_release(&rcu_lock_map, 1, _THIS_IP_)
+#else
+# define rcu_read_acquire()     do { } while (0)
+# define rcu_read_release()     do { } while (0)
+#endif
+
+static inline void __rcu_read_lock(void)
+{
+        preempt_disable();
+        __acquire(RCU);
+        rcu_read_acquire();
+}
+static inline void __rcu_read_unlock(void)
+{
+        rcu_read_release();
+        __release(RCU);
+        preempt_enable();
+}
+static inline void __rcu_read_lock_bh(void)
+{
+        local_bh_disable();
+        __acquire(RCU_BH);
+        rcu_read_acquire();
+}
+static inline void __rcu_read_unlock_bh(void)
+{
+        rcu_read_release();
+        __release(RCU_BH);
+        local_bh_enable();
+}
+
+#define __synchronize_sched() synchronize_rcu()
+
+#define call_rcu_sched(head, func) call_rcu(head, func)
+
+static inline void rcu_init_sched(void)
+{
+}
+
+extern void __rcu_init(void);
+extern void rcu_check_callbacks(int cpu, int user);
+extern void rcu_restart_cpu(int cpu);
+
+extern long rcu_batches_completed(void);
+extern long rcu_batches_completed_bh(void);
+
+#ifdef CONFIG_NO_HZ
+void rcu_enter_nohz(void);
+void rcu_exit_nohz(void);
+#else /* CONFIG_NO_HZ */
+static inline void rcu_enter_nohz(void)
+{
+}
+static inline void rcu_exit_nohz(void)
+{
+}
+#endif /* CONFIG_NO_HZ */
+
+#endif /* __LINUX_RCUTREE_H */
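The fan-out macros near the top of rcutree.h size the rcu_node hierarchy at compile time. As a worked example (the values are chosen here for illustration, not mandated by the patch): with CONFIG_RCU_FANOUT = 64 and NR_CPUS = 4096, NR_CPUS <= RCU_FANOUT_SQ selects the two-level case, giving one root node and 64 leaf nodes, so NUM_RCU_NODES = 65; the 4096 CPUs attach to the leaves through their per-CPU rcu_data structures rather than as rcu_node entries. A small userspace re-computation of that arithmetic:

/* Illustrative only: redoes the rcutree.h geometry math for example values. */
#include <stdio.h>

#define NR_CPUS       4096
#define RCU_FANOUT      64
#define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT)

int main(void)
{
        /* NR_CPUS (4096) <= RCU_FANOUT_SQ (4096): two-level hierarchy. */
        int lvl0 = 1;                                        /* root rcu_node */
        int lvl1 = (NR_CPUS + RCU_FANOUT - 1) / RCU_FANOUT;  /* 64 leaf rcu_nodes */
        int lvl2 = NR_CPUS;                                  /* per-CPU rcu_data, not rcu_node */
        int rcu_sum = lvl0 + lvl1 + lvl2;                    /* RCU_SUM = 4161 */

        printf("NUM_RCU_NODES = %d\n", rcu_sum - NR_CPUS);   /* prints 65 */
        return 0;
}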
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index b18ec5533e8c..325af1de0351 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -7,9 +7,31 @@ struct device;
 struct dma_attrs;
 struct scatterlist;
 
+/*
+ * Maximum allowable number of contiguous slabs to map,
+ * must be a power of 2.  What is the appropriate value ?
+ * The complexity of {map,unmap}_single is linearly dependent on this value.
+ */
+#define IO_TLB_SEGSIZE  128
+
+
+/*
+ * log of the size of each IO TLB slab.  The number of slabs is command line
+ * controllable.
+ */
+#define IO_TLB_SHIFT 11
+
 extern void
 swiotlb_init(void);
 
+extern void *swiotlb_alloc_boot(size_t bytes, unsigned long nslabs);
+extern void *swiotlb_alloc(unsigned order, unsigned long nslabs);
+
+extern dma_addr_t swiotlb_phys_to_bus(phys_addr_t address);
+extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address);
+
+extern int swiotlb_arch_range_needs_mapping(void *ptr, size_t size);
+
 extern void
 *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flags);
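The two constants moved into swiotlb.h define the bounce-buffer geometry: each IO TLB slab is 1 << IO_TLB_SHIFT = 2048 bytes, and a single mapping may span at most IO_TLB_SEGSIZE contiguous slabs, so the largest contiguous bounce mapping works out to 128 * 2 KiB = 256 KiB. A small check of that arithmetic:

/* Illustrative only: derives the implied sizes from the two header constants. */
#include <stdio.h>

#define IO_TLB_SHIFT   11
#define IO_TLB_SEGSIZE 128

int main(void)
{
        unsigned long slab_size = 1UL << IO_TLB_SHIFT;         /* 2048 bytes per slab */
        unsigned long max_map   = IO_TLB_SEGSIZE * slab_size;  /* largest contiguous mapping */

        printf("slab = %lu bytes, largest mapping = %lu KiB\n",
               slab_size, max_map / 1024);                     /* 2048 bytes, 256 KiB */
        return 0;
}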
