Diffstat (limited to 'fs/btrfs/locking.c')
-rw-r--r--  fs/btrfs/locking.c  207
1 files changed, 173 insertions, 34 deletions
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 39bae7761db6..85506c4a3af7 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -25,64 +25,203 @@
 #include "extent_io.h"
 #include "locking.h"
 
+static inline void spin_nested(struct extent_buffer *eb)
+{
+        spin_lock(&eb->lock);
+}
+
 /*
- * locks the per buffer mutex in an extent buffer. This uses adaptive locks
- * and the spin is not tuned very extensively. The spinning does make a big
- * difference in almost every workload, but spinning for the right amount of
- * time needs some help.
- *
- * In general, we want to spin as long as the lock holder is doing btree
- * searches, and we should give up if they are in more expensive code.
+ * Setting a lock to blocking will drop the spinlock and set the
+ * flag that forces other procs who want the lock to wait. After
+ * this you can safely schedule with the lock held.
  */
+void btrfs_set_lock_blocking(struct extent_buffer *eb)
+{
+        if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
+                set_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
+                spin_unlock(&eb->lock);
+        }
+        /* exit with the spin lock released and the bit set */
+}
 
-int btrfs_tree_lock(struct extent_buffer *eb)
+/*
+ * clearing the blocking flag will take the spinlock again.
+ * After this you can't safely schedule
+ */
+void btrfs_clear_lock_blocking(struct extent_buffer *eb)
 {
-        int i;
+        if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
+                spin_nested(eb);
+                clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
+                smp_mb__after_clear_bit();
+        }
+        /* exit with the spin lock held */
+}
 
-        if (mutex_trylock(&eb->mutex))
-                return 0;
+/*
+ * unfortunately, many of the places that currently set a lock to blocking
+ * don't end up blocking for very long, and often they don't block
+ * at all. For a dbench 50 run, if we don't spin on the blocking bit
+ * at all, the context switch rate can jump up to 400,000/sec or more.
+ *
+ * So, we're still stuck with this crummy spin on the blocking bit,
+ * at least until the most common causes of the short blocks
+ * can be dealt with.
+ */
+static int btrfs_spin_on_block(struct extent_buffer *eb)
+{
+        int i;
         for (i = 0; i < 512; i++) {
                 cpu_relax();
-                if (mutex_trylock(&eb->mutex))
-                        return 0;
+                if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
+                        return 1;
+                if (need_resched())
+                        break;
         }
-        cpu_relax();
-        mutex_lock_nested(&eb->mutex, BTRFS_MAX_LEVEL - btrfs_header_level(eb));
         return 0;
 }
 
-int btrfs_try_tree_lock(struct extent_buffer *eb)
+/*
+ * This is somewhat different from trylock. It will take the
+ * spinlock but if it finds the lock is set to blocking, it will
+ * return without the lock held.
+ *
+ * returns 1 if it was able to take the lock and zero otherwise
+ *
+ * After this call, scheduling is not safe without first calling
+ * btrfs_set_lock_blocking()
+ */
+int btrfs_try_spin_lock(struct extent_buffer *eb)
 {
-        return mutex_trylock(&eb->mutex);
+        int i;
+
+        spin_nested(eb);
+        if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
+                return 1;
+        spin_unlock(&eb->lock);
+
+        /* spin for a bit on the BLOCKING flag */
+        for (i = 0; i < 2; i++) {
+                if (!btrfs_spin_on_block(eb))
+                        break;
+
+                spin_nested(eb);
+                if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
+                        return 1;
+                spin_unlock(&eb->lock);
+        }
+        return 0;
 }
 
-int btrfs_tree_unlock(struct extent_buffer *eb)
+/*
+ * the autoremove wake function will return 0 if it tried to wake up
+ * a process that was already awake, which means that process won't
+ * count as an exclusive wakeup. The waitq code will continue waking
+ * procs until it finds one that was actually sleeping.
+ *
+ * For btrfs, this isn't quite what we want. We want a single proc
+ * to be notified that the lock is ready for taking. If that proc
+ * already happens to be awake, great, it will loop around and try for
+ * the lock.
+ *
+ * So, btrfs_wake_function always returns 1, even when the proc that we
+ * tried to wake up was already awake.
+ */
+static int btrfs_wake_function(wait_queue_t *wait, unsigned mode,
+                               int sync, void *key)
 {
-        mutex_unlock(&eb->mutex);
-        return 0;
+        autoremove_wake_function(wait, mode, sync, key);
+        return 1;
 }
 
-int btrfs_tree_locked(struct extent_buffer *eb)
+/*
+ * returns with the extent buffer spinlocked.
+ *
+ * This will spin and/or wait as required to take the lock, and then
+ * return with the spinlock held.
+ *
+ * After this call, scheduling is not safe without first calling
+ * btrfs_set_lock_blocking()
+ */
+int btrfs_tree_lock(struct extent_buffer *eb)
 {
-        return mutex_is_locked(&eb->mutex);
+        DEFINE_WAIT(wait);
+        wait.func = btrfs_wake_function;
+
+        while(1) {
+                spin_nested(eb);
+
+                /* nobody is blocking, exit with the spinlock held */
+                if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
+                        return 0;
+
+                /*
+                 * we have the spinlock, but the real owner is blocking.
+                 * wait for them
+                 */
+                spin_unlock(&eb->lock);
+
+                /*
+                 * spin for a bit, and if the blocking flag goes away,
+                 * loop around
+                 */
+                if (btrfs_spin_on_block(eb))
+                        continue;
+
+                prepare_to_wait_exclusive(&eb->lock_wq, &wait,
+                                          TASK_UNINTERRUPTIBLE);
+
+                if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
+                        schedule();
+
+                finish_wait(&eb->lock_wq, &wait);
+        }
+        return 0;
 }
 
 /*
- * btrfs_search_slot uses this to decide if it should drop its locks
- * before doing something expensive like allocating free blocks for cow.
+ * Very quick trylock, this does not spin or schedule. It returns
+ * 1 with the spinlock held if it was able to take the lock, or it
+ * returns zero if it was unable to take the lock.
+ *
+ * After this call, scheduling is not safe without first calling
+ * btrfs_set_lock_blocking()
  */
-int btrfs_path_lock_waiting(struct btrfs_path *path, int level)
+int btrfs_try_tree_lock(struct extent_buffer *eb)
 {
-        int i;
-        struct extent_buffer *eb;
-        for (i = level; i <= level + 1 && i < BTRFS_MAX_LEVEL; i++) {
-                eb = path->nodes[i];
-                if (!eb)
-                        break;
-                smp_mb();
-                if (!list_empty(&eb->mutex.wait_list))
-                        return 1;
+        if (spin_trylock(&eb->lock)) {
+                if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
+                        /*
+                         * we've got the spinlock, but the real owner is
+                         * blocking. Drop the spinlock and return failure
+                         */
+                        spin_unlock(&eb->lock);
+                        return 0;
+                }
+                return 1;
         }
+        /* someone else has the spinlock, give up */
         return 0;
 }
 
+int btrfs_tree_unlock(struct extent_buffer *eb)
+{
+        /*
+         * if we were a blocking owner, we don't have the spinlock held
+         * just clear the bit and look for waiters
+         */
+        if (test_and_clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
+                smp_mb__after_clear_bit();
+        else
+                spin_unlock(&eb->lock);
+
+        if (waitqueue_active(&eb->lock_wq))
+                wake_up(&eb->lock_wq);
+        return 0;
+}
+
+int btrfs_tree_locked(struct extent_buffer *eb)
+{
+        return test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags) ||
+                spin_is_locked(&eb->lock);
+}
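
As a reading aid, and not part of the commit itself, the following is a minimal caller-side sketch of how the lock states above are meant to be driven, based only on the comments in this diff. The helpers example_modify_eb(), do_quick_update() and do_blocking_work() are hypothetical placeholders for btree work, not functions from btrfs.

/*
 * Hypothetical usage sketch, not from this patch: take the lock as a
 * spinlock, switch it to blocking before anything that may schedule,
 * switch back, then unlock (which copes with either state).
 */
static void example_modify_eb(struct extent_buffer *eb)
{
        btrfs_tree_lock(eb);            /* returns with eb->lock (spinlock) held */

        do_quick_update(eb);            /* must not schedule while spinning */

        btrfs_set_lock_blocking(eb);    /* drops the spinlock, sets EXTENT_BUFFER_BLOCKING */
        do_blocking_work(eb);           /* safe to schedule here */
        btrfs_clear_lock_blocking(eb);  /* re-takes the spinlock, clears the bit */

        btrfs_tree_unlock(eb);          /* handles both the spinning and blocking owner cases */
}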