author    | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-17 16:15:55 -0500
committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-17 16:15:55 -0500
commit    | 8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree      | a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /fs/btrfs/locking.c
parent    | 406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'fs/btrfs/locking.c')
-rw-r--r-- | fs/btrfs/locking.c | 73
1 file changed, 11 insertions, 62 deletions
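The bulk of what this patch removes is the recursive read-lock path: pre-patch, a thread that already holds a blocking write lock on an extent buffer may also take a read lock on it, tracked through eb->lock_nested and eb->lock_owner (the in-tree comment in the diff notes that btrfs_find_all_roots() relies on this). As rough orientation only, here is a minimal userspace sketch of that idea with hypothetical pthread-based names; it is an analogue of the concept, not the kernel implementation.

```c
/*
 * Hypothetical userspace analogue of the nested read-lock path removed
 * below (eb->lock_nested / eb->lock_owner): a thread that already holds
 * the write lock is allowed to take a read lock on the same node without
 * deadlocking on itself.  Not the kernel implementation.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t node_lock = PTHREAD_RWLOCK_INITIALIZER;
static __thread int write_held;   /* this thread owns the write lock   */
static __thread int lock_nested;  /* it also holds a logical read lock */

static void node_write_lock(void)
{
	pthread_rwlock_wrlock(&node_lock);
	write_held = 1;
}

static void node_read_lock(void)
{
	if (write_held) {             /* recursive request from the writer */
		lock_nested = 1;
		return;
	}
	pthread_rwlock_rdlock(&node_lock);
}

static void node_read_unlock(void)
{
	if (write_held && lock_nested) {
		lock_nested = 0;
		return;
	}
	pthread_rwlock_unlock(&node_lock);
}

static void node_write_unlock(void)
{
	write_held = 0;
	pthread_rwlock_unlock(&node_lock);
}

int main(void)
{
	node_write_lock();
	node_read_lock();   /* would self-deadlock without the nested check */
	node_read_unlock();
	node_write_unlock();
	puts("nested read inside a write lock succeeded");
	return 0;
}
```

The hunks below delete that path entirely, along with the eb->lock_owner bookkeeping in the write-lock routines.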
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 2a1762c6604..d77b67c4b27 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -33,14 +33,6 @@ void btrfs_assert_tree_read_locked(struct extent_buffer *eb);
33 | */ | 33 | */ |
34 | void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw) | 34 | void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw) |
35 | { | 35 | { |
36 | if (eb->lock_nested) { | ||
37 | read_lock(&eb->lock); | ||
38 | if (eb->lock_nested && current->pid == eb->lock_owner) { | ||
39 | read_unlock(&eb->lock); | ||
40 | return; | ||
41 | } | ||
42 | read_unlock(&eb->lock); | ||
43 | } | ||
44 | if (rw == BTRFS_WRITE_LOCK) { | 36 | if (rw == BTRFS_WRITE_LOCK) { |
45 | if (atomic_read(&eb->blocking_writers) == 0) { | 37 | if (atomic_read(&eb->blocking_writers) == 0) { |
46 | WARN_ON(atomic_read(&eb->spinning_writers) != 1); | 38 | WARN_ON(atomic_read(&eb->spinning_writers) != 1); |
@@ -65,28 +57,18 @@ void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
65 | */ | 57 | */ |
66 | void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw) | 58 | void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw) |
67 | { | 59 | { |
68 | if (eb->lock_nested) { | ||
69 | read_lock(&eb->lock); | ||
70 | if (eb->lock_nested && current->pid == eb->lock_owner) { | ||
71 | read_unlock(&eb->lock); | ||
72 | return; | ||
73 | } | ||
74 | read_unlock(&eb->lock); | ||
75 | } | ||
76 | if (rw == BTRFS_WRITE_LOCK_BLOCKING) { | 60 | if (rw == BTRFS_WRITE_LOCK_BLOCKING) { |
77 | BUG_ON(atomic_read(&eb->blocking_writers) != 1); | 61 | BUG_ON(atomic_read(&eb->blocking_writers) != 1); |
78 | write_lock(&eb->lock); | 62 | write_lock(&eb->lock); |
79 | WARN_ON(atomic_read(&eb->spinning_writers)); | 63 | WARN_ON(atomic_read(&eb->spinning_writers)); |
80 | atomic_inc(&eb->spinning_writers); | 64 | atomic_inc(&eb->spinning_writers); |
81 | if (atomic_dec_and_test(&eb->blocking_writers) && | 65 | if (atomic_dec_and_test(&eb->blocking_writers)) |
82 | waitqueue_active(&eb->write_lock_wq)) | ||
83 | wake_up(&eb->write_lock_wq); | 66 | wake_up(&eb->write_lock_wq); |
84 | } else if (rw == BTRFS_READ_LOCK_BLOCKING) { | 67 | } else if (rw == BTRFS_READ_LOCK_BLOCKING) { |
85 | BUG_ON(atomic_read(&eb->blocking_readers) == 0); | 68 | BUG_ON(atomic_read(&eb->blocking_readers) == 0); |
86 | read_lock(&eb->lock); | 69 | read_lock(&eb->lock); |
87 | atomic_inc(&eb->spinning_readers); | 70 | atomic_inc(&eb->spinning_readers); |
88 | if (atomic_dec_and_test(&eb->blocking_readers) && | 71 | if (atomic_dec_and_test(&eb->blocking_readers)) |
89 | waitqueue_active(&eb->read_lock_wq)) | ||
90 | wake_up(&eb->read_lock_wq); | 72 | wake_up(&eb->read_lock_wq); |
91 | } | 73 | } |
92 | return; | 74 | return; |
@@ -99,25 +81,12 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
99 | void btrfs_tree_read_lock(struct extent_buffer *eb) | 81 | void btrfs_tree_read_lock(struct extent_buffer *eb) |
100 | { | 82 | { |
101 | again: | 83 | again: |
102 | read_lock(&eb->lock); | ||
103 | if (atomic_read(&eb->blocking_writers) && | ||
104 | current->pid == eb->lock_owner) { | ||
105 | /* | ||
106 | * This extent is already write-locked by our thread. We allow | ||
107 | * an additional read lock to be added because it's for the same | ||
108 | * thread. btrfs_find_all_roots() depends on this as it may be | ||
109 | * called on a partly (write-)locked tree. | ||
110 | */ | ||
111 | BUG_ON(eb->lock_nested); | ||
112 | eb->lock_nested = 1; | ||
113 | read_unlock(&eb->lock); | ||
114 | return; | ||
115 | } | ||
116 | read_unlock(&eb->lock); | ||
117 | wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0); | 84 | wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0); |
118 | read_lock(&eb->lock); | 85 | read_lock(&eb->lock); |
119 | if (atomic_read(&eb->blocking_writers)) { | 86 | if (atomic_read(&eb->blocking_writers)) { |
120 | read_unlock(&eb->lock); | 87 | read_unlock(&eb->lock); |
88 | wait_event(eb->write_lock_wq, | ||
89 | atomic_read(&eb->blocking_writers) == 0); | ||
121 | goto again; | 90 | goto again; |
122 | } | 91 | } |
123 | atomic_inc(&eb->read_locks); | 92 | atomic_inc(&eb->read_locks); |
@@ -160,7 +129,6 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
160 | } | 129 | } |
161 | atomic_inc(&eb->write_locks); | 130 | atomic_inc(&eb->write_locks); |
162 | atomic_inc(&eb->spinning_writers); | 131 | atomic_inc(&eb->spinning_writers); |
163 | eb->lock_owner = current->pid; | ||
164 | return 1; | 132 | return 1; |
165 | } | 133 | } |
166 | 134 | ||
@@ -169,15 +137,6 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
169 | */ | 137 | */ |
170 | void btrfs_tree_read_unlock(struct extent_buffer *eb) | 138 | void btrfs_tree_read_unlock(struct extent_buffer *eb) |
171 | { | 139 | { |
172 | if (eb->lock_nested) { | ||
173 | read_lock(&eb->lock); | ||
174 | if (eb->lock_nested && current->pid == eb->lock_owner) { | ||
175 | eb->lock_nested = 0; | ||
176 | read_unlock(&eb->lock); | ||
177 | return; | ||
178 | } | ||
179 | read_unlock(&eb->lock); | ||
180 | } | ||
181 | btrfs_assert_tree_read_locked(eb); | 140 | btrfs_assert_tree_read_locked(eb); |
182 | WARN_ON(atomic_read(&eb->spinning_readers) == 0); | 141 | WARN_ON(atomic_read(&eb->spinning_readers) == 0); |
183 | atomic_dec(&eb->spinning_readers); | 142 | atomic_dec(&eb->spinning_readers); |
@@ -190,19 +149,9 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb)
190 | */ | 149 | */ |
191 | void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb) | 150 | void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb) |
192 | { | 151 | { |
193 | if (eb->lock_nested) { | ||
194 | read_lock(&eb->lock); | ||
195 | if (eb->lock_nested && current->pid == eb->lock_owner) { | ||
196 | eb->lock_nested = 0; | ||
197 | read_unlock(&eb->lock); | ||
198 | return; | ||
199 | } | ||
200 | read_unlock(&eb->lock); | ||
201 | } | ||
202 | btrfs_assert_tree_read_locked(eb); | 152 | btrfs_assert_tree_read_locked(eb); |
203 | WARN_ON(atomic_read(&eb->blocking_readers) == 0); | 153 | WARN_ON(atomic_read(&eb->blocking_readers) == 0); |
204 | if (atomic_dec_and_test(&eb->blocking_readers) && | 154 | if (atomic_dec_and_test(&eb->blocking_readers)) |
205 | waitqueue_active(&eb->read_lock_wq)) | ||
206 | wake_up(&eb->read_lock_wq); | 155 | wake_up(&eb->read_lock_wq); |
207 | atomic_dec(&eb->read_locks); | 156 | atomic_dec(&eb->read_locks); |
208 | } | 157 | } |
@@ -211,7 +160,7 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
211 | * take a spinning write lock. This will wait for both | 160 | * take a spinning write lock. This will wait for both |
212 | * blocking readers or writers | 161 | * blocking readers or writers |
213 | */ | 162 | */ |
214 | void btrfs_tree_lock(struct extent_buffer *eb) | 163 | int btrfs_tree_lock(struct extent_buffer *eb) |
215 | { | 164 | { |
216 | again: | 165 | again: |
217 | wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0); | 166 | wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0); |
@@ -232,13 +181,13 @@ again:
232 | WARN_ON(atomic_read(&eb->spinning_writers)); | 181 | WARN_ON(atomic_read(&eb->spinning_writers)); |
233 | atomic_inc(&eb->spinning_writers); | 182 | atomic_inc(&eb->spinning_writers); |
234 | atomic_inc(&eb->write_locks); | 183 | atomic_inc(&eb->write_locks); |
235 | eb->lock_owner = current->pid; | 184 | return 0; |
236 | } | 185 | } |
237 | 186 | ||
238 | /* | 187 | /* |
239 | * drop a spinning or a blocking write lock. | 188 | * drop a spinning or a blocking write lock. |
240 | */ | 189 | */ |
241 | void btrfs_tree_unlock(struct extent_buffer *eb) | 190 | int btrfs_tree_unlock(struct extent_buffer *eb) |
242 | { | 191 | { |
243 | int blockers = atomic_read(&eb->blocking_writers); | 192 | int blockers = atomic_read(&eb->blocking_writers); |
244 | 193 | ||
@@ -250,14 +199,14 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
250 | if (blockers) { | 199 | if (blockers) { |
251 | WARN_ON(atomic_read(&eb->spinning_writers)); | 200 | WARN_ON(atomic_read(&eb->spinning_writers)); |
252 | atomic_dec(&eb->blocking_writers); | 201 | atomic_dec(&eb->blocking_writers); |
253 | smp_mb(); | 202 | smp_wmb(); |
254 | if (waitqueue_active(&eb->write_lock_wq)) | 203 | wake_up(&eb->write_lock_wq); |
255 | wake_up(&eb->write_lock_wq); | ||
256 | } else { | 204 | } else { |
257 | WARN_ON(atomic_read(&eb->spinning_writers) != 1); | 205 | WARN_ON(atomic_read(&eb->spinning_writers) != 1); |
258 | atomic_dec(&eb->spinning_writers); | 206 | atomic_dec(&eb->spinning_writers); |
259 | write_unlock(&eb->lock); | 207 | write_unlock(&eb->lock); |
260 | } | 208 | } |
209 | return 0; | ||
261 | } | 210 | } |
262 | 211 | ||
263 | void btrfs_assert_tree_locked(struct extent_buffer *eb) | 212 | void btrfs_assert_tree_locked(struct extent_buffer *eb) |
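A second recurring change above is in the wake-up paths: the pre-patch code only called wake_up() when waitqueue_active() reported waiters (paired with smp_mb() in btrfs_tree_unlock), while the patched code wakes unconditionally after the atomic_dec_and_test(). Below is a rough pthread-based sketch of that "only signal when someone is waiting" optimization, with hypothetical names; a mutex stands in for the ordering the kernel gets from its memory barriers.

```c
/*
 * Rough pthread analogue of the waitqueue_active() + wake_up() idiom the
 * pre-patch code used: skip the wake-up when nobody is waiting.  Here the
 * waiter count is read and written under one mutex, which supplies the
 * ordering the kernel version gets from the smp_mb()/smp_wmb() seen in
 * the diff above.  Hypothetical names, not kernel code.
 */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
static int blocking_writers = 1;  /* the condition readers wait on   */
static int nwaiters;              /* stand-in for waitqueue_active() */

static void *reader(void *arg)
{
	pthread_mutex_lock(&lk);
	nwaiters++;
	while (blocking_writers)
		pthread_cond_wait(&cv, &lk);
	nwaiters--;
	pthread_mutex_unlock(&lk);
	puts("reader proceeds");
	return arg;
}

static void writer_done(void)
{
	pthread_mutex_lock(&lk);
	blocking_writers = 0;
	if (nwaiters)                 /* only signal if someone is waiting */
		pthread_cond_broadcast(&cv);
	pthread_mutex_unlock(&lk);
}

int main(void)
{
	pthread_t t;
	struct timespec ts = { 0, 50 * 1000 * 1000 };  /* 50 ms */

	pthread_create(&t, NULL, reader, NULL);
	nanosleep(&ts, NULL);         /* let the reader start waiting */
	writer_done();
	pthread_join(t, NULL);
	return 0;
}
```

The patched side of the diff instead wakes unconditionally, which avoids having to order the "is anyone waiting" check against the waiter's own state at the cost of occasional unnecessary wake-ups.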