Diffstat (limited to 'fs/btrfs/locking.c')
 fs/btrfs/locking.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 51 insertions(+), 2 deletions(-)
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index d77b67c4b275..5e178d8f7167 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -33,6 +33,14 @@ void btrfs_assert_tree_read_locked(struct extent_buffer *eb);
  */
 void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
 {
+	if (eb->lock_nested) {
+		read_lock(&eb->lock);
+		if (eb->lock_nested && current->pid == eb->lock_owner) {
+			read_unlock(&eb->lock);
+			return;
+		}
+		read_unlock(&eb->lock);
+	}
 	if (rw == BTRFS_WRITE_LOCK) {
 		if (atomic_read(&eb->blocking_writers) == 0) {
 			WARN_ON(atomic_read(&eb->spinning_writers) != 1);
@@ -57,6 +65,14 @@ void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
  */
 void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 {
+	if (eb->lock_nested) {
+		read_lock(&eb->lock);
+		if (eb->lock_nested && current->pid == eb->lock_owner) {
+			read_unlock(&eb->lock);
+			return;
+		}
+		read_unlock(&eb->lock);
+	}
 	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
 		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
 		write_lock(&eb->lock);
@@ -81,12 +97,25 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 void btrfs_tree_read_lock(struct extent_buffer *eb)
 {
 again:
+	read_lock(&eb->lock);
+	if (atomic_read(&eb->blocking_writers) &&
+	    current->pid == eb->lock_owner) {
+		/*
+		 * This extent is already write-locked by our thread. We allow
+		 * an additional read lock to be added because it's for the same
+		 * thread. btrfs_find_all_roots() depends on this as it may be
+		 * called on a partly (write-)locked tree.
+		 */
+		BUG_ON(eb->lock_nested);
+		eb->lock_nested = 1;
+		read_unlock(&eb->lock);
+		return;
+	}
+	read_unlock(&eb->lock);
 	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
 	read_lock(&eb->lock);
 	if (atomic_read(&eb->blocking_writers)) {
 		read_unlock(&eb->lock);
-		wait_event(eb->write_lock_wq,
-			   atomic_read(&eb->blocking_writers) == 0);
 		goto again;
 	}
 	atomic_inc(&eb->read_locks);
@@ -129,6 +158,7 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
 	}
 	atomic_inc(&eb->write_locks);
 	atomic_inc(&eb->spinning_writers);
+	eb->lock_owner = current->pid;
 	return 1;
 }
 
@@ -137,6 +167,15 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
  */
 void btrfs_tree_read_unlock(struct extent_buffer *eb)
 {
+	if (eb->lock_nested) {
+		read_lock(&eb->lock);
+		if (eb->lock_nested && current->pid == eb->lock_owner) {
+			eb->lock_nested = 0;
+			read_unlock(&eb->lock);
+			return;
+		}
+		read_unlock(&eb->lock);
+	}
 	btrfs_assert_tree_read_locked(eb);
 	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
 	atomic_dec(&eb->spinning_readers);
@@ -149,6 +188,15 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb)
  */
 void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
 {
+	if (eb->lock_nested) {
+		read_lock(&eb->lock);
+		if (eb->lock_nested && current->pid == eb->lock_owner) {
+			eb->lock_nested = 0;
+			read_unlock(&eb->lock);
+			return;
+		}
+		read_unlock(&eb->lock);
+	}
 	btrfs_assert_tree_read_locked(eb);
 	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
 	if (atomic_dec_and_test(&eb->blocking_readers))
@@ -181,6 +229,7 @@ again:
 	WARN_ON(atomic_read(&eb->spinning_writers));
 	atomic_inc(&eb->spinning_writers);
 	atomic_inc(&eb->write_locks);
+	eb->lock_owner = current->pid;
 	return 0;
 }
 
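What the hunks add up to: a thread that already holds the write lock on an extent buffer (recorded in eb->lock_owner when the write lock is taken) may take one nested read lock on the same buffer. btrfs_tree_read_lock() records this in eb->lock_nested instead of blocking on itself, and the blocking/unlock paths short-circuit when they see the nested marker, so btrfs_find_all_roots() can walk a tree that is partly write-locked by its own thread. Below is a minimal userspace sketch of that scheme, assuming POSIX rwlocks in place of the kernel's rwlock_t and pthread identity in place of current->pid; the names (nested_eb_lock, neb_read_lock, and so on) are invented for illustration, and the sketch glosses over the ordering care the kernel code takes by re-acquiring eb->lock as a reader before testing lock_nested and lock_owner.

/*
 * Minimal userspace sketch of the nested-locking scheme above.
 * Hypothetical names; NOT the kernel implementation.
 * Build with: cc sketch.c -o sketch -lpthread
 */
#include <pthread.h>
#include <stdio.h>

struct nested_eb_lock {
	pthread_rwlock_t lock;	/* stands in for eb->lock */
	pthread_t owner;	/* stands in for eb->lock_owner */
	int write_locked;	/* is the write side held? */
	int nested;		/* stands in for eb->lock_nested */
};

static void neb_write_lock(struct nested_eb_lock *l)
{
	pthread_rwlock_wrlock(&l->lock);
	l->owner = pthread_self();	/* mirrors eb->lock_owner = current->pid */
	l->write_locked = 1;
}

static void neb_write_unlock(struct nested_eb_lock *l)
{
	l->write_locked = 0;
	pthread_rwlock_unlock(&l->lock);
}

/*
 * Take the read side. If the calling thread already holds the write
 * side, only mark the acquisition as nested; actually taking the
 * rwlock again would self-deadlock.
 */
static void neb_read_lock(struct nested_eb_lock *l)
{
	if (l->write_locked && pthread_equal(l->owner, pthread_self())) {
		l->nested = 1;	/* one level of nesting, as in the patch */
		return;
	}
	pthread_rwlock_rdlock(&l->lock);
}

static void neb_read_unlock(struct nested_eb_lock *l)
{
	if (l->nested && pthread_equal(l->owner, pthread_self())) {
		l->nested = 0;	/* drop the marker, not the rwlock */
		return;
	}
	pthread_rwlock_unlock(&l->lock);
}

int main(void)
{
	struct nested_eb_lock l = { .lock = PTHREAD_RWLOCK_INITIALIZER };

	neb_write_lock(&l);
	neb_read_lock(&l);	/* would deadlock without the nesting check */
	neb_read_unlock(&l);
	neb_write_unlock(&l);
	puts("nested read under write lock: ok");
	return 0;
}

One design note the sketch mirrors: the patch permits only a single level of nesting (enforced by BUG_ON(eb->lock_nested) in btrfs_tree_read_lock), and ownership is keyed off a pid rather than a task pointer.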