author     Arne Jansen <sensille@gmx.net>          2011-09-13 04:55:48 -0400
committer  Jan Schmidt <list.btrfs@jan-o-sch.net>  2012-01-04 10:12:29 -0500
commit     5b25f70f4200766355cdabda604e131d2fb6010d
tree       20749d5e90763e8c1746d36b00c5beec5e3660b9 /fs/btrfs
parent     eebe063b7f916087cd5c61de57b20a3a30894a96
Btrfs: add nested locking mode for paths
This patch adds the possibility to read-lock an extent even if it is already
write-locked by the same thread. btrfs_find_all_roots() needs this
capability.
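
For illustration, a minimal sketch (not part of the patch) of the calling
pattern this enables, using the existing btrfs locking helpers. The nested
read lock is only recognized for a blocking writer, so the write lock must
be set blocking first; a spinning write lock still holds eb->lock itself:

	btrfs_tree_lock(eb);		/* write-lock the buffer */
	btrfs_set_lock_blocking(eb);	/* convert to a blocking write lock */

	btrfs_tree_read_lock(eb);	/* same thread: recorded as nested */
	/* ... read the buffer ... */
	btrfs_tree_read_unlock(eb);	/* clears lock_nested and returns */

	btrfs_tree_unlock(eb);		/* release the original write lock */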
Signed-off-by: Arne Jansen <sensille@gmx.net>
Signed-off-by: Jan Schmidt <list.btrfs@jan-o-sch.net>
Diffstat (limited to 'fs/btrfs')
 fs/btrfs/extent_io.c |  1 +
 fs/btrfs/extent_io.h |  2 ++
 fs/btrfs/locking.c   | 53 ++++++++++++++++++++++++++++++++++++++++++++++---
 3 files changed, 54 insertions(+), 2 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index be1bf627a14b..dd8d140eb27b 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3571,6 +3571,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 	atomic_set(&eb->blocking_writers, 0);
 	atomic_set(&eb->spinning_readers, 0);
 	atomic_set(&eb->spinning_writers, 0);
+	eb->lock_nested = 0;
 	init_waitqueue_head(&eb->write_lock_wq);
 	init_waitqueue_head(&eb->read_lock_wq);
 
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 7604c3001322..bc6a042cb6fc 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -129,6 +129,7 @@ struct extent_buffer {
 	struct list_head leak_list;
 	struct rcu_head rcu_head;
 	atomic_t refs;
+	pid_t lock_owner;
 
 	/* count of read lock holders on the extent buffer */
 	atomic_t write_locks;
@@ -137,6 +138,7 @@ struct extent_buffer {
 	atomic_t blocking_readers;
 	atomic_t spinning_readers;
 	atomic_t spinning_writers;
+	int lock_nested;
 
 	/* protects write locks */
 	rwlock_t lock;
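
The two new fields cooperate: lock_owner records the pid of the task that
holds the write lock, and lock_nested records that this same task has taken
an additional read lock on top of it. A hypothetical helper (not in the
patch) showing the check they make possible:

	/* true iff the current task holds eb's write lock and has
	 * nested a read lock inside it */
	static int eb_nested_by_me(struct extent_buffer *eb)
	{
		return eb->lock_nested && current->pid == eb->lock_owner;
	}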
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index d77b67c4b275..5e178d8f7167 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -33,6 +33,14 @@ void btrfs_assert_tree_read_locked(struct extent_buffer *eb);
  */
 void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
 {
+	if (eb->lock_nested) {
+		read_lock(&eb->lock);
+		if (eb->lock_nested && current->pid == eb->lock_owner) {
+			read_unlock(&eb->lock);
+			return;
+		}
+		read_unlock(&eb->lock);
+	}
 	if (rw == BTRFS_WRITE_LOCK) {
 		if (atomic_read(&eb->blocking_writers) == 0) {
 			WARN_ON(atomic_read(&eb->spinning_writers) != 1);
@@ -57,6 +65,14 @@ void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
  */
 void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 {
+	if (eb->lock_nested) {
+		read_lock(&eb->lock);
+		if (eb->lock_nested && current->pid == eb->lock_owner) {
+			read_unlock(&eb->lock);
+			return;
+		}
+		read_unlock(&eb->lock);
+	}
 	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
 		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
 		write_lock(&eb->lock);
@@ -81,12 +97,25 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 void btrfs_tree_read_lock(struct extent_buffer *eb)
 {
 again:
+	read_lock(&eb->lock);
+	if (atomic_read(&eb->blocking_writers) &&
+	    current->pid == eb->lock_owner) {
+		/*
+		 * This extent is already write-locked by our thread. We allow
+		 * an additional read lock to be added because it's for the same
+		 * thread. btrfs_find_all_roots() depends on this as it may be
+		 * called on a partly (write-)locked tree.
+		 */
+		BUG_ON(eb->lock_nested);
+		eb->lock_nested = 1;
+		read_unlock(&eb->lock);
+		return;
+	}
+	read_unlock(&eb->lock);
 	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
 	read_lock(&eb->lock);
 	if (atomic_read(&eb->blocking_writers)) {
 		read_unlock(&eb->lock);
-		wait_event(eb->write_lock_wq,
-			   atomic_read(&eb->blocking_writers) == 0);
 		goto again;
 	}
 	atomic_inc(&eb->read_locks);
@@ -129,6 +158,7 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
 	}
 	atomic_inc(&eb->write_locks);
 	atomic_inc(&eb->spinning_writers);
+	eb->lock_owner = current->pid;
 	return 1;
 }
 
@@ -137,6 +167,15 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
  */
 void btrfs_tree_read_unlock(struct extent_buffer *eb)
 {
+	if (eb->lock_nested) {
+		read_lock(&eb->lock);
+		if (eb->lock_nested && current->pid == eb->lock_owner) {
+			eb->lock_nested = 0;
+			read_unlock(&eb->lock);
+			return;
+		}
+		read_unlock(&eb->lock);
+	}
 	btrfs_assert_tree_read_locked(eb);
 	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
 	atomic_dec(&eb->spinning_readers);
@@ -149,6 +188,15 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb)
  */
 void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
 {
+	if (eb->lock_nested) {
+		read_lock(&eb->lock);
+		if (eb->lock_nested && current->pid == eb->lock_owner) {
+			eb->lock_nested = 0;
+			read_unlock(&eb->lock);
+			return;
+		}
+		read_unlock(&eb->lock);
+	}
 	btrfs_assert_tree_read_locked(eb);
 	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
 	if (atomic_dec_and_test(&eb->blocking_readers))
@@ -181,6 +229,7 @@ again:
 	WARN_ON(atomic_read(&eb->spinning_writers));
 	atomic_inc(&eb->spinning_writers);
 	atomic_inc(&eb->write_locks);
+	eb->lock_owner = current->pid;
 	return 0;
 }
 
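
A note on the design (my reading of the patch, not author text): every fast
path above tests eb->lock_nested twice, once locklessly and once under
eb->lock together with the pid comparison. The unlocked test keeps the
common case, where no nesting exists anywhere, free of lock traffic; only
the locked re-test plus the pid match is decisive, because lock_nested and
lock_owner are set by the owning task itself, so a match proves the flag is
ours and cannot change under us:

	if (eb->lock_nested) {		/* unlocked test: cheap hint only */
		read_lock(&eb->lock);	/* keep writers from re-assigning lock_owner */
		if (eb->lock_nested && current->pid == eb->lock_owner) {
			/* the flag is ours: the outer write lock stands in */
			read_unlock(&eb->lock);
			return;
		}
		read_unlock(&eb->lock);	/* another task's nesting: normal path */
	}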