-rw-r--r--  fs/gfs2/glock.c           83
-rw-r--r--  fs/gfs2/glock.h            2
-rw-r--r--  fs/gfs2/glops.c            4
-rw-r--r--  fs/gfs2/incore.h           5
-rw-r--r--  include/linux/lockref.h    6
-rw-r--r--  lib/lockref.c              1
6 files changed, 52 insertions(+), 49 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index c2f41b4d00b9..e66a8009aff1 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -31,6 +31,7 @@
 #include <linux/bit_spinlock.h>
 #include <linux/percpu.h>
 #include <linux/list_sort.h>
+#include <linux/lockref.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -129,10 +130,10 @@ void gfs2_glock_free(struct gfs2_glock *gl)
  *
  */
 
-void gfs2_glock_hold(struct gfs2_glock *gl)
+static void gfs2_glock_hold(struct gfs2_glock *gl)
 {
-	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
-	atomic_inc(&gl->gl_ref);
+	GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
+	lockref_get(&gl->gl_lockref);
 }
 
 /**
@@ -187,20 +188,6 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
 }
 
 /**
- * gfs2_glock_put_nolock() - Decrement reference count on glock
- * @gl: The glock to put
- *
- * This function should only be used if the caller has its own reference
- * to the glock, in addition to the one it is dropping.
- */
-
-void gfs2_glock_put_nolock(struct gfs2_glock *gl)
-{
-	if (atomic_dec_and_test(&gl->gl_ref))
-		GLOCK_BUG_ON(gl, 1);
-}
-
-/**
  * gfs2_glock_put() - Decrement reference count on glock
  * @gl: The glock to put
  *
@@ -211,17 +198,22 @@ void gfs2_glock_put(struct gfs2_glock *gl)
 	struct gfs2_sbd *sdp = gl->gl_sbd;
 	struct address_space *mapping = gfs2_glock2aspace(gl);
 
-	if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
-		__gfs2_glock_remove_from_lru(gl);
-		spin_unlock(&lru_lock);
-		spin_lock_bucket(gl->gl_hash);
-		hlist_bl_del_rcu(&gl->gl_list);
-		spin_unlock_bucket(gl->gl_hash);
-		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
-		GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
-		trace_gfs2_glock_put(gl);
-		sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
-	}
+	if (lockref_put_or_lock(&gl->gl_lockref))
+		return;
+
+	lockref_mark_dead(&gl->gl_lockref);
+
+	spin_lock(&lru_lock);
+	__gfs2_glock_remove_from_lru(gl);
+	spin_unlock(&lru_lock);
+	spin_unlock(&gl->gl_lockref.lock);
+	spin_lock_bucket(gl->gl_hash);
+	hlist_bl_del_rcu(&gl->gl_list);
+	spin_unlock_bucket(gl->gl_hash);
+	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
+	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
+	trace_gfs2_glock_put(gl);
+	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
 }
 
 /**
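
For orientation, the shape of the new put path in isolation. This is an illustrative sketch, not part of the patch: struct foo, foo_put() and the kfree() teardown stand in for the glock equivalents. lockref_put_or_lock() drops the count locklessly while other references remain; only the final put falls through with the spinlock held, so teardown runs exactly once.

#include <linux/lockref.h>
#include <linux/slab.h>

struct foo {
	struct lockref ref;
	/* ... payload ... */
};

static void foo_put(struct foo *p)
{
	/* Fast path: more than one reference; the count is dropped by
	 * cmpxchg and the spinlock is never touched. */
	if (lockref_put_or_lock(&p->ref))
		return;

	/* Last reference: lockref_put_or_lock() returned with p->ref.lock
	 * held and the count still at 1, so exactly one caller gets here.
	 * Marking the ref dead makes concurrent lockref_get_not_dead()
	 * callers fail instead of resurrecting the object. */
	lockref_mark_dead(&p->ref);
	spin_unlock(&p->ref.lock);
	kfree(p);
}
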
@@ -244,7 +236,7 @@ static struct gfs2_glock *search_bucket(unsigned int hash,
 			continue;
 		if (gl->gl_sbd != sdp)
 			continue;
-		if (atomic_inc_not_zero(&gl->gl_ref))
+		if (lockref_get_not_dead(&gl->gl_lockref))
 			return gl;
 	}
 
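
The lookup side pairs RCU with lockref_get_not_dead(): RCU keeps the object's memory valid during the walk, and the dead check refuses to hand out a reference once a final put has begun teardown. A minimal sketch, reusing the illustrative struct foo from above; foo_lookup() is invented for illustration. The same idiom appears in the examine_bucket() hunk further down.

#include <linux/rcupdate.h>

/* Illustrative: take a reference to an RCU-protected foo if still alive. */
static struct foo *foo_lookup(struct foo *candidate)
{
	struct foo *ret = NULL;

	rcu_read_lock();
	if (candidate && lockref_get_not_dead(&candidate->ref))
		ret = candidate;	/* reference held: safe to use after unlock */
	rcu_read_unlock();
	return ret;
}
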
@@ -396,10 +388,11 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
 	held2 = (new_state != LM_ST_UNLOCKED);
 
 	if (held1 != held2) {
+		GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
 		if (held2)
-			gfs2_glock_hold(gl);
+			gl->gl_lockref.count++;
 		else
-			gfs2_glock_put_nolock(gl);
+			gl->gl_lockref.count--;
 	}
 	if (held1 && held2 && list_empty(&gl->gl_holders))
 		clear_bit(GLF_QUEUED, &gl->gl_flags);
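
The bare count++/count-- here is safe because state_change() runs under gl_spin, which the incore.h hunk below aliases to gl_lockref.lock: a lockref permits plain count updates by the holder of its spinlock, since the cmpxchg fast paths fall back to that lock when contended. A sketch of the idiom, again on the illustrative struct foo:

static void foo_get_locked(struct foo *p)
{
	assert_spin_locked(&p->ref.lock);	/* caller holds p->ref.lock */
	p->ref.count++;		/* plain increment; the held lock serialises
				 * it against lockref's cmpxchg fast paths */
}
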
@@ -626,9 +619,9 @@ out:
 out_sched:
 	clear_bit(GLF_LOCK, &gl->gl_flags);
 	smp_mb__after_clear_bit();
-	gfs2_glock_hold(gl);
+	gl->gl_lockref.count++;
 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-		gfs2_glock_put_nolock(gl);
+		gl->gl_lockref.count--;
 	return;
 
 out_unlock:
@@ -754,7 +747,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	gl->gl_sbd = sdp;
 	gl->gl_flags = 0;
 	gl->gl_name = name;
-	atomic_set(&gl->gl_ref, 1);
+	gl->gl_lockref.count = 1;
 	gl->gl_state = LM_ST_UNLOCKED;
 	gl->gl_target = LM_ST_UNLOCKED;
 	gl->gl_demote_state = LM_ST_EXCLUSIVE;
@@ -1356,10 +1349,10 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 		}
 	}
 
-	spin_unlock(&gl->gl_spin);
-	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-	smp_wmb();
-	gfs2_glock_hold(gl);
+	gl->gl_lockref.count++;
+	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+	spin_unlock(&gl->gl_spin);
+
 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 		gfs2_glock_put(gl);
 }
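
The reordering here takes the worker's reference while gl_spin is still held, then unlocks and queues; if the work was already queued, the reference is dropped with the full gfs2_glock_put(), which is now legal because the lock has been released by that point. A hedged sketch of the same ordering; foo_wq and the work member are assumptions, not anything from the patch:

#include <linux/workqueue.h>

static struct workqueue_struct *foo_wq;	/* assumed created elsewhere */

/* Assumes struct foo also carries a struct work_struct named "work". */
static void foo_complete(struct foo *p)
{
	spin_lock(&p->ref.lock);
	p->ref.count++;			/* reference to be owned by the worker */
	spin_unlock(&p->ref.lock);

	if (queue_work(foo_wq, &p->work) == 0)
		foo_put(p);		/* already queued: drop the extra ref */
}
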
@@ -1404,15 +1397,19 @@ __acquires(&lru_lock)
 	while(!list_empty(list)) {
 		gl = list_entry(list->next, struct gfs2_glock, gl_lru);
 		list_del_init(&gl->gl_lru);
+		if (!spin_trylock(&gl->gl_spin)) {
+			list_add(&gl->gl_lru, &lru_list);
+			atomic_inc(&lru_count);
+			continue;
+		}
 		clear_bit(GLF_LRU, &gl->gl_flags);
-		gfs2_glock_hold(gl);
 		spin_unlock(&lru_lock);
-		spin_lock(&gl->gl_spin);
+		gl->gl_lockref.count++;
 		if (demote_ok(gl))
 			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
 		WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
 		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-			gfs2_glock_put_nolock(gl);
+			gl->gl_lockref.count--;
 		spin_unlock(&gl->gl_spin);
 		spin_lock(&lru_lock);
 	}
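
The new spin_trylock() guards against lock-order inversion: this walker already holds lru_lock, while the rewritten gfs2_glock_put() above takes lru_lock with gl_spin (the lockref lock) already held, so blocking here could deadlock. A busy entry is simply put back for a later pass. A sketch of the idiom with invented names:

#include <linux/list.h>

/* Assumes struct foo also carries a struct list_head named "lru". */
static void foo_dispose_list(struct list_head *dispose, struct list_head *busy)
{
	struct foo *p, *tmp;

	list_for_each_entry_safe(p, tmp, dispose, lru) {
		list_del_init(&p->lru);
		if (!spin_trylock(&p->ref.lock)) {
			list_add(&p->lru, busy);	/* busy: retry later */
			continue;
		}
		/* ... dispose of p with p->ref.lock held ... */
		spin_unlock(&p->ref.lock);
	}
}
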
@@ -1493,7 +1490,7 @@ static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
 
 	rcu_read_lock();
 	hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
-		if ((gl->gl_sbd == sdp) && atomic_inc_not_zero(&gl->gl_ref))
+		if ((gl->gl_sbd == sdp) && lockref_get_not_dead(&gl->gl_lockref))
 			examiner(gl);
 	}
 	rcu_read_unlock();
@@ -1746,7 +1743,7 @@ int gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
 		  state2str(gl->gl_demote_state), dtime,
 		  atomic_read(&gl->gl_ail_count),
 		  atomic_read(&gl->gl_revokes),
-		  atomic_read(&gl->gl_ref), gl->gl_hold_time);
+		  (int)gl->gl_lockref.count, gl->gl_hold_time);
 
 	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
 		error = dump_holder(seq, gh);
@@ -1902,7 +1899,7 @@ static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
 			gi->nhash = 0;
 		}
 	/* Skip entries for other sb and dead entries */
-	} while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);
+	} while (gi->sdp != gi->gl->gl_sbd || __lockref_is_dead(&gi->gl->gl_lockref));
 
 	return 0;
 }
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 69f66e3d22bf..6647d77366ba 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -181,8 +181,6 @@ static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
 extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 			  const struct gfs2_glock_operations *glops,
 			  int create, struct gfs2_glock **glp);
-extern void gfs2_glock_hold(struct gfs2_glock *gl);
-extern void gfs2_glock_put_nolock(struct gfs2_glock *gl);
 extern void gfs2_glock_put(struct gfs2_glock *gl);
 extern void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
 			     unsigned flags, struct gfs2_holder *gh);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index e2e0a90396e7..db908f697139 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -525,9 +525,9 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
 
 	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
 	    gl->gl_state == LM_ST_SHARED && ip) {
-		gfs2_glock_hold(gl);
+		gl->gl_lockref.count++;
 		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
-			gfs2_glock_put_nolock(gl);
+			gl->gl_lockref.count--;
 	}
 }
 
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 2ab4f8d8f4c4..bb88e417231f 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -21,6 +21,7 @@
 #include <linux/rbtree.h>
 #include <linux/ktime.h>
 #include <linux/percpu.h>
+#include <linux/lockref.h>
 
 #define DIO_WAIT 0x00000010
 #define DIO_METADATA 0x00000020
@@ -321,9 +322,9 @@ struct gfs2_glock {
 	struct gfs2_sbd *gl_sbd;
 	unsigned long gl_flags;		/* GLF_... */
 	struct lm_lockname gl_name;
-	atomic_t gl_ref;
 
-	spinlock_t gl_spin;
+	struct lockref gl_lockref;
+#define gl_spin gl_lockref.lock
 
 	/* State fields protected by gl_spin */
 	unsigned int gl_state:2,	/* Current state */
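
Replacing the separate gl_ref/gl_spin pair with one embedded struct lockref is what enables the lockless fast paths, and the #define keeps every existing gl_spin user compiling unchanged. For orientation only, a simplified (not authoritative) view of the lockref layout; the real header wraps these fields in a union with an aligned u64 so capable 64-bit architectures can cmpxchg lock and count as a single word:

/* Simplified sketch of <linux/lockref.h>, for orientation only. */
struct lockref {
	spinlock_t lock;	/* this is what gl_spin now aliases */
	unsigned int count;	/* negative (cast to int) means dead */
};
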
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index f279ed9a9163..13dfd36a3294 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -36,4 +36,10 @@ extern int lockref_put_or_lock(struct lockref *);
 extern void lockref_mark_dead(struct lockref *);
 extern int lockref_get_not_dead(struct lockref *);
 
+/* Must be called under spinlock for reliable results */
+static inline int __lockref_is_dead(const struct lockref *l)
+{
+	return ((int)l->count < 0);
+}
+
 #endif /* __LINUX_LOCKREF_H */
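
lockref_mark_dead() (see lib/lockref.c below) sets count to -128, so __lockref_is_dead() reduces to a sign test; as the comment says, the answer is only reliable under the lock, because an unlocked reader can race with a concurrent mark_dead. A small sketch of a correct locked use, once more on the illustrative struct foo:

static bool foo_try_get_locked(struct foo *p)
{
	bool alive;

	spin_lock(&p->ref.lock);
	alive = !__lockref_is_dead(&p->ref);
	if (alive)
		p->ref.count++;	/* lock held and not dead: plain increment */
	spin_unlock(&p->ref.lock);
	return alive;		/* false: object is mid-teardown, leave it be */
}
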
diff --git a/lib/lockref.c b/lib/lockref.c
index e2cd2c0a8821..8ff162fe3413 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -136,6 +136,7 @@ void lockref_mark_dead(struct lockref *lockref)
 	assert_spin_locked(&lockref->lock);
 	lockref->count = -128;
 }
+EXPORT_SYMBOL(lockref_mark_dead);
 
 /**
  * lockref_get_not_dead - Increments count unless the ref is dead