author Glenn Elliott <gelliott@cs.unc.edu> 2012-03-04 19:47:13 -0500
committer Glenn Elliott <gelliott@cs.unc.edu> 2012-03-04 19:47:13 -0500
commit c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /fs/gfs2/glock.c
parent ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent 6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--  fs/gfs2/glock.c | 577
1 file changed, 248 insertions(+), 329 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 9adf8f924e08..1c1336e7b3b2 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -26,6 +26,9 @@
26#include <linux/freezer.h> 26#include <linux/freezer.h>
27#include <linux/workqueue.h> 27#include <linux/workqueue.h>
28#include <linux/jiffies.h> 28#include <linux/jiffies.h>
29#include <linux/rcupdate.h>
30#include <linux/rculist_bl.h>
31#include <linux/bit_spinlock.h>
29 32
30#include "gfs2.h" 33#include "gfs2.h"
31#include "incore.h" 34#include "incore.h"
@@ -41,10 +44,6 @@
41#define CREATE_TRACE_POINTS 44#define CREATE_TRACE_POINTS
42#include "trace_gfs2.h" 45#include "trace_gfs2.h"
43 46
44struct gfs2_gl_hash_bucket {
45 struct hlist_head hb_list;
46};
47
48struct gfs2_glock_iter { 47struct gfs2_glock_iter {
49 int hash; /* hash bucket index */ 48 int hash; /* hash bucket index */
50 struct gfs2_sbd *sdp; /* incore superblock */ 49 struct gfs2_sbd *sdp; /* incore superblock */
@@ -54,7 +53,6 @@ struct gfs2_glock_iter {
54 53
55typedef void (*glock_examiner) (struct gfs2_glock * gl); 54typedef void (*glock_examiner) (struct gfs2_glock * gl);
56 55
57static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
58static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl); 56static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
59#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0) 57#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
60static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target); 58static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
@@ -70,57 +68,9 @@ static DEFINE_SPINLOCK(lru_lock);
70#define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT) 68#define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT)
71#define GFS2_GL_HASH_MASK (GFS2_GL_HASH_SIZE - 1) 69#define GFS2_GL_HASH_MASK (GFS2_GL_HASH_SIZE - 1)
72 70
73static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE]; 71static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];
74static struct dentry *gfs2_root; 72static struct dentry *gfs2_root;
75 73
76/*
77 * Despite what you might think, the numbers below are not arbitrary :-)
78 * They are taken from the ipv4 routing hash code, which is well tested
79 * and thus should be nearly optimal. Later on we might tweek the numbers
80 * but for now this should be fine.
81 *
82 * The reason for putting the locks in a separate array from the list heads
83 * is that we can have fewer locks than list heads and save memory. We use
84 * the same hash function for both, but with a different hash mask.
85 */
86#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
87 defined(CONFIG_PROVE_LOCKING)
88
89#ifdef CONFIG_LOCKDEP
90# define GL_HASH_LOCK_SZ 256
91#else
92# if NR_CPUS >= 32
93# define GL_HASH_LOCK_SZ 4096
94# elif NR_CPUS >= 16
95# define GL_HASH_LOCK_SZ 2048
96# elif NR_CPUS >= 8
97# define GL_HASH_LOCK_SZ 1024
98# elif NR_CPUS >= 4
99# define GL_HASH_LOCK_SZ 512
100# else
101# define GL_HASH_LOCK_SZ 256
102# endif
103#endif
104
105/* We never want more locks than chains */
106#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
107# undef GL_HASH_LOCK_SZ
108# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
109#endif
110
111static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];
112
113static inline rwlock_t *gl_lock_addr(unsigned int x)
114{
115 return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
116}
117#else /* not SMP, so no spinlocks required */
118static inline rwlock_t *gl_lock_addr(unsigned int x)
119{
120 return NULL;
121}
122#endif
123
124/** 74/**
125 * gl_hash() - Turn glock number into hash bucket number 75 * gl_hash() - Turn glock number into hash bucket number
126 * @lock: The glock number 76 * @lock: The glock number
@@ -141,25 +91,33 @@ static unsigned int gl_hash(const struct gfs2_sbd *sdp,
141 return h; 91 return h;
142} 92}
143 93
144/** 94static inline void spin_lock_bucket(unsigned int hash)
145 * glock_free() - Perform a few checks and then release struct gfs2_glock 95{
146 * @gl: The glock to release 96 hlist_bl_lock(&gl_hash_table[hash]);
147 * 97}
148 * Also calls lock module to release its internal structure for this glock. 98
149 * 99static inline void spin_unlock_bucket(unsigned int hash)
150 */ 100{
101 hlist_bl_unlock(&gl_hash_table[hash]);
102}
103
104static void gfs2_glock_dealloc(struct rcu_head *rcu)
105{
106 struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
107
108 if (gl->gl_ops->go_flags & GLOF_ASPACE)
109 kmem_cache_free(gfs2_glock_aspace_cachep, gl);
110 else
111 kmem_cache_free(gfs2_glock_cachep, gl);
112}
151 113
152static void glock_free(struct gfs2_glock *gl) 114void gfs2_glock_free(struct gfs2_glock *gl)
153{ 115{
154 struct gfs2_sbd *sdp = gl->gl_sbd; 116 struct gfs2_sbd *sdp = gl->gl_sbd;
155 struct address_space *mapping = gfs2_glock2aspace(gl);
156 struct kmem_cache *cachep = gfs2_glock_cachep;
157 117
158 GLOCK_BUG_ON(gl, mapping && mapping->nrpages); 118 call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
159 trace_gfs2_glock_put(gl); 119 if (atomic_dec_and_test(&sdp->sd_glock_disposal))
160 if (mapping) 120 wake_up(&sdp->sd_glock_wait);
161 cachep = gfs2_glock_aspace_cachep;
162 sdp->sd_lockstruct.ls_ops->lm_put_lock(cachep, gl);
163} 121}
164 122
165/** 123/**
@@ -194,28 +152,47 @@ static int demote_ok(const struct gfs2_glock *gl)
194 return 1; 152 return 1;
195} 153}
196 154
197/**
198 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
199 * @gl: the glock
200 *
201 */
202 155
203static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl) 156void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
204{ 157{
205 int may_reclaim;
206 may_reclaim = (demote_ok(gl) &&
207 (atomic_read(&gl->gl_ref) == 1 ||
208 (gl->gl_name.ln_type == LM_TYPE_INODE &&
209 atomic_read(&gl->gl_ref) <= 2)));
210 spin_lock(&lru_lock); 158 spin_lock(&lru_lock);
211 if (list_empty(&gl->gl_lru) && may_reclaim) { 159
212 list_add_tail(&gl->gl_lru, &lru_list); 160 if (!list_empty(&gl->gl_lru))
161 list_del_init(&gl->gl_lru);
162 else
213 atomic_inc(&lru_count); 163 atomic_inc(&lru_count);
164
165 list_add_tail(&gl->gl_lru, &lru_list);
166 set_bit(GLF_LRU, &gl->gl_flags);
167 spin_unlock(&lru_lock);
168}
169
170static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
171{
172 spin_lock(&lru_lock);
173 if (!list_empty(&gl->gl_lru)) {
174 list_del_init(&gl->gl_lru);
175 atomic_dec(&lru_count);
176 clear_bit(GLF_LRU, &gl->gl_flags);
214 } 177 }
215 spin_unlock(&lru_lock); 178 spin_unlock(&lru_lock);
216} 179}
217 180
218/** 181/**
182 * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
183 * @gl: the glock
184 *
185 * If the glock is demotable, then we add it (or move it) to the end
186 * of the glock LRU list.
187 */
188
189static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
190{
191 if (demote_ok(gl))
192 gfs2_glock_add_to_lru(gl);
193}
194
195/**
219 * gfs2_glock_put_nolock() - Decrement reference count on glock 196 * gfs2_glock_put_nolock() - Decrement reference count on glock
220 * @gl: The glock to put 197 * @gl: The glock to put
221 * 198 *
@@ -227,7 +204,6 @@ void gfs2_glock_put_nolock(struct gfs2_glock *gl)
227{ 204{
228 if (atomic_dec_and_test(&gl->gl_ref)) 205 if (atomic_dec_and_test(&gl->gl_ref))
229 GLOCK_BUG_ON(gl, 1); 206 GLOCK_BUG_ON(gl, 1);
230 gfs2_glock_schedule_for_reclaim(gl);
231} 207}
232 208
233/** 209/**
@@ -236,30 +212,21 @@ void gfs2_glock_put_nolock(struct gfs2_glock *gl)
236 * 212 *
237 */ 213 */
238 214
239int gfs2_glock_put(struct gfs2_glock *gl) 215void gfs2_glock_put(struct gfs2_glock *gl)
240{ 216{
241 int rv = 0; 217 struct gfs2_sbd *sdp = gl->gl_sbd;
218 struct address_space *mapping = gfs2_glock2aspace(gl);
242 219
243 write_lock(gl_lock_addr(gl->gl_hash)); 220 if (atomic_dec_and_test(&gl->gl_ref)) {
244 if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) { 221 spin_lock_bucket(gl->gl_hash);
245 hlist_del(&gl->gl_list); 222 hlist_bl_del_rcu(&gl->gl_list);
246 if (!list_empty(&gl->gl_lru)) { 223 spin_unlock_bucket(gl->gl_hash);
247 list_del_init(&gl->gl_lru); 224 gfs2_glock_remove_from_lru(gl);
248 atomic_dec(&lru_count);
249 }
250 spin_unlock(&lru_lock);
251 write_unlock(gl_lock_addr(gl->gl_hash));
252 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); 225 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
253 glock_free(gl); 226 GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
254 rv = 1; 227 trace_gfs2_glock_put(gl);
255 goto out; 228 sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
256 } 229 }
257 spin_lock(&gl->gl_spin);
258 gfs2_glock_schedule_for_reclaim(gl);
259 spin_unlock(&gl->gl_spin);
260 write_unlock(gl_lock_addr(gl->gl_hash));
261out:
262 return rv;
263} 230}
264 231
265/** 232/**
@@ -275,17 +242,15 @@ static struct gfs2_glock *search_bucket(unsigned int hash,
275 const struct lm_lockname *name) 242 const struct lm_lockname *name)
276{ 243{
277 struct gfs2_glock *gl; 244 struct gfs2_glock *gl;
278 struct hlist_node *h; 245 struct hlist_bl_node *h;
279 246
280 hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) { 247 hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
281 if (!lm_name_equal(&gl->gl_name, name)) 248 if (!lm_name_equal(&gl->gl_name, name))
282 continue; 249 continue;
283 if (gl->gl_sbd != sdp) 250 if (gl->gl_sbd != sdp)
284 continue; 251 continue;
285 252 if (atomic_inc_not_zero(&gl->gl_ref))
286 atomic_inc(&gl->gl_ref); 253 return gl;
287
288 return gl;
289 } 254 }
290 255
291 return NULL; 256 return NULL;
@@ -441,6 +406,8 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
441 else 406 else
442 gfs2_glock_put_nolock(gl); 407 gfs2_glock_put_nolock(gl);
443 } 408 }
409 if (held1 && held2 && list_empty(&gl->gl_holders))
410 clear_bit(GLF_QUEUED, &gl->gl_flags);
444 411
445 gl->gl_state = new_state; 412 gl->gl_state = new_state;
446 gl->gl_tchange = jiffies; 413 gl->gl_tchange = jiffies;
@@ -539,21 +506,6 @@ out_locked:
539 spin_unlock(&gl->gl_spin); 506 spin_unlock(&gl->gl_spin);
540} 507}
541 508
542static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
543 unsigned int req_state,
544 unsigned int flags)
545{
546 int ret = LM_OUT_ERROR;
547
548 if (!sdp->sd_lockstruct.ls_ops->lm_lock)
549 return req_state == LM_ST_UNLOCKED ? 0 : req_state;
550
551 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
552 ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock,
553 req_state, flags);
554 return ret;
555}
556
557/** 509/**
558 * do_xmote - Calls the DLM to change the state of a lock 510 * do_xmote - Calls the DLM to change the state of a lock
559 * @gl: The lock state 511 * @gl: The lock state
@@ -573,13 +525,14 @@ __acquires(&gl->gl_spin)
573 525
574 lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP | 526 lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
575 LM_FLAG_PRIORITY); 527 LM_FLAG_PRIORITY);
576 BUG_ON(gl->gl_state == target); 528 GLOCK_BUG_ON(gl, gl->gl_state == target);
577 BUG_ON(gl->gl_state == gl->gl_target); 529 GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
578 if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) && 530 if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
579 glops->go_inval) { 531 glops->go_inval) {
580 set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); 532 set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
581 do_error(gl, 0); /* Fail queued try locks */ 533 do_error(gl, 0); /* Fail queued try locks */
582 } 534 }
535 gl->gl_req = target;
583 spin_unlock(&gl->gl_spin); 536 spin_unlock(&gl->gl_spin);
584 if (glops->go_xmote_th) 537 if (glops->go_xmote_th)
585 glops->go_xmote_th(gl); 538 glops->go_xmote_th(gl);
@@ -588,19 +541,16 @@ __acquires(&gl->gl_spin)
588 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); 541 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
589 542
590 gfs2_glock_hold(gl); 543 gfs2_glock_hold(gl);
591 if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED || 544 if (sdp->sd_lockstruct.ls_ops->lm_lock) {
592 gl->gl_state == LM_ST_DEFERRED) && 545 /* lock_dlm */
593 !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) 546 ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
594 lck_flags |= LM_FLAG_TRY_1CB; 547 GLOCK_BUG_ON(gl, ret);
595 ret = gfs2_lm_lock(sdp, gl, target, lck_flags); 548 } else { /* lock_nolock */
596 549 finish_xmote(gl, target);
597 if (!(ret & LM_OUT_ASYNC)) {
598 finish_xmote(gl, ret);
599 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 550 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
600 gfs2_glock_put(gl); 551 gfs2_glock_put(gl);
601 } else {
602 GLOCK_BUG_ON(gl, ret != LM_OUT_ASYNC);
603 } 552 }
553
604 spin_lock(&gl->gl_spin); 554 spin_lock(&gl->gl_spin);
605} 555}
606 556
@@ -684,21 +634,20 @@ static void delete_work_func(struct work_struct *work)
684{ 634{
685 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete); 635 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
686 struct gfs2_sbd *sdp = gl->gl_sbd; 636 struct gfs2_sbd *sdp = gl->gl_sbd;
687 struct gfs2_inode *ip = NULL; 637 struct gfs2_inode *ip;
688 struct inode *inode; 638 struct inode *inode;
689 u64 no_addr = 0; 639 u64 no_addr = gl->gl_name.ln_number;
640
641 ip = gl->gl_object;
642 /* Note: Unsafe to dereference ip as we don't hold right refs/locks */
690 643
691 spin_lock(&gl->gl_spin);
692 ip = (struct gfs2_inode *)gl->gl_object;
693 if (ip) 644 if (ip)
694 no_addr = ip->i_no_addr; 645 inode = gfs2_ilookup(sdp->sd_vfs, no_addr, 1);
695 spin_unlock(&gl->gl_spin); 646 else
696 if (ip) { 647 inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
697 inode = gfs2_ilookup(sdp->sd_vfs, no_addr); 648 if (inode && !IS_ERR(inode)) {
698 if (inode) { 649 d_prune_aliases(inode);
699 d_prune_aliases(inode); 650 iput(inode);
700 iput(inode);
701 }
702 } 651 }
703 gfs2_glock_put(gl); 652 gfs2_glock_put(gl);
704} 653}
@@ -714,14 +663,19 @@ static void glock_work_func(struct work_struct *work)
714 drop_ref = 1; 663 drop_ref = 1;
715 } 664 }
716 spin_lock(&gl->gl_spin); 665 spin_lock(&gl->gl_spin);
717 if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 666 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
718 gl->gl_state != LM_ST_UNLOCKED && 667 gl->gl_state != LM_ST_UNLOCKED &&
719 gl->gl_demote_state != LM_ST_EXCLUSIVE) { 668 gl->gl_demote_state != LM_ST_EXCLUSIVE) {
720 unsigned long holdtime, now = jiffies; 669 unsigned long holdtime, now = jiffies;
670
721 holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time; 671 holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
722 if (time_before(now, holdtime)) 672 if (time_before(now, holdtime))
723 delay = holdtime - now; 673 delay = holdtime - now;
724 set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags); 674
675 if (!delay) {
676 clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
677 set_bit(GLF_DEMOTE, &gl->gl_flags);
678 }
725 } 679 }
726 run_queue(gl, 0); 680 run_queue(gl, 0);
727 spin_unlock(&gl->gl_spin); 681 spin_unlock(&gl->gl_spin);
@@ -754,10 +708,11 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
754 struct gfs2_glock *gl, *tmp; 708 struct gfs2_glock *gl, *tmp;
755 unsigned int hash = gl_hash(sdp, &name); 709 unsigned int hash = gl_hash(sdp, &name);
756 struct address_space *mapping; 710 struct address_space *mapping;
711 struct kmem_cache *cachep;
757 712
758 read_lock(gl_lock_addr(hash)); 713 rcu_read_lock();
759 gl = search_bucket(hash, sdp, &name); 714 gl = search_bucket(hash, sdp, &name);
760 read_unlock(gl_lock_addr(hash)); 715 rcu_read_unlock();
761 716
762 *glp = gl; 717 *glp = gl;
763 if (gl) 718 if (gl)
@@ -766,9 +721,10 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
766 return -ENOENT; 721 return -ENOENT;
767 722
768 if (glops->go_flags & GLOF_ASPACE) 723 if (glops->go_flags & GLOF_ASPACE)
769 gl = kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_KERNEL); 724 cachep = gfs2_glock_aspace_cachep;
770 else 725 else
771 gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL); 726 cachep = gfs2_glock_cachep;
727 gl = kmem_cache_alloc(cachep, GFP_KERNEL);
772 if (!gl) 728 if (!gl)
773 return -ENOMEM; 729 return -ENOMEM;
774 730
@@ -801,15 +757,16 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
801 mapping->writeback_index = 0; 757 mapping->writeback_index = 0;
802 } 758 }
803 759
804 write_lock(gl_lock_addr(hash)); 760 spin_lock_bucket(hash);
805 tmp = search_bucket(hash, sdp, &name); 761 tmp = search_bucket(hash, sdp, &name);
806 if (tmp) { 762 if (tmp) {
807 write_unlock(gl_lock_addr(hash)); 763 spin_unlock_bucket(hash);
808 glock_free(gl); 764 kmem_cache_free(cachep, gl);
765 atomic_dec(&sdp->sd_glock_disposal);
809 gl = tmp; 766 gl = tmp;
810 } else { 767 } else {
811 hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list); 768 hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
812 write_unlock(gl_lock_addr(hash)); 769 spin_unlock_bucket(hash);
813 } 770 }
814 771
815 *glp = gl; 772 *glp = gl;
@@ -950,17 +907,22 @@ int gfs2_glock_wait(struct gfs2_holder *gh)
950 907
951void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...) 908void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
952{ 909{
910 struct va_format vaf;
953 va_list args; 911 va_list args;
954 912
955 va_start(args, fmt); 913 va_start(args, fmt);
914
956 if (seq) { 915 if (seq) {
957 struct gfs2_glock_iter *gi = seq->private; 916 struct gfs2_glock_iter *gi = seq->private;
958 vsprintf(gi->string, fmt, args); 917 vsprintf(gi->string, fmt, args);
959 seq_printf(seq, gi->string); 918 seq_printf(seq, gi->string);
960 } else { 919 } else {
961 printk(KERN_ERR " "); 920 vaf.fmt = fmt;
962 vprintk(fmt, args); 921 vaf.va = &args;
922
923 printk(KERN_ERR " %pV", &vaf);
963 } 924 }
925
964 va_end(args); 926 va_end(args);
965} 927}
966 928
@@ -1012,13 +974,14 @@ fail:
1012 if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt)) 974 if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
1013 insert_pt = &gh2->gh_list; 975 insert_pt = &gh2->gh_list;
1014 } 976 }
977 set_bit(GLF_QUEUED, &gl->gl_flags);
978 trace_gfs2_glock_queue(gh, 1);
1015 if (likely(insert_pt == NULL)) { 979 if (likely(insert_pt == NULL)) {
1016 list_add_tail(&gh->gh_list, &gl->gl_holders); 980 list_add_tail(&gh->gh_list, &gl->gl_holders);
1017 if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY)) 981 if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
1018 goto do_cancel; 982 goto do_cancel;
1019 return; 983 return;
1020 } 984 }
1021 trace_gfs2_glock_queue(gh, 1);
1022 list_add_tail(&gh->gh_list, insert_pt); 985 list_add_tail(&gh->gh_list, insert_pt);
1023do_cancel: 986do_cancel:
1024 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list); 987 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
@@ -1061,6 +1024,9 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
1061 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) 1024 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
1062 return -EIO; 1025 return -EIO;
1063 1026
1027 if (test_bit(GLF_LRU, &gl->gl_flags))
1028 gfs2_glock_remove_from_lru(gl);
1029
1064 spin_lock(&gl->gl_spin); 1030 spin_lock(&gl->gl_spin);
1065 add_to_queue(gh); 1031 add_to_queue(gh);
1066 if ((LM_FLAG_NOEXP & gh->gh_flags) && 1032 if ((LM_FLAG_NOEXP & gh->gh_flags) &&
@@ -1118,6 +1084,8 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
1118 !test_bit(GLF_DEMOTE, &gl->gl_flags)) 1084 !test_bit(GLF_DEMOTE, &gl->gl_flags))
1119 fast_path = 1; 1085 fast_path = 1;
1120 } 1086 }
1087 if (!test_bit(GLF_LFLUSH, &gl->gl_flags))
1088 __gfs2_glock_schedule_for_reclaim(gl);
1121 trace_gfs2_glock_queue(gh, 0); 1089 trace_gfs2_glock_queue(gh, 0);
1122 spin_unlock(&gl->gl_spin); 1090 spin_unlock(&gl->gl_spin);
1123 if (likely(fast_path)) 1091 if (likely(fast_path))
@@ -1156,7 +1124,7 @@ void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1156 * @number: the lock number 1124 * @number: the lock number
1157 * @glops: the glock operations for the type of glock 1125 * @glops: the glock operations for the type of glock
1158 * @state: the state to acquire the glock in 1126 * @state: the state to acquire the glock in
1159 * @flags: modifier flags for the aquisition 1127 * @flags: modifier flags for the acquisition
1160 * @gh: the struct gfs2_holder 1128 * @gh: the struct gfs2_holder
1161 * 1129 *
1162 * Returns: errno 1130 * Returns: errno
@@ -1281,10 +1249,8 @@ int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1281 1249
1282void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs) 1250void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1283{ 1251{
1284 unsigned int x; 1252 while (num_gh--)
1285 1253 gfs2_glock_dq(&ghs[num_gh]);
1286 for (x = 0; x < num_gh; x++)
1287 gfs2_glock_dq(&ghs[x]);
1288} 1254}
1289 1255
1290/** 1256/**
@@ -1296,10 +1262,8 @@ void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1296 1262
1297void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs) 1263void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1298{ 1264{
1299 unsigned int x; 1265 while (num_gh--)
1300 1266 gfs2_glock_dq_uninit(&ghs[num_gh]);
1301 for (x = 0; x < num_gh; x++)
1302 gfs2_glock_dq_uninit(&ghs[x]);
1303} 1267}
1304 1268
1305void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) 1269void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
@@ -1310,10 +1274,12 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
1310 1274
1311 gfs2_glock_hold(gl); 1275 gfs2_glock_hold(gl);
1312 holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time; 1276 holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
1313 if (time_before(now, holdtime)) 1277 if (test_bit(GLF_QUEUED, &gl->gl_flags)) {
1314 delay = holdtime - now; 1278 if (time_before(now, holdtime))
1315 if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) 1279 delay = holdtime - now;
1316 delay = gl->gl_ops->go_min_hold_time; 1280 if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
1281 delay = gl->gl_ops->go_min_hold_time;
1282 }
1317 1283
1318 spin_lock(&gl->gl_spin); 1284 spin_lock(&gl->gl_spin);
1319 handle_callback(gl, state, delay); 1285 handle_callback(gl, state, delay);
@@ -1357,35 +1323,42 @@ static int gfs2_should_freeze(const struct gfs2_glock *gl)
1357 * @gl: Pointer to the glock 1323 * @gl: Pointer to the glock
1358 * @ret: The return value from the dlm 1324 * @ret: The return value from the dlm
1359 * 1325 *
1326 * The gl_reply field is under the gl_spin lock so that it is ok
1327 * to use a bitfield shared with other glock state fields.
1360 */ 1328 */
1361 1329
1362void gfs2_glock_complete(struct gfs2_glock *gl, int ret) 1330void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
1363{ 1331{
1364 struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct; 1332 struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
1365 1333
1334 spin_lock(&gl->gl_spin);
1366 gl->gl_reply = ret; 1335 gl->gl_reply = ret;
1367 1336
1368 if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) { 1337 if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
1369 spin_lock(&gl->gl_spin);
1370 if (gfs2_should_freeze(gl)) { 1338 if (gfs2_should_freeze(gl)) {
1371 set_bit(GLF_FROZEN, &gl->gl_flags); 1339 set_bit(GLF_FROZEN, &gl->gl_flags);
1372 spin_unlock(&gl->gl_spin); 1340 spin_unlock(&gl->gl_spin);
1373 return; 1341 return;
1374 } 1342 }
1375 spin_unlock(&gl->gl_spin);
1376 } 1343 }
1344
1345 spin_unlock(&gl->gl_spin);
1377 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); 1346 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1347 smp_wmb();
1378 gfs2_glock_hold(gl); 1348 gfs2_glock_hold(gl);
1379 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 1349 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1380 gfs2_glock_put(gl); 1350 gfs2_glock_put(gl);
1381} 1351}
1382 1352
1383 1353
1384static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) 1354static int gfs2_shrink_glock_memory(struct shrinker *shrink,
1355 struct shrink_control *sc)
1385{ 1356{
1386 struct gfs2_glock *gl; 1357 struct gfs2_glock *gl;
1387 int may_demote; 1358 int may_demote;
1388 int nr_skipped = 0; 1359 int nr_skipped = 0;
1360 int nr = sc->nr_to_scan;
1361 gfp_t gfp_mask = sc->gfp_mask;
1389 LIST_HEAD(skipped); 1362 LIST_HEAD(skipped);
1390 1363
1391 if (nr == 0) 1364 if (nr == 0)
@@ -1398,6 +1371,7 @@ static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_m
1398 while(nr && !list_empty(&lru_list)) { 1371 while(nr && !list_empty(&lru_list)) {
1399 gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru); 1372 gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
1400 list_del_init(&gl->gl_lru); 1373 list_del_init(&gl->gl_lru);
1374 clear_bit(GLF_LRU, &gl->gl_flags);
1401 atomic_dec(&lru_count); 1375 atomic_dec(&lru_count);
1402 1376
1403 /* Test for being demotable */ 1377 /* Test for being demotable */
@@ -1420,6 +1394,7 @@ static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_m
1420 } 1394 }
1421 nr_skipped++; 1395 nr_skipped++;
1422 list_add(&gl->gl_lru, &skipped); 1396 list_add(&gl->gl_lru, &skipped);
1397 set_bit(GLF_LRU, &gl->gl_flags);
1423 } 1398 }
1424 list_splice(&skipped, &lru_list); 1399 list_splice(&skipped, &lru_list);
1425 atomic_add(nr_skipped, &lru_count); 1400 atomic_add(nr_skipped, &lru_count);
@@ -1439,42 +1414,30 @@ static struct shrinker glock_shrinker = {
1439 * @sdp: the filesystem 1414 * @sdp: the filesystem
1440 * @bucket: the bucket 1415 * @bucket: the bucket
1441 * 1416 *
1442 * Returns: 1 if the bucket has entries
1443 */ 1417 */
1444 1418
1445static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp, 1419static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
1446 unsigned int hash) 1420 unsigned int hash)
1447{ 1421{
1448 struct gfs2_glock *gl, *prev = NULL; 1422 struct gfs2_glock *gl;
1449 int has_entries = 0; 1423 struct hlist_bl_head *head = &gl_hash_table[hash];
1450 struct hlist_head *head = &gl_hash_table[hash].hb_list; 1424 struct hlist_bl_node *pos;
1451 1425
1452 read_lock(gl_lock_addr(hash)); 1426 rcu_read_lock();
1453 /* Can't use hlist_for_each_entry - don't want prefetch here */ 1427 hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
1454 if (hlist_empty(head)) 1428 if ((gl->gl_sbd == sdp) && atomic_read(&gl->gl_ref))
1455 goto out;
1456 gl = list_entry(head->first, struct gfs2_glock, gl_list);
1457 while(1) {
1458 if (!sdp || gl->gl_sbd == sdp) {
1459 gfs2_glock_hold(gl);
1460 read_unlock(gl_lock_addr(hash));
1461 if (prev)
1462 gfs2_glock_put(prev);
1463 prev = gl;
1464 examiner(gl); 1429 examiner(gl);
1465 has_entries = 1;
1466 read_lock(gl_lock_addr(hash));
1467 }
1468 if (gl->gl_list.next == NULL)
1469 break;
1470 gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
1471 } 1430 }
1472out: 1431 rcu_read_unlock();
1473 read_unlock(gl_lock_addr(hash));
1474 if (prev)
1475 gfs2_glock_put(prev);
1476 cond_resched(); 1432 cond_resched();
1477 return has_entries; 1433}
1434
1435static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
1436{
1437 unsigned x;
1438
1439 for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
1440 examine_bucket(examiner, sdp, x);
1478} 1441}
1479 1442
1480 1443
@@ -1504,15 +1467,10 @@ static void thaw_glock(struct gfs2_glock *gl)
1504 1467
1505static void clear_glock(struct gfs2_glock *gl) 1468static void clear_glock(struct gfs2_glock *gl)
1506{ 1469{
1507 spin_lock(&lru_lock); 1470 gfs2_glock_remove_from_lru(gl);
1508 if (!list_empty(&gl->gl_lru)) {
1509 list_del_init(&gl->gl_lru);
1510 atomic_dec(&lru_count);
1511 }
1512 spin_unlock(&lru_lock);
1513 1471
1514 spin_lock(&gl->gl_spin); 1472 spin_lock(&gl->gl_spin);
1515 if (find_first_holder(gl) == NULL && gl->gl_state != LM_ST_UNLOCKED) 1473 if (gl->gl_state != LM_ST_UNLOCKED)
1516 handle_callback(gl, LM_ST_UNLOCKED, 0); 1474 handle_callback(gl, LM_ST_UNLOCKED, 0);
1517 spin_unlock(&gl->gl_spin); 1475 spin_unlock(&gl->gl_spin);
1518 gfs2_glock_hold(gl); 1476 gfs2_glock_hold(gl);
@@ -1528,10 +1486,21 @@ static void clear_glock(struct gfs2_glock *gl)
1528 1486
1529void gfs2_glock_thaw(struct gfs2_sbd *sdp) 1487void gfs2_glock_thaw(struct gfs2_sbd *sdp)
1530{ 1488{
1531 unsigned x; 1489 glock_hash_walk(thaw_glock, sdp);
1490}
1532 1491
1533 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) 1492static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
1534 examine_bucket(thaw_glock, sdp, x); 1493{
1494 int ret;
1495 spin_lock(&gl->gl_spin);
1496 ret = __dump_glock(seq, gl);
1497 spin_unlock(&gl->gl_spin);
1498 return ret;
1499}
1500
1501static void dump_glock_func(struct gfs2_glock *gl)
1502{
1503 dump_glock(NULL, gl);
1535} 1504}
1536 1505
1537/** 1506/**
@@ -1544,13 +1513,10 @@ void gfs2_glock_thaw(struct gfs2_sbd *sdp)
1544 1513
1545void gfs2_gl_hash_clear(struct gfs2_sbd *sdp) 1514void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
1546{ 1515{
1547 unsigned int x; 1516 glock_hash_walk(clear_glock, sdp);
1548
1549 for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
1550 examine_bucket(clear_glock, sdp, x);
1551 flush_workqueue(glock_workqueue); 1517 flush_workqueue(glock_workqueue);
1552 wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0); 1518 wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
1553 gfs2_dump_lockstate(sdp); 1519 glock_hash_walk(dump_glock_func, sdp);
1554} 1520}
1555 1521
1556void gfs2_glock_finish_truncate(struct gfs2_inode *ip) 1522void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
@@ -1622,24 +1588,25 @@ static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
1622static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh) 1588static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
1623{ 1589{
1624 struct task_struct *gh_owner = NULL; 1590 struct task_struct *gh_owner = NULL;
1625 char buffer[KSYM_SYMBOL_LEN];
1626 char flags_buf[32]; 1591 char flags_buf[32];
1627 1592
1628 sprint_symbol(buffer, gh->gh_ip);
1629 if (gh->gh_owner_pid) 1593 if (gh->gh_owner_pid)
1630 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID); 1594 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
1631 gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %s\n", 1595 gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
1632 state2str(gh->gh_state), 1596 state2str(gh->gh_state),
1633 hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags), 1597 hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
1634 gh->gh_error, 1598 gh->gh_error,
1635 gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1, 1599 gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
1636 gh_owner ? gh_owner->comm : "(ended)", buffer); 1600 gh_owner ? gh_owner->comm : "(ended)",
1601 (void *)gh->gh_ip);
1637 return 0; 1602 return 0;
1638} 1603}
1639 1604
1640static const char *gflags2str(char *buf, const unsigned long *gflags) 1605static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
1641{ 1606{
1607 const unsigned long *gflags = &gl->gl_flags;
1642 char *p = buf; 1608 char *p = buf;
1609
1643 if (test_bit(GLF_LOCK, gflags)) 1610 if (test_bit(GLF_LOCK, gflags))
1644 *p++ = 'l'; 1611 *p++ = 'l';
1645 if (test_bit(GLF_DEMOTE, gflags)) 1612 if (test_bit(GLF_DEMOTE, gflags))
@@ -1660,6 +1627,12 @@ static const char *gflags2str(char *buf, const unsigned long *gflags)
1660 *p++ = 'I'; 1627 *p++ = 'I';
1661 if (test_bit(GLF_FROZEN, gflags)) 1628 if (test_bit(GLF_FROZEN, gflags))
1662 *p++ = 'F'; 1629 *p++ = 'F';
1630 if (test_bit(GLF_QUEUED, gflags))
1631 *p++ = 'q';
1632 if (test_bit(GLF_LRU, gflags))
1633 *p++ = 'L';
1634 if (gl->gl_object)
1635 *p++ = 'o';
1663 *p = 0; 1636 *p = 0;
1664 return buf; 1637 return buf;
1665} 1638}
@@ -1694,14 +1667,15 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
1694 dtime *= 1000000/HZ; /* demote time in uSec */ 1667 dtime *= 1000000/HZ; /* demote time in uSec */
1695 if (!test_bit(GLF_DEMOTE, &gl->gl_flags)) 1668 if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
1696 dtime = 0; 1669 dtime = 0;
1697 gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d r:%d\n", 1670 gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d\n",
1698 state2str(gl->gl_state), 1671 state2str(gl->gl_state),
1699 gl->gl_name.ln_type, 1672 gl->gl_name.ln_type,
1700 (unsigned long long)gl->gl_name.ln_number, 1673 (unsigned long long)gl->gl_name.ln_number,
1701 gflags2str(gflags_buf, &gl->gl_flags), 1674 gflags2str(gflags_buf, gl),
1702 state2str(gl->gl_target), 1675 state2str(gl->gl_target),
1703 state2str(gl->gl_demote_state), dtime, 1676 state2str(gl->gl_demote_state), dtime,
1704 atomic_read(&gl->gl_ail_count), 1677 atomic_read(&gl->gl_ail_count),
1678 atomic_read(&gl->gl_revokes),
1705 atomic_read(&gl->gl_ref)); 1679 atomic_read(&gl->gl_ref));
1706 1680
1707 list_for_each_entry(gh, &gl->gl_holders, gh_list) { 1681 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
@@ -1715,71 +1689,23 @@ out:
1715 return error; 1689 return error;
1716} 1690}
1717 1691
1718static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
1719{
1720 int ret;
1721 spin_lock(&gl->gl_spin);
1722 ret = __dump_glock(seq, gl);
1723 spin_unlock(&gl->gl_spin);
1724 return ret;
1725}
1726 1692
1727/**
1728 * gfs2_dump_lockstate - print out the current lockstate
1729 * @sdp: the filesystem
1730 * @ub: the buffer to copy the information into
1731 *
1732 * If @ub is NULL, dump the lockstate to the console.
1733 *
1734 */
1735
1736static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
1737{
1738 struct gfs2_glock *gl;
1739 struct hlist_node *h;
1740 unsigned int x;
1741 int error = 0;
1742
1743 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1744
1745 read_lock(gl_lock_addr(x));
1746
1747 hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
1748 if (gl->gl_sbd != sdp)
1749 continue;
1750
1751 error = dump_glock(NULL, gl);
1752 if (error)
1753 break;
1754 }
1755
1756 read_unlock(gl_lock_addr(x));
1757
1758 if (error)
1759 break;
1760 }
1761
1762
1763 return error;
1764}
1765 1693
1766 1694
1767int __init gfs2_glock_init(void) 1695int __init gfs2_glock_init(void)
1768{ 1696{
1769 unsigned i; 1697 unsigned i;
1770 for(i = 0; i < GFS2_GL_HASH_SIZE; i++) { 1698 for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
1771 INIT_HLIST_HEAD(&gl_hash_table[i].hb_list); 1699 INIT_HLIST_BL_HEAD(&gl_hash_table[i]);
1772 } 1700 }
1773#ifdef GL_HASH_LOCK_SZ
1774 for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
1775 rwlock_init(&gl_hash_locks[i]);
1776 }
1777#endif
1778 1701
1779 glock_workqueue = create_workqueue("glock_workqueue"); 1702 glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
1703 WQ_HIGHPRI | WQ_FREEZABLE, 0);
1780 if (IS_ERR(glock_workqueue)) 1704 if (IS_ERR(glock_workqueue))
1781 return PTR_ERR(glock_workqueue); 1705 return PTR_ERR(glock_workqueue);
1782 gfs2_delete_workqueue = create_workqueue("delete_workqueue"); 1706 gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
1707 WQ_MEM_RECLAIM | WQ_FREEZABLE,
1708 0);
1783 if (IS_ERR(gfs2_delete_workqueue)) { 1709 if (IS_ERR(gfs2_delete_workqueue)) {
1784 destroy_workqueue(glock_workqueue); 1710 destroy_workqueue(glock_workqueue);
1785 return PTR_ERR(gfs2_delete_workqueue); 1711 return PTR_ERR(gfs2_delete_workqueue);
@@ -1797,62 +1723,54 @@ void gfs2_glock_exit(void)
1797 destroy_workqueue(gfs2_delete_workqueue); 1723 destroy_workqueue(gfs2_delete_workqueue);
1798} 1724}
1799 1725
1726static inline struct gfs2_glock *glock_hash_chain(unsigned hash)
1727{
1728 return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]),
1729 struct gfs2_glock, gl_list);
1730}
1731
1732static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl)
1733{
1734 return hlist_bl_entry(rcu_dereference(gl->gl_list.next),
1735 struct gfs2_glock, gl_list);
1736}
1737
1800static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi) 1738static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
1801{ 1739{
1802 struct gfs2_glock *gl; 1740 struct gfs2_glock *gl;
1803 1741
1804restart: 1742 do {
1805 read_lock(gl_lock_addr(gi->hash)); 1743 gl = gi->gl;
1806 gl = gi->gl; 1744 if (gl) {
1807 if (gl) { 1745 gi->gl = glock_hash_next(gl);
1808 gi->gl = hlist_entry(gl->gl_list.next, 1746 } else {
1809 struct gfs2_glock, gl_list); 1747 gi->gl = glock_hash_chain(gi->hash);
1810 } else { 1748 }
1811 gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first, 1749 while (gi->gl == NULL) {
1812 struct gfs2_glock, gl_list); 1750 gi->hash++;
1813 } 1751 if (gi->hash >= GFS2_GL_HASH_SIZE) {
1814 if (gi->gl) 1752 rcu_read_unlock();
1815 gfs2_glock_hold(gi->gl); 1753 return 1;
1816 read_unlock(gl_lock_addr(gi->hash)); 1754 }
1817 if (gl) 1755 gi->gl = glock_hash_chain(gi->hash);
1818 gfs2_glock_put(gl); 1756 }
1819 while (gi->gl == NULL) { 1757 /* Skip entries for other sb and dead entries */
1820 gi->hash++; 1758 } while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);
1821 if (gi->hash >= GFS2_GL_HASH_SIZE)
1822 return 1;
1823 read_lock(gl_lock_addr(gi->hash));
1824 gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
1825 struct gfs2_glock, gl_list);
1826 if (gi->gl)
1827 gfs2_glock_hold(gi->gl);
1828 read_unlock(gl_lock_addr(gi->hash));
1829 }
1830
1831 if (gi->sdp != gi->gl->gl_sbd)
1832 goto restart;
1833 1759
1834 return 0; 1760 return 0;
1835} 1761}
1836 1762
1837static void gfs2_glock_iter_free(struct gfs2_glock_iter *gi)
1838{
1839 if (gi->gl)
1840 gfs2_glock_put(gi->gl);
1841 gi->gl = NULL;
1842}
1843
1844static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos) 1763static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
1845{ 1764{
1846 struct gfs2_glock_iter *gi = seq->private; 1765 struct gfs2_glock_iter *gi = seq->private;
1847 loff_t n = *pos; 1766 loff_t n = *pos;
1848 1767
1849 gi->hash = 0; 1768 gi->hash = 0;
1769 rcu_read_lock();
1850 1770
1851 do { 1771 do {
1852 if (gfs2_glock_iter_next(gi)) { 1772 if (gfs2_glock_iter_next(gi))
1853 gfs2_glock_iter_free(gi);
1854 return NULL; 1773 return NULL;
1855 }
1856 } while (n--); 1774 } while (n--);
1857 1775
1858 return gi->gl; 1776 return gi->gl;
@@ -1865,10 +1783,8 @@ static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
1865 1783
1866 (*pos)++; 1784 (*pos)++;
1867 1785
1868 if (gfs2_glock_iter_next(gi)) { 1786 if (gfs2_glock_iter_next(gi))
1869 gfs2_glock_iter_free(gi);
1870 return NULL; 1787 return NULL;
1871 }
1872 1788
1873 return gi->gl; 1789 return gi->gl;
1874} 1790}
@@ -1876,7 +1792,10 @@ static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
1876static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr) 1792static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
1877{ 1793{
1878 struct gfs2_glock_iter *gi = seq->private; 1794 struct gfs2_glock_iter *gi = seq->private;
1879 gfs2_glock_iter_free(gi); 1795
1796 if (gi->gl)
1797 rcu_read_unlock();
1798 gi->gl = NULL;
1880} 1799}
1881 1800
1882static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr) 1801static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)