author		Steven Whitehouse <swhiteho@redhat.com>	2011-01-19 04:30:01 -0500
committer	Steven Whitehouse <swhiteho@redhat.com>	2011-01-21 04:39:08 -0500
commit		bc015cb84129eb1451913cfebece270bf7a39e0f (patch)
tree		4f116a61b802d87ae80051e9ae05d8fcb73d9ae7 /fs/gfs2/glock.c
parent		2b1caf6ed7b888c95a1909d343799672731651a5 (diff)
GFS2: Use RCU for glock hash table
This has a number of advantages:
- Reduces contention on the hash table lock
- Makes the code smaller and simpler
- Should speed up glock dumps when under load
- Removes ref count changing in examine_bucket
- No longer need hash chain lock in glock_put() in common case
There are some further changes which this enables and which
we may do in the future. One is to look at using SLAB_RCU,
and another is to look at using a per-cpu counter for the
per-sb glock counter, since that is touched twice in the
lifetime of each glock (but only used at umount time).
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
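
The core of the change is a well-known RCU hash-table pattern. The sketch below is a minimal, self-contained illustration of that pattern, not GFS2 code: the names (item, item_hash, item_find, item_put) and sizes are hypothetical. Each bucket head is an hlist_bl_head whose low bit doubles as a per-chain spinlock, lookups run locklessly under rcu_read_lock(), references are taken with atomic_inc_not_zero() so entries whose last reference is already gone are skipped, and freed memory is only reused after a grace period.

	#include <linux/types.h>
	#include <linux/rculist_bl.h>
	#include <linux/bit_spinlock.h>
	#include <linux/rcupdate.h>
	#include <linux/atomic.h>
	#include <linux/slab.h>

	struct item {
		struct hlist_bl_node i_list;	/* chain linkage; bit 0 of the
						   bucket head is the chain lock */
		u64 i_key;
		atomic_t i_ref;
		struct rcu_head i_rcu;
	};

	static struct hlist_bl_head item_hash[256];

	/* Lock-free lookup: no per-bucket lock, no global lock. */
	static struct item *item_find(u64 key)
	{
		struct hlist_bl_node *pos;
		struct item *ip;

		rcu_read_lock();
		hlist_bl_for_each_entry_rcu(ip, pos, &item_hash[key & 255], i_list) {
			/* atomic_inc_not_zero() refuses entries whose final put
			   has already happened but which remain visible on the
			   chain until the grace period ends */
			if (ip->i_key == key && atomic_inc_not_zero(&ip->i_ref)) {
				rcu_read_unlock();
				return ip;
			}
		}
		rcu_read_unlock();
		return NULL;
	}

	static void item_free_rcu(struct rcu_head *rcu)
	{
		kfree(container_of(rcu, struct item, i_rcu));
	}

	/* Final put: unlink under the per-chain bit lock, then defer the
	   actual kfree() until every current RCU reader has finished. */
	static void item_put(struct item *ip)
	{
		struct hlist_bl_head *head = &item_hash[ip->i_key & 255];

		if (atomic_dec_and_test(&ip->i_ref)) {
			bit_spin_lock(0, (unsigned long *)head);
			hlist_bl_del_rcu(&ip->i_list);
			__bit_spin_unlock(0, (unsigned long *)head);
			call_rcu(&ip->i_rcu, item_free_rcu);
		}
	}

This is the shape behind each bullet above: readers no longer contend on (or even take) the hash lock, and the writer only touches the chain lock when the reference count actually hits zero.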
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--	fs/gfs2/glock.c	390
1 file changed, 147 insertions, 243 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 08a8beb152e6..c75d4998519e 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -26,6 +26,9 @@
 #include <linux/freezer.h>
 #include <linux/workqueue.h>
 #include <linux/jiffies.h>
+#include <linux/rcupdate.h>
+#include <linux/rculist_bl.h>
+#include <linux/bit_spinlock.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -41,10 +44,6 @@
 #define CREATE_TRACE_POINTS
 #include "trace_gfs2.h"
 
-struct gfs2_gl_hash_bucket {
-	struct hlist_head hb_list;
-};
-
 struct gfs2_glock_iter {
 	int hash;	/* hash bucket index */
 	struct gfs2_sbd *sdp;	/* incore superblock */
@@ -54,7 +53,6 @@ struct gfs2_glock_iter {
 
 typedef void (*glock_examiner) (struct gfs2_glock * gl);
 
-static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
 static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
 #define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
@@ -70,57 +68,9 @@ static DEFINE_SPINLOCK(lru_lock);
 #define GFS2_GL_HASH_SIZE	(1 << GFS2_GL_HASH_SHIFT)
 #define GFS2_GL_HASH_MASK	(GFS2_GL_HASH_SIZE - 1)
 
-static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
+static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];
 static struct dentry *gfs2_root;
 
-/*
- * Despite what you might think, the numbers below are not arbitrary :-)
- * They are taken from the ipv4 routing hash code, which is well tested
- * and thus should be nearly optimal. Later on we might tweek the numbers
- * but for now this should be fine.
- *
- * The reason for putting the locks in a separate array from the list heads
- * is that we can have fewer locks than list heads and save memory. We use
- * the same hash function for both, but with a different hash mask.
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
-    defined(CONFIG_PROVE_LOCKING)
-
-#ifdef CONFIG_LOCKDEP
-# define GL_HASH_LOCK_SZ        256
-#else
-# if NR_CPUS >= 32
-#  define GL_HASH_LOCK_SZ       4096
-# elif NR_CPUS >= 16
-#  define GL_HASH_LOCK_SZ       2048
-# elif NR_CPUS >= 8
-#  define GL_HASH_LOCK_SZ       1024
-# elif NR_CPUS >= 4
-#  define GL_HASH_LOCK_SZ       512
-# else
-#  define GL_HASH_LOCK_SZ       256
-# endif
-#endif
-
-/* We never want more locks than chains */
-#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
-# undef GL_HASH_LOCK_SZ
-# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
-#endif
-
-static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];
-
-static inline rwlock_t *gl_lock_addr(unsigned int x)
-{
-	return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
-}
-#else /* not SMP, so no spinlocks required */
-static inline rwlock_t *gl_lock_addr(unsigned int x)
-{
-	return NULL;
-}
-#endif
-
 /**
  * gl_hash() - Turn glock number into hash bucket number
  * @lock: The glock number
@@ -141,25 +91,30 @@ static unsigned int gl_hash(const struct gfs2_sbd *sdp,
 	return h;
 }
 
-/**
- * glock_free() - Perform a few checks and then release struct gfs2_glock
- * @gl: The glock to release
- *
- * Also calls lock module to release its internal structure for this glock.
- *
- */
+static inline void spin_lock_bucket(unsigned int hash)
+{
+	struct hlist_bl_head *bl = &gl_hash_table[hash];
+	bit_spin_lock(0, (unsigned long *)bl);
+}
+
+static inline void spin_unlock_bucket(unsigned int hash)
+{
+	struct hlist_bl_head *bl = &gl_hash_table[hash];
+	__bit_spin_unlock(0, (unsigned long *)bl);
+}
 
-static void glock_free(struct gfs2_glock *gl)
+void gfs2_glock_free(struct rcu_head *rcu)
 {
+	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
 	struct gfs2_sbd *sdp = gl->gl_sbd;
-	struct address_space *mapping = gfs2_glock2aspace(gl);
-	struct kmem_cache *cachep = gfs2_glock_cachep;
 
-	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
-	trace_gfs2_glock_put(gl);
-	if (mapping)
-		cachep = gfs2_glock_aspace_cachep;
-	sdp->sd_lockstruct.ls_ops->lm_put_lock(cachep, gl);
+	if (gl->gl_ops->go_flags & GLOF_ASPACE)
+		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
+	else
+		kmem_cache_free(gfs2_glock_cachep, gl);
+
+	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
+		wake_up(&sdp->sd_glock_wait);
 }
 
 /**
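
Two things in this hunk are worth spelling out. First, spin_lock_bucket() is valid because hlist_bl nodes are pointer-aligned, so bit 0 of the bucket's first-pointer is always zero and can serve as a spinlock bit: one lock per chain with no extra memory, which is what allows the whole GL_HASH_LOCK_SZ lock array above to be deleted. Second, gfs2_glock_free() now takes an rcu_head rather than a glock; the gl_rcu field and the call_rcu() call site live outside this file (the diffstat is limited to fs/gfs2/glock.c), but the wiring presumably looks like the hypothetical sketch below.

	/* Hypothetical call site (the real one is in the lock modules, not
	 * in this diff): handing the glock to RCU guarantees its memory is
	 * not reused while a lock-free reader may still dereference it. */
	static void example_put_lock(struct gfs2_glock *gl)
	{
		call_rcu(&gl->gl_rcu, gfs2_glock_free);
	}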
@@ -185,34 +140,49 @@ static int demote_ok(const struct gfs2_glock *gl)
 {
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
 
+	/* assert_spin_locked(&gl->gl_spin); */
+
 	if (gl->gl_state == LM_ST_UNLOCKED)
 		return 0;
-	if (!list_empty(&gl->gl_holders))
+	if (test_bit(GLF_LFLUSH, &gl->gl_flags))
+		return 0;
+	if ((gl->gl_name.ln_type != LM_TYPE_INODE) &&
+	    !list_empty(&gl->gl_holders))
 		return 0;
 	if (glops->go_demote_ok)
 		return glops->go_demote_ok(gl);
 	return 1;
 }
 
+
 /**
- * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
+ * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
  * @gl: the glock
  *
+ * If the glock is demotable, then we add it (or move it) to the end
+ * of the glock LRU list.
 */
 
-static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
+static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
 {
-	int may_reclaim;
-	may_reclaim = (demote_ok(gl) &&
-		       (atomic_read(&gl->gl_ref) == 1 ||
-			(gl->gl_name.ln_type == LM_TYPE_INODE &&
-			 atomic_read(&gl->gl_ref) <= 2)));
-	spin_lock(&lru_lock);
-	if (list_empty(&gl->gl_lru) && may_reclaim) {
+	if (demote_ok(gl)) {
+		spin_lock(&lru_lock);
+
+		if (!list_empty(&gl->gl_lru))
+			list_del_init(&gl->gl_lru);
+		else
+			atomic_inc(&lru_count);
+
 		list_add_tail(&gl->gl_lru, &lru_list);
-		atomic_inc(&lru_count);
+		spin_unlock(&lru_lock);
 	}
-	spin_unlock(&lru_lock);
+}
+
+void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
+{
+	spin_lock(&gl->gl_spin);
+	__gfs2_glock_schedule_for_reclaim(gl);
+	spin_unlock(&gl->gl_spin);
 }
 
 /**
@@ -227,7 +197,6 @@ void gfs2_glock_put_nolock(struct gfs2_glock *gl)
 {
 	if (atomic_dec_and_test(&gl->gl_ref))
 		GLOCK_BUG_ON(gl, 1);
-	gfs2_glock_schedule_for_reclaim(gl);
 }
 
 /**
@@ -236,30 +205,26 @@ void gfs2_glock_put_nolock(struct gfs2_glock *gl)
  *
  */
 
-int gfs2_glock_put(struct gfs2_glock *gl)
+void gfs2_glock_put(struct gfs2_glock *gl)
 {
-	int rv = 0;
+	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct address_space *mapping = gfs2_glock2aspace(gl);
 
-	write_lock(gl_lock_addr(gl->gl_hash));
-	if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
-		hlist_del(&gl->gl_list);
+	if (atomic_dec_and_test(&gl->gl_ref)) {
+		spin_lock_bucket(gl->gl_hash);
+		hlist_bl_del_rcu(&gl->gl_list);
+		spin_unlock_bucket(gl->gl_hash);
+		spin_lock(&lru_lock);
 		if (!list_empty(&gl->gl_lru)) {
 			list_del_init(&gl->gl_lru);
 			atomic_dec(&lru_count);
 		}
 		spin_unlock(&lru_lock);
-		write_unlock(gl_lock_addr(gl->gl_hash));
 		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
-		glock_free(gl);
-		rv = 1;
-		goto out;
+		GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
+		trace_gfs2_glock_put(gl);
+		sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
 	}
-	spin_lock(&gl->gl_spin);
-	gfs2_glock_schedule_for_reclaim(gl);
-	spin_unlock(&gl->gl_spin);
-	write_unlock(gl_lock_addr(gl->gl_hash));
-out:
-	return rv;
 }
 
 /**
@@ -275,17 +240,15 @@ static struct gfs2_glock *search_bucket(unsigned int hash,
 					const struct lm_lockname *name)
 {
 	struct gfs2_glock *gl;
-	struct hlist_node *h;
+	struct hlist_bl_node *h;
 
-	hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
+	hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
 		if (!lm_name_equal(&gl->gl_name, name))
 			continue;
 		if (gl->gl_sbd != sdp)
 			continue;
-
-		atomic_inc(&gl->gl_ref);
-
-		return gl;
+		if (atomic_inc_not_zero(&gl->gl_ref))
+			return gl;
 	}
 
 	return NULL;
@@ -743,10 +706,11 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	struct gfs2_glock *gl, *tmp;
 	unsigned int hash = gl_hash(sdp, &name);
 	struct address_space *mapping;
+	struct kmem_cache *cachep;
 
-	read_lock(gl_lock_addr(hash));
+	rcu_read_lock();
 	gl = search_bucket(hash, sdp, &name);
-	read_unlock(gl_lock_addr(hash));
+	rcu_read_unlock();
 
 	*glp = gl;
 	if (gl)
@@ -755,9 +719,10 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 		return -ENOENT;
 
 	if (glops->go_flags & GLOF_ASPACE)
-		gl = kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_KERNEL);
+		cachep = gfs2_glock_aspace_cachep;
 	else
-		gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
+		cachep = gfs2_glock_cachep;
+	gl = kmem_cache_alloc(cachep, GFP_KERNEL);
 	if (!gl)
 		return -ENOMEM;
 
@@ -790,15 +755,15 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 		mapping->writeback_index = 0;
 	}
 
-	write_lock(gl_lock_addr(hash));
+	spin_lock_bucket(hash);
 	tmp = search_bucket(hash, sdp, &name);
 	if (tmp) {
-		write_unlock(gl_lock_addr(hash));
-		glock_free(gl);
+		spin_unlock_bucket(hash);
+		kmem_cache_free(cachep, gl);
 		gl = tmp;
 	} else {
-		hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
-		write_unlock(gl_lock_addr(hash));
+		hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
+		spin_unlock_bucket(hash);
 	}
 
 	*glp = gl;
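
This hunk is the classic counterpart to the lock-free lookup: because the first search in gfs2_glock_get() runs without any lock, two concurrent creators can both miss and both allocate. The race is resolved by re-searching under the bucket bit-lock; the loser frees its copy with a plain kmem_cache_free(), which is safe precisely because the losing glock was never added to the chain, so no RCU reader can hold a pointer to it. Continuing the hypothetical item sketch from earlier, the shape is:

	/* Lookup-or-create with an optimistic, lock-free first pass.  Builds
	 * on the item_find()/item_hash definitions sketched above. */
	static struct item *item_get(u64 key)
	{
		struct hlist_bl_head *head = &item_hash[key & 255];
		struct item *ip, *tmp;

		ip = item_find(key);			/* fast path: no lock taken */
		if (ip)
			return ip;

		ip = kzalloc(sizeof(*ip), GFP_KERNEL);	/* may sleep: outside RCU */
		if (!ip)
			return NULL;
		ip->i_key = key;
		atomic_set(&ip->i_ref, 1);

		bit_spin_lock(0, (unsigned long *)head);
		tmp = item_find(key);			/* recheck under the chain lock */
		if (tmp) {
			__bit_spin_unlock(0, (unsigned long *)head);
			kfree(ip);	/* never published: no grace period needed */
			return tmp;
		}
		hlist_bl_add_head_rcu(&ip->i_list, head);
		__bit_spin_unlock(0, (unsigned long *)head);
		return ip;
	}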
@@ -1113,6 +1078,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
 		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
 			fast_path = 1;
 	}
+	__gfs2_glock_schedule_for_reclaim(gl);
 	trace_gfs2_glock_queue(gh, 0);
 	spin_unlock(&gl->gl_spin);
 	if (likely(fast_path))
@@ -1440,42 +1406,30 @@ static struct shrinker glock_shrinker = {
  * @sdp: the filesystem
  * @bucket: the bucket
  *
- * Returns: 1 if the bucket has entries
  */
 
-static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
+static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
 			  unsigned int hash)
 {
-	struct gfs2_glock *gl, *prev = NULL;
-	int has_entries = 0;
-	struct hlist_head *head = &gl_hash_table[hash].hb_list;
+	struct gfs2_glock *gl;
+	struct hlist_bl_head *head = &gl_hash_table[hash];
+	struct hlist_bl_node *pos;
 
-	read_lock(gl_lock_addr(hash));
-	/* Can't use hlist_for_each_entry - don't want prefetch here */
-	if (hlist_empty(head))
-		goto out;
-	gl = list_entry(head->first, struct gfs2_glock, gl_list);
-	while(1) {
-		if (!sdp || gl->gl_sbd == sdp) {
-			gfs2_glock_hold(gl);
-			read_unlock(gl_lock_addr(hash));
-			if (prev)
-				gfs2_glock_put(prev);
-			prev = gl;
+	rcu_read_lock();
+	hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
+		if ((gl->gl_sbd == sdp) && atomic_read(&gl->gl_ref))
 			examiner(gl);
-			has_entries = 1;
-			read_lock(gl_lock_addr(hash));
-		}
-		if (gl->gl_list.next == NULL)
-			break;
-		gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
 	}
-out:
-	read_unlock(gl_lock_addr(hash));
-	if (prev)
-		gfs2_glock_put(prev);
+	rcu_read_unlock();
 	cond_resched();
-	return has_entries;
+}
+
+static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
+{
+	unsigned x;
+
+	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
+		examine_bucket(examiner, sdp, x);
 }
 
 
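
Note the constraint the RCU walk places on examiners: the old examine_bucket() took a reference and dropped the chain lock around each call, so an examiner could sleep, while the new one invokes the examiner inside rcu_read_lock(), so examiners must be atomic and, like thaw_glock() and clear_glock() in this file, take their own reference and push any sleeping work to a workqueue. A hypothetical examiner, loosely modeled on those two, might look like:

	/* Must not sleep: called under rcu_read_lock() by examine_bucket(). */
	static void example_examiner(struct gfs2_glock *gl)
	{
		gfs2_glock_hold(gl);		/* pin the glock past the RCU walk */
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);	/* work already queued: drop our ref */
	}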
@@ -1529,10 +1483,21 @@ static void clear_glock(struct gfs2_glock *gl)
 
 void gfs2_glock_thaw(struct gfs2_sbd *sdp)
 {
-	unsigned x;
+	glock_hash_walk(thaw_glock, sdp);
+}
 
-	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
-		examine_bucket(thaw_glock, sdp, x);
+static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
+{
+	int ret;
+	spin_lock(&gl->gl_spin);
+	ret = __dump_glock(seq, gl);
+	spin_unlock(&gl->gl_spin);
+	return ret;
+}
+
+static void dump_glock_func(struct gfs2_glock *gl)
+{
+	dump_glock(NULL, gl);
 }
 
 /**
@@ -1545,13 +1510,10 @@ void gfs2_glock_thaw(struct gfs2_sbd *sdp)
 
 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
 {
-	unsigned int x;
-
-	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
-		examine_bucket(clear_glock, sdp, x);
+	glock_hash_walk(clear_glock, sdp);
 	flush_workqueue(glock_workqueue);
 	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
-	gfs2_dump_lockstate(sdp);
+	glock_hash_walk(dump_glock_func, sdp);
 }
 
 void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
@@ -1717,66 +1679,15 @@ out:
 	return error;
 }
 
-static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
-{
-	int ret;
-	spin_lock(&gl->gl_spin);
-	ret = __dump_glock(seq, gl);
-	spin_unlock(&gl->gl_spin);
-	return ret;
-}
-
-/**
- * gfs2_dump_lockstate - print out the current lockstate
- * @sdp: the filesystem
- * @ub: the buffer to copy the information into
- *
- * If @ub is NULL, dump the lockstate to the console.
- *
- */
-
-static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
-{
-	struct gfs2_glock *gl;
-	struct hlist_node *h;
-	unsigned int x;
-	int error = 0;
-
-	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
-
-		read_lock(gl_lock_addr(x));
-
-		hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
-			if (gl->gl_sbd != sdp)
-				continue;
-
-			error = dump_glock(NULL, gl);
-			if (error)
-				break;
-		}
-
-		read_unlock(gl_lock_addr(x));
-
-		if (error)
-			break;
-	}
-
 
-	return error;
-}
-
 
 int __init gfs2_glock_init(void)
 {
 	unsigned i;
 	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
-		INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
+		INIT_HLIST_BL_HEAD(&gl_hash_table[i]);
 	}
-#ifdef GL_HASH_LOCK_SZ
-	for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
-		rwlock_init(&gl_hash_locks[i]);
 	}
-#endif
 
 	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
 					  WQ_HIGHPRI | WQ_FREEZEABLE, 0);
@@ -1802,62 +1713,54 @@
 	destroy_workqueue(gfs2_delete_workqueue);
 }
 
+static inline struct gfs2_glock *glock_hash_chain(unsigned hash)
+{
+	return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]),
+			      struct gfs2_glock, gl_list);
+}
+
+static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl)
+{
+	return hlist_bl_entry(rcu_dereference_raw(gl->gl_list.next),
+			      struct gfs2_glock, gl_list);
+}
+
 static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
 {
 	struct gfs2_glock *gl;
 
-restart:
-	read_lock(gl_lock_addr(gi->hash));
-	gl = gi->gl;
-	if (gl) {
-		gi->gl = hlist_entry(gl->gl_list.next,
-				     struct gfs2_glock, gl_list);
-	} else {
-		gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
-				     struct gfs2_glock, gl_list);
-	}
-	if (gi->gl)
-		gfs2_glock_hold(gi->gl);
-	read_unlock(gl_lock_addr(gi->hash));
-	if (gl)
-		gfs2_glock_put(gl);
-	while (gi->gl == NULL) {
-		gi->hash++;
-		if (gi->hash >= GFS2_GL_HASH_SIZE)
-			return 1;
-		read_lock(gl_lock_addr(gi->hash));
-		gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
-				     struct gfs2_glock, gl_list);
-		if (gi->gl)
-			gfs2_glock_hold(gi->gl);
-		read_unlock(gl_lock_addr(gi->hash));
-	}
-
-	if (gi->sdp != gi->gl->gl_sbd)
-		goto restart;
+	do {
+		gl = gi->gl;
+		if (gl) {
+			gi->gl = glock_hash_next(gl);
+		} else {
+			gi->gl = glock_hash_chain(gi->hash);
+		}
+		while (gi->gl == NULL) {
+			gi->hash++;
+			if (gi->hash >= GFS2_GL_HASH_SIZE) {
+				rcu_read_unlock();
+				return 1;
+			}
+			gi->gl = glock_hash_chain(gi->hash);
+		}
+		/* Skip entries for other sb and dead entries */
+	} while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);
 
 	return 0;
 }
 
-static void gfs2_glock_iter_free(struct gfs2_glock_iter *gi)
-{
-	if (gi->gl)
-		gfs2_glock_put(gi->gl);
-	gi->gl = NULL;
-}
-
 static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
 {
 	struct gfs2_glock_iter *gi = seq->private;
 	loff_t n = *pos;
 
 	gi->hash = 0;
+	rcu_read_lock();
 
 	do {
-		if (gfs2_glock_iter_next(gi)) {
-			gfs2_glock_iter_free(gi);
+		if (gfs2_glock_iter_next(gi))
 			return NULL;
-		}
 	} while (n--);
 
 	return gi->gl;
@@ -1870,10 +1773,8 @@ static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
 
 	(*pos)++;
 
-	if (gfs2_glock_iter_next(gi)) {
-		gfs2_glock_iter_free(gi);
+	if (gfs2_glock_iter_next(gi))
 		return NULL;
-	}
 
 	return gi->gl;
 }
@@ -1881,7 +1782,10 @@ static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
 static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
 {
 	struct gfs2_glock_iter *gi = seq->private;
-	gfs2_glock_iter_free(gi);
+
+	if (gi->gl)
+		rcu_read_unlock();
+	gi->gl = NULL;
 }
 
 static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
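
The debugfs dump illustrates a less common RCU shape: rcu_read_lock() is taken once in gfs2_glock_seq_start() and held across the whole seq_file window, instead of pinning the current glock with a reference count (which is why gfs2_glock_iter_free() could be deleted). The read-side section ends in exactly one of two places: gfs2_glock_iter_next() drops it when the table is exhausted, leaving gi->gl == NULL, and gfs2_glock_seq_stop() drops it otherwise, using gi->gl as the "still inside the critical section" flag. A stripped-down version of that shape, with a hypothetical iterator (ex_iter, ex_iter_next() are assumed names, not kernel APIs):

	#include <linux/seq_file.h>
	#include <linux/rcupdate.h>

	struct ex_iter {
		void *cursor;	/* current object; NULL once the walk is done */
	};

	/* Advances gi->cursor under RCU.  On exhaustion it must leave the
	 * cursor NULL, call rcu_read_unlock() itself and return 1 (this
	 * mirrors gfs2_glock_iter_next() above). */
	extern int ex_iter_next(struct ex_iter *gi);

	static void *ex_seq_start(struct seq_file *seq, loff_t *pos)
	{
		struct ex_iter *gi = seq->private;
		loff_t n = *pos;

		rcu_read_lock();	/* held until ex_seq_stop() or exhaustion */
		do {
			if (ex_iter_next(gi))
				return NULL;
		} while (n--);
		return gi->cursor;
	}

	static void ex_seq_stop(struct seq_file *seq, void *iter_ptr)
	{
		struct ex_iter *gi = seq->private;

		if (gi->cursor)		/* non-NULL: still inside the RCU section */
			rcu_read_unlock();
		gi->cursor = NULL;
	}

The trade-off of this design is that the entire dump runs inside one read-side critical section, so the show callback must not sleep; in exchange, dumping a loaded machine no longer bounces every glock's reference count, which is the "speed up glock dumps when under load" item from the commit message.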