path: root/fs/gfs2/glock.c
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--  fs/gfs2/glock.c  99
 1 file changed, 52 insertions(+), 47 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 7a4fb630a320..2792a790e50b 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -143,14 +143,9 @@ static int demote_ok(const struct gfs2_glock *gl)
 {
         const struct gfs2_glock_operations *glops = gl->gl_ops;
 
-        /* assert_spin_locked(&gl->gl_spin); */
-
         if (gl->gl_state == LM_ST_UNLOCKED)
                 return 0;
-        if (test_bit(GLF_LFLUSH, &gl->gl_flags))
-                return 0;
-        if ((gl->gl_name.ln_type != LM_TYPE_INODE) &&
-            !list_empty(&gl->gl_holders))
+        if (!list_empty(&gl->gl_holders))
                 return 0;
         if (glops->go_demote_ok)
                 return glops->go_demote_ok(gl);
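
For reference, demote_ok() as it reads once this hunk is applied (reassembled from the lines above; only the final return falls outside the hunk's context and is assumed from the surrounding function). The GLF_LFLUSH test does not disappear: it moves to the caller in gfs2_glock_dq() (see the hunk at old line 1082 below), and the LM_TYPE_INODE special case is dropped because any glock with queued holders is now kept, regardless of type.

    static int demote_ok(const struct gfs2_glock *gl)
    {
            const struct gfs2_glock_operations *glops = gl->gl_ops;

            if (gl->gl_state == LM_ST_UNLOCKED)
                    return 0;
            if (!list_empty(&gl->gl_holders))
                    return 0;
            if (glops->go_demote_ok)
                    return glops->go_demote_ok(gl);
            return 1;   /* assumed: outside the hunk's context */
    }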
@@ -158,6 +153,31 @@ static int demote_ok(const struct gfs2_glock *gl)
 }
 
 
+void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
+{
+        spin_lock(&lru_lock);
+
+        if (!list_empty(&gl->gl_lru))
+                list_del_init(&gl->gl_lru);
+        else
+                atomic_inc(&lru_count);
+
+        list_add_tail(&gl->gl_lru, &lru_list);
+        set_bit(GLF_LRU, &gl->gl_flags);
+        spin_unlock(&lru_lock);
+}
+
+static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
+{
+        spin_lock(&lru_lock);
+        if (!list_empty(&gl->gl_lru)) {
+                list_del_init(&gl->gl_lru);
+                atomic_dec(&lru_count);
+                clear_bit(GLF_LRU, &gl->gl_flags);
+        }
+        spin_unlock(&lru_lock);
+}
+
 /**
  * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
  * @gl: the glock
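
A note on the new helper pair: both lean on the <linux/list.h> convention that list_del_init() leaves the entry pointing at itself, so list_empty(&gl->gl_lru) doubles as an "is this glock on the LRU?" test and lru_count only moves when membership actually changes. A minimal sketch of that idiom (illustrative only; some_list is a made-up name):

    struct list_head entry;

    INIT_LIST_HEAD(&entry);             /* detached: list_empty(&entry) is true   */
    list_add_tail(&entry, &some_list);  /* linked:   list_empty(&entry) is false  */
    list_del_init(&entry);              /* detached again, safe to re-test/re-add */

This is also why gfs2_glock_add_to_lru() may be called for a glock already on the list: it unlinks first without touching the count, then re-adds at the tail, refreshing the glock's position in LRU order.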
@@ -168,24 +188,8 @@ static int demote_ok(const struct gfs2_glock *gl)
 
 static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
 {
-        if (demote_ok(gl)) {
-                spin_lock(&lru_lock);
-
-                if (!list_empty(&gl->gl_lru))
-                        list_del_init(&gl->gl_lru);
-                else
-                        atomic_inc(&lru_count);
-
-                list_add_tail(&gl->gl_lru, &lru_list);
-                spin_unlock(&lru_lock);
-        }
-}
-
-void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
-{
-        spin_lock(&gl->gl_spin);
-        __gfs2_glock_schedule_for_reclaim(gl);
-        spin_unlock(&gl->gl_spin);
+        if (demote_ok(gl))
+                gfs2_glock_add_to_lru(gl);
 }
 
 /**
@@ -217,12 +221,7 @@ void gfs2_glock_put(struct gfs2_glock *gl)
                 spin_lock_bucket(gl->gl_hash);
                 hlist_bl_del_rcu(&gl->gl_list);
                 spin_unlock_bucket(gl->gl_hash);
-                spin_lock(&lru_lock);
-                if (!list_empty(&gl->gl_lru)) {
-                        list_del_init(&gl->gl_lru);
-                        atomic_dec(&lru_count);
-                }
-                spin_unlock(&lru_lock);
+                gfs2_glock_remove_from_lru(gl);
                 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
                 GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
                 trace_gfs2_glock_put(gl);
@@ -542,11 +541,6 @@ __acquires(&gl->gl_spin)
         clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 
         gfs2_glock_hold(gl);
-        if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
-            gl->gl_state == LM_ST_DEFERRED) &&
-            !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
-                lck_flags |= LM_FLAG_TRY_1CB;
-
         if (sdp->sd_lockstruct.ls_ops->lm_lock) {
                 /* lock_dlm */
                 ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
@@ -648,7 +642,7 @@ static void delete_work_func(struct work_struct *work)
         /* Note: Unsafe to dereference ip as we don't hold right refs/locks */
 
         if (ip)
-                inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
+                inode = gfs2_ilookup(sdp->sd_vfs, no_addr, 1);
         else
                 inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
         if (inode && !IS_ERR(inode)) {
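
The extra argument ("1") is a non-blocking lookup flag added to gfs2_ilookup() in the companion fs/gfs2/inode.c change, which is not part of this file's diff; the post-patch prototype is assumed to read roughly:

    struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr,
                               int non_block); /* 1 = don't block on an inode
                                                  that is being freed */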
@@ -1025,6 +1019,9 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
         if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                 return -EIO;
 
+        if (test_bit(GLF_LRU, &gl->gl_flags))
+                gfs2_glock_remove_from_lru(gl);
+
         spin_lock(&gl->gl_spin);
         add_to_queue(gh);
         if ((LM_FLAG_NOEXP & gh->gh_flags) &&
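
The GLF_LRU test here runs without lru_lock, which looks racy but is only a fast-path hint: gfs2_glock_remove_from_lru() re-checks list_empty(&gl->gl_lru) under lru_lock, so a stale flag reading costs at most a skipped shortcut or one extra lock round trip. Annotated:

    if (test_bit(GLF_LRU, &gl->gl_flags))   /* unlocked hint               */
            gfs2_glock_remove_from_lru(gl); /* re-validates under lru_lock */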
@@ -1082,7 +1079,8 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
                     !test_bit(GLF_DEMOTE, &gl->gl_flags))
                         fast_path = 1;
         }
-        __gfs2_glock_schedule_for_reclaim(gl);
+        if (!test_bit(GLF_LFLUSH, &gl->gl_flags))
+                __gfs2_glock_schedule_for_reclaim(gl);
         trace_gfs2_glock_queue(gh, 0);
         spin_unlock(&gl->gl_spin);
         if (likely(fast_path))
@@ -1348,11 +1346,14 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 }
 
 
-static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
+static int gfs2_shrink_glock_memory(struct shrinker *shrink,
+                                    struct shrink_control *sc)
 {
         struct gfs2_glock *gl;
         int may_demote;
         int nr_skipped = 0;
+        int nr = sc->nr_to_scan;
+        gfp_t gfp_mask = sc->gfp_mask;
         LIST_HEAD(skipped);
 
         if (nr == 0)
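
This tracks the VM-side shrinker API change (merged in the 2.6.39/3.0 timeframe) that replaced the (nr, gfp_mask) arguments with a struct shrink_control carrying nr_to_scan and gfp_mask. The registration side, sketched under the assumption that glock.c keeps the usual pattern of this era (the actual registration is outside this diff):

    static struct shrinker glock_shrinker = {
            .shrink = gfs2_shrink_glock_memory, /* int (*)(struct shrinker *,
                                                           struct shrink_control *) */
            .seeks  = DEFAULT_SEEKS,
    };

    register_shrinker(&glock_shrinker);     /* at init */
    unregister_shrinker(&glock_shrinker);   /* at exit */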
@@ -1365,6 +1366,7 @@ static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
         while(nr && !list_empty(&lru_list)) {
                 gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
                 list_del_init(&gl->gl_lru);
+                clear_bit(GLF_LRU, &gl->gl_flags);
                 atomic_dec(&lru_count);
 
                 /* Test for being demotable */
@@ -1387,6 +1389,7 @@ static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
                 }
                 nr_skipped++;
                 list_add(&gl->gl_lru, &skipped);
+                set_bit(GLF_LRU, &gl->gl_flags);
         }
         list_splice(&skipped, &lru_list);
         atomic_add(nr_skipped, &lru_count);
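
Taken together, this hunk and the previous one keep GLF_LRU in lockstep with actual list membership across the scan. A skeleton of the loop assembled from the two hunks (elided parts marked "..."):

    while (nr && !list_empty(&lru_list)) {
            gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
            list_del_init(&gl->gl_lru);
            clear_bit(GLF_LRU, &gl->gl_flags);  /* off the list: flag off     */
            atomic_dec(&lru_count);
            ...                                 /* demotion attempt; may
                                                   drop and retake lru_lock   */
            nr_skipped++;
            list_add(&gl->gl_lru, &skipped);
            set_bit(GLF_LRU, &gl->gl_flags);    /* parked on skipped: flag on */
    }
    list_splice(&skipped, &lru_list);           /* skipped glocks stay on LRU */
    atomic_add(nr_skipped, &lru_count);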
@@ -1459,12 +1462,7 @@ static void thaw_glock(struct gfs2_glock *gl)
 
 static void clear_glock(struct gfs2_glock *gl)
 {
-        spin_lock(&lru_lock);
-        if (!list_empty(&gl->gl_lru)) {
-                list_del_init(&gl->gl_lru);
-                atomic_dec(&lru_count);
-        }
-        spin_unlock(&lru_lock);
+        gfs2_glock_remove_from_lru(gl);
 
         spin_lock(&gl->gl_spin);
         if (gl->gl_state != LM_ST_UNLOCKED)
@@ -1599,9 +1597,11 @@ static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
         return 0;
 }
 
-static const char *gflags2str(char *buf, const unsigned long *gflags)
+static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
 {
+        const unsigned long *gflags = &gl->gl_flags;
         char *p = buf;
+
         if (test_bit(GLF_LOCK, gflags))
                 *p++ = 'l';
         if (test_bit(GLF_DEMOTE, gflags))
@@ -1624,6 +1624,10 @@ static const char *gflags2str(char *buf, const unsigned long *gflags)
                 *p++ = 'F';
         if (test_bit(GLF_QUEUED, gflags))
                 *p++ = 'q';
+        if (test_bit(GLF_LRU, gflags))
+                *p++ = 'L';
+        if (gl->gl_object)
+                *p++ = 'o';
         *p = 0;
         return buf;
 }
@@ -1658,14 +1662,15 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
         dtime *= 1000000/HZ; /* demote time in uSec */
         if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
                 dtime = 0;
-        gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d r:%d\n",
+        gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d\n",
                   state2str(gl->gl_state),
                   gl->gl_name.ln_type,
                   (unsigned long long)gl->gl_name.ln_number,
-                  gflags2str(gflags_buf, &gl->gl_flags),
+                  gflags2str(gflags_buf, gl),
                   state2str(gl->gl_target),
                   state2str(gl->gl_demote_state), dtime,
                   atomic_read(&gl->gl_ail_count),
+                  atomic_read(&gl->gl_revokes),
                   atomic_read(&gl->gl_ref));
 
         list_for_each_entry(gh, &gl->gl_holders, gh_list) {
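
With the widened format string, a glock line in the debugfs dump gains a v: field (outstanding revokes, gl_revokes) and the flags string can now show 'L' (glock on the LRU) and 'o' (gl_object set). A made-up example of the new shape, not taken from a real dump:

    G: s:SH n:2/27bc f:qLo t:SH d:EX/0 a:0 v:0 r:3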