path: root/fs/gfs2/glock.c
author     Steven Whitehouse <swhiteho@redhat.com>  2006-05-18 16:25:27 -0400
committer  Steven Whitehouse <swhiteho@redhat.com>  2006-05-18 16:25:27 -0400
commit     320dd101e2d595a03439adb92b319f3af53dd1d0
tree       56a88401a218622018030045fec009bafdc4ce76 /fs/gfs2/glock.c
parent     3a8a9a1034813aa99f5ae3150f652d490c5ff10d
[GFS2] glock debugging and inode cache changes
This adds some extra debugging to glock.c and changes inode.c's
deallocation code to call the debugging code at a suitable moment. I'm
chasing down a particular bug to do with deallocation at the moment and
the code can go again once the bug is fixed.

Also this includes the first part of some changes to unify the Linux
struct inode and GFS2's struct gfs2_inode. This transformation will
happen in small parts over the next short period.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
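For context, the gl_owner and gl_ip fields that this patch initialises, sets and prints are declared outside this file. A minimal sketch of what the additions to struct gfs2_glock would look like, assuming the structure is defined in fs/gfs2/incore.h (which is not part of this diff):

/*
 * Sketch only: the real declarations live in the glock structure,
 * outside fs/gfs2/glock.c.  Types are inferred from how the fields
 * are used below (gl_owner = current, gl_ip = a return address).
 */
struct gfs2_glock {
        /* ... existing fields ... */
        struct task_struct *gl_owner;   /* task holding the glmutex (GLF_LOCK), or NULL */
        unsigned long gl_ip;            /* return address of the code that took it */
        /* ... existing fields ... */
};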
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--  fs/gfs2/glock.c  26
1 file changed, 21 insertions(+), 5 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index c04159031538..2ef8accf1cbc 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -47,6 +47,7 @@ struct greedy {
 typedef void (*glock_examiner) (struct gfs2_glock * gl);
 
 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
+static int dump_glock(struct gfs2_glock *gl);
 
 /**
  * relaxed_state_ok - is a requested lock compatible with the current lock mode?
@@ -290,6 +291,8 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, uint64_t number,
         spin_lock_init(&gl->gl_spin);
 
         gl->gl_state = LM_ST_UNLOCKED;
+        gl->gl_owner = NULL;
+        gl->gl_ip = 0;
         INIT_LIST_HEAD(&gl->gl_holders);
         INIT_LIST_HEAD(&gl->gl_waiters1);
         INIT_LIST_HEAD(&gl->gl_waiters2);
@@ -661,8 +664,11 @@ void gfs2_glmutex_lock(struct gfs2_glock *gl)
         spin_lock(&gl->gl_spin);
         if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                 list_add_tail(&gh.gh_list, &gl->gl_waiters1);
-        else
+        else {
+                gl->gl_owner = current;
+                gl->gl_ip = (unsigned long)__builtin_return_address(0);
                 complete(&gh.gh_wait);
+        }
         spin_unlock(&gl->gl_spin);
 
         wait_for_completion(&gh.gh_wait);
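The pattern above records which task took the glmutex and from where: current identifies the running task, and __builtin_return_address(0) captures the caller's address, which dump_glock() later resolves to a symbol name via print_symbol(). A stand-alone sketch of the same technique, with hypothetical names (debug_owner, debug_owner_set, debug_owner_dump) used purely for illustration:

#include <linux/kernel.h>       /* printk, KERN_INFO */
#include <linux/sched.h>        /* current, struct task_struct */
#include <linux/kallsyms.h>     /* print_symbol */

struct debug_owner {
        struct task_struct *owner;      /* task that took the lock */
        unsigned long ip;               /* call site that took it */
};

/* Record the current task and its return address when taking a lock.
 * When this helper is inlined into the locking function, the address
 * reported is that function's caller, as in gfs2_glmutex_lock(). */
static inline void debug_owner_set(struct debug_owner *d)
{
        d->owner = current;
        d->ip = (unsigned long)__builtin_return_address(0);
}

/* Print the recorded owner, mirroring what dump_glock() does below. */
static inline void debug_owner_dump(const struct debug_owner *d)
{
        printk(KERN_INFO "owner = %s\n", d->owner ? d->owner->comm : "none");
        print_symbol(KERN_INFO "taken at %s\n", d->ip);
}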
@@ -683,6 +689,10 @@ static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
         spin_lock(&gl->gl_spin);
         if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                 acquired = 0;
+        else {
+                gl->gl_owner = current;
+                gl->gl_ip = (unsigned long)__builtin_return_address(0);
+        }
         spin_unlock(&gl->gl_spin);
 
         return acquired;
@@ -698,6 +708,8 @@ void gfs2_glmutex_unlock(struct gfs2_glock *gl)
 {
         spin_lock(&gl->gl_spin);
         clear_bit(GLF_LOCK, &gl->gl_flags);
+        gl->gl_owner = NULL;
+        gl->gl_ip = 0;
         run_queue(gl);
         BUG_ON(!spin_is_locked(&gl->gl_spin));
         spin_unlock(&gl->gl_spin);
@@ -1173,7 +1185,7 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
         struct gfs2_sbd *sdp = gl->gl_sbd;
         int error = 0;
 
- restart:
+restart:
         if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                 set_bit(HIF_ABORTED, &gh->gh_iflags);
                 return -EIO;
@@ -1196,6 +1208,9 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
 
         clear_bit(GLF_PREFETCH, &gl->gl_flags);
 
+        if (error == GLR_TRYFAILED && (gh->gh_flags & GL_DUMP))
+                dump_glock(gl);
+
         return error;
 }
 
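The new GL_DUMP check lets a caller ask for the glock state to be dumped whenever a try-lock fails. A hedged example of what such a call site might look like (the helper name, lock mode and surrounding code are illustrative; the actual debugging caller added by this series lives in inode.c and is not shown in this diff):

/* Illustrative only: a hypothetical helper showing how GL_DUMP might
 * be combined with LM_FLAG_TRY so a failed attempt prints the glock
 * state via dump_glock() before giving up. */
static int try_lock_or_dump(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;
        int error;

        error = gfs2_glock_nq_init(gl, LM_ST_EXCLUSIVE,
                                   LM_FLAG_TRY | GL_DUMP, &gh);
        if (error == GLR_TRYFAILED)
                return error;   /* dump_glock() has already printed the state */
        if (error)
                return error;

        /* ... do work while holding the glock ... */

        gfs2_glock_dq_uninit(&gh);
        return 0;
}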
@@ -2212,9 +2227,8 @@ static int dump_glock(struct gfs2_glock *gl)
 
         spin_lock(&gl->gl_spin);
 
-        printk(KERN_INFO "Glock (%u, %llu)\n",
-               gl->gl_name.ln_type,
-               gl->gl_name.ln_number);
+        printk(KERN_INFO "Glock (%u, %llu)\n", gl->gl_name.ln_type,
+               gl->gl_name.ln_number);
         printk(KERN_INFO "  gl_flags =");
         for (x = 0; x < 32; x++)
                 if (test_bit(x, &gl->gl_flags))
@@ -2222,6 +2236,8 @@ static int dump_glock(struct gfs2_glock *gl)
         printk(" \n");
         printk(KERN_INFO "  gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
         printk(KERN_INFO "  gl_state = %u\n", gl->gl_state);
+        printk(KERN_INFO "  gl_owner = %s\n", gl->gl_owner->comm);
+        print_symbol(KERN_INFO "  gl_ip = %s\n", gl->gl_ip);
         printk(KERN_INFO "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
         printk(KERN_INFO "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
         printk(KERN_INFO "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));