author    Steven Whitehouse <swhiteho@redhat.com>  2006-04-20 16:57:23 -0400
committer Steven Whitehouse <swhiteho@redhat.com>  2006-04-20 16:57:23 -0400
commit    190562bd84a484bf6590425aa2bb4d6d611c112b
tree      dd99bcd847f8d2376f7836ea9d861a31d1021c71
parent    fe1bdedc6c16adedc6fd3636185ea91596b1d6eb
[GFS2] Fix a bug: scheduling under a spinlock
At some stage, a mutex was added to gfs2_glock_put() without checking all of its call sites. Two of them were called from under a spinlock, causing random delays at various points as well as crashes.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
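The bug class is worth spelling out: a task holding a spinlock runs in atomic context, so it must not call anything that can schedule, such as mutex_lock() or msleep(), until the spinlock is released. Below is a minimal sketch of the broken and fixed orderings this patch applies in clear_glock() and gfs2_inode_destroy(); reclaim_lock, object_mutex and the helper names are hypothetical stand-ins, not GFS2 code:

#include <linux/spinlock.h>
#include <linux/mutex.h>

static DEFINE_SPINLOCK(reclaim_lock);	/* hypothetical */
static DEFINE_MUTEX(object_mutex);	/* stands in for the mutex inside gfs2_glock_put() */

static void put_object(void)
{
	mutex_lock(&object_mutex);	/* may sleep */
	/* ... drop the last reference and free the object ... */
	mutex_unlock(&object_mutex);
}

static void broken_put(void)
{
	spin_lock(&reclaim_lock);
	put_object();			/* BUG: may schedule while atomic */
	spin_unlock(&reclaim_lock);
}

static void fixed_put(void)
{
	spin_lock(&reclaim_lock);
	/* ... only the work that really needs the lock ... */
	spin_unlock(&reclaim_lock);	/* drop the spinlock first ... */
	put_object();			/* ... then sleeping is safe */
}

This is exactly the reordering in the diff below: gfs2_glock_put() moves outside sd_reclaim_lock in clear_glock() and outside io_gl->gl_spin in gfs2_inode_destroy(), and the new BUG_ON() calls assert the expected lock state at the points where it matters.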
Diffstat (limited to 'fs/gfs2')
-rw-r--r--  fs/gfs2/glock.c   | 30 ++++++++++++++++++++----------
-rw-r--r--  fs/gfs2/glock.h   |  6 +++---
-rw-r--r--  fs/gfs2/inode.c   |  2 +-
-rw-r--r--  fs/gfs2/log.c     |  1 +
-rw-r--r--  fs/gfs2/meta_io.c |  2 ++
5 files changed, 27 insertions(+), 14 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 4ed13787b7ec..32cc4005307d 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -158,6 +158,7 @@ int gfs2_glock_put(struct gfs2_glock *gl)
 	if (kref_put(&gl->gl_ref, kill_glock)) {
 		list_del_init(&gl->gl_list);
 		write_unlock(&bucket->hb_lock);
+		BUG_ON(spin_is_locked(&gl->gl_spin));
 		glock_free(gl);
 		rv = 1;
 		goto out;
@@ -353,13 +354,14 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, uint64_t number,
  *
  */
 
-void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, int flags,
+void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
 		      struct gfs2_holder *gh)
 {
+	flags |= GL_NEVER_RECURSE;
 	INIT_LIST_HEAD(&gh->gh_list);
 	gh->gh_gl = gl;
 	gh->gh_ip = (unsigned long)__builtin_return_address(0);
-	gh->gh_owner = (flags & GL_NEVER_RECURSE) ? NULL : current;
+	gh->gh_owner = current;
 	gh->gh_state = state;
 	gh->gh_flags = flags;
 	gh->gh_error = 0;
@@ -382,10 +384,10 @@ void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, int flags,
  *
  */
 
-void gfs2_holder_reinit(unsigned int state, int flags, struct gfs2_holder *gh)
+void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
 {
 	gh->gh_state = state;
-	gh->gh_flags = flags;
+	gh->gh_flags = flags | GL_NEVER_RECURSE;
 	if (gh->gh_state == LM_ST_EXCLUSIVE)
 		gh->gh_flags |= GL_LOCAL_EXCL;
 
@@ -461,6 +463,8 @@ static void handle_recurse(struct gfs2_holder *gh)
 	struct gfs2_holder *tmp_gh, *safe;
 	int found = 0;
 
+	BUG_ON(!spin_is_locked(&gl->gl_spin));
+
 	printk(KERN_INFO "recursion %016llx, %u\n", gl->gl_name.ln_number,
 	       gl->gl_name.ln_type);
 
@@ -502,6 +506,8 @@ static void do_unrecurse(struct gfs2_holder *gh)
 	struct gfs2_holder *tmp_gh, *last_gh = NULL;
 	int found = 0;
 
+	BUG_ON(!spin_is_locked(&gl->gl_spin));
+
 	if (gfs2_assert_warn(sdp, gh->gh_owner))
 		return;
 
@@ -676,7 +682,6 @@ static int rq_greedy(struct gfs2_holder *gh)
  * @gl: the glock
  *
  */
-
 static void run_queue(struct gfs2_glock *gl)
 {
 	struct gfs2_holder *gh;
@@ -779,6 +784,7 @@ void gfs2_glmutex_unlock(struct gfs2_glock *gl)
 	spin_lock(&gl->gl_spin);
 	clear_bit(GLF_LOCK, &gl->gl_flags);
 	run_queue(gl);
+	BUG_ON(!spin_is_locked(&gl->gl_spin));
 	spin_unlock(&gl->gl_spin);
 }
 
@@ -1244,7 +1250,7 @@ static int recurse_check(struct gfs2_holder *existing, struct gfs2_holder *new,
 
 	return 0;
 
- fail:
+fail:
 	print_symbol(KERN_WARNING "GFS2: Existing holder from %s\n",
 		     existing->gh_ip);
 	print_symbol(KERN_WARNING "GFS2: New holder from %s\n", new->gh_ip);
@@ -1263,6 +1269,8 @@ static void add_to_queue(struct gfs2_holder *gh)
 	struct gfs2_glock *gl = gh->gh_gl;
 	struct gfs2_holder *existing;
 
+	BUG_ON(!gh->gh_owner);
+
 	if (!gh->gh_owner)
 		goto out;
 
@@ -1331,7 +1339,7 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
 	if (!(gh->gh_flags & GL_ASYNC)) {
 		error = glock_wait_internal(gh);
 		if (error == GLR_CANCELED) {
-			msleep(1000);
+			msleep(100);
 			goto restart;
 		}
 	}
@@ -1360,7 +1368,7 @@ int gfs2_glock_poll(struct gfs2_holder *gh)
 	else if (list_empty(&gh->gh_list)) {
 		if (gh->gh_error == GLR_CANCELED) {
 			spin_unlock(&gl->gl_spin);
-			msleep(1000);
+			msleep(100);
 			if (gfs2_glock_nq(gh))
 				return 1;
 			return 0;
@@ -1386,7 +1394,7 @@ int gfs2_glock_wait(struct gfs2_holder *gh)
 
 	error = glock_wait_internal(gh);
 	if (error == GLR_CANCELED) {
-		msleep(1000);
+		msleep(100);
 		gh->gh_flags &= ~GL_ASYNC;
 		error = gfs2_glock_nq(gh);
 	}
@@ -2217,10 +2225,12 @@ static void clear_glock(struct gfs2_glock *gl)
 	if (!list_empty(&gl->gl_reclaim)) {
 		list_del_init(&gl->gl_reclaim);
 		atomic_dec(&sdp->sd_reclaim_count);
+		spin_unlock(&sdp->sd_reclaim_lock);
 		released = gfs2_glock_put(gl);
 		gfs2_assert(sdp, !released);
+	} else {
+		spin_unlock(&sdp->sd_reclaim_lock);
 	}
-	spin_unlock(&sdp->sd_reclaim_lock);
 
 	if (gfs2_glmutex_trylock(gl)) {
 		if (gl->gl_ops == &gfs2_inode_glops) {
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index b6646e7fed15..ed5bc3e65397 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -81,10 +81,10 @@ int gfs2_glock_get(struct gfs2_sbd *sdp,
 		   int create, struct gfs2_glock **glp);
 void gfs2_glock_hold(struct gfs2_glock *gl);
 int gfs2_glock_put(struct gfs2_glock *gl);
-
-void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, int flags,
-		      struct gfs2_holder *gh);
-void gfs2_holder_reinit(unsigned int state, int flags, struct gfs2_holder *gh);
+void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
+		      struct gfs2_holder *gh);
+void gfs2_holder_reinit(unsigned int state, unsigned flags,
+			struct gfs2_holder *gh);
 void gfs2_holder_uninit(struct gfs2_holder *gh);
 struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl, unsigned int state,
 				    int flags, gfp_t gfp_flags);
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 6140c2434e85..fb5a4d06e926 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -409,8 +409,8 @@ void gfs2_inode_destroy(struct gfs2_inode *ip)
 
 	spin_lock(&io_gl->gl_spin);
 	io_gl->gl_object = NULL;
-	gfs2_glock_put(i_gl);
 	spin_unlock(&io_gl->gl_spin);
+	gfs2_glock_put(i_gl);
 
 	gfs2_glock_dq_uninit(&ip->i_iopen_gh);
 
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index b0dd0b9ad79b..9e32e0faaf20 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -578,6 +578,7 @@ void gfs2_log_shutdown(struct gfs2_sbd *sdp)
 	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
 	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_rg);
 	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_databuf);
+	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_hdrs);
 	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));
 
 	sdp->sd_log_flush_head = sdp->sd_log_head;
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 74cf28e77b47..da49973a90d1 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -214,6 +214,8 @@ void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
 	struct buffer_head *bh;
 	int retry;
 
+	BUG_ON(!spin_is_locked(&sdp->sd_log_lock));
+
 	do {
 		retry = 0;
 
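A closing note on the assertion style used throughout this patch: BUG_ON(!spin_is_locked(&lock)) documents a locking precondition in the callee instead of trusting every caller. As an aside (not something this patch addresses), spin_is_locked() can return 0 on uniprocessor builds where spinlocks compile away, so later kernels express the same check with lockdep_assert_held(), which is verified only when lockdep is enabled. A hedged sketch of that alternative form for the function above:

#include <linux/lockdep.h>

/* Hypothetical variant: same precondition as the BUG_ON() added above,
 * assuming a kernel recent enough to have lockdep_assert_held(). */
static void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	lockdep_assert_held(&sdp->sd_log_lock);
	/* ... function body unchanged ... */
}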