author     Steven Whitehouse <swhiteho@redhat.com>   2006-04-20 16:57:23 -0400
committer  Steven Whitehouse <swhiteho@redhat.com>   2006-04-20 16:57:23 -0400
commit     190562bd84a484bf6590425aa2bb4d6d611c112b (patch)
tree       dd99bcd847f8d2376f7836ea9d861a31d1021c71 /fs/gfs2/glock.c
parent     fe1bdedc6c16adedc6fd3636185ea91596b1d6eb (diff)
[GFS2] Fix a bug: scheduling under a spinlock
At some stage, a mutex was added to gfs2_glock_put() without
checking all of its call sites. Two of those call sites invoked
it while holding a spinlock, causing random delays and crashes
at various points.
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
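
For context on the bug class: mutex_lock() may sleep, and sleeping while a
spinlock is held is invalid because the task is in atomic context. Below is
a minimal illustrative sketch of the broken and the fixed call pattern; the
names (put_object(), list_lock, obj_mutex) are hypothetical stand-ins, not
the GFS2 source.

#include <linux/spinlock.h>
#include <linux/mutex.h>

static DEFINE_SPINLOCK(list_lock);	/* hypothetical stand-in locks */
static DEFINE_MUTEX(obj_mutex);

/* Stands in for gfs2_glock_put() after the mutex was added to it. */
static void put_object(void)
{
	mutex_lock(&obj_mutex);		/* may sleep */
	/* ... drop the reference, possibly freeing the object ... */
	mutex_unlock(&obj_mutex);
}

static void broken_caller(void)
{
	spin_lock(&list_lock);
	put_object();			/* BUG: may sleep under list_lock */
	spin_unlock(&list_lock);
}

static void fixed_caller(void)
{
	spin_lock(&list_lock);
	/* ... only the work that actually needs list_lock ... */
	spin_unlock(&list_lock);	/* release the spinlock first */
	put_object();			/* sleeping is now safe */
}

The clear_glock() hunk at the end of the diff below applies the same
reshuffle: sd_reclaim_lock is released on both branches before
gfs2_glock_put() is called.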
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--  fs/gfs2/glock.c | 30 ++++++++++++++++++++----------
1 file changed, 20 insertions(+), 10 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 4ed13787b7ec..32cc4005307d 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -158,6 +158,7 @@ int gfs2_glock_put(struct gfs2_glock *gl)
 	if (kref_put(&gl->gl_ref, kill_glock)) {
 		list_del_init(&gl->gl_list);
 		write_unlock(&bucket->hb_lock);
+		BUG_ON(spin_is_locked(&gl->gl_spin));
 		glock_free(gl);
 		rv = 1;
 		goto out;
@@ -353,13 +354,14 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, uint64_t number,
  *
  */
 
-void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, int flags,
+void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
 		      struct gfs2_holder *gh)
 {
+	flags |= GL_NEVER_RECURSE;
 	INIT_LIST_HEAD(&gh->gh_list);
 	gh->gh_gl = gl;
 	gh->gh_ip = (unsigned long)__builtin_return_address(0);
-	gh->gh_owner = (flags & GL_NEVER_RECURSE) ? NULL : current;
+	gh->gh_owner = current;
 	gh->gh_state = state;
 	gh->gh_flags = flags;
 	gh->gh_error = 0;
@@ -382,10 +384,10 @@ void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, int flags,
  *
  */
 
-void gfs2_holder_reinit(unsigned int state, int flags, struct gfs2_holder *gh)
+void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
 {
 	gh->gh_state = state;
-	gh->gh_flags = flags;
+	gh->gh_flags = flags | GL_NEVER_RECURSE;
 	if (gh->gh_state == LM_ST_EXCLUSIVE)
 		gh->gh_flags |= GL_LOCAL_EXCL;
 
@@ -461,6 +463,8 @@ static void handle_recurse(struct gfs2_holder *gh)
 	struct gfs2_holder *tmp_gh, *safe;
 	int found = 0;
 
+	BUG_ON(!spin_is_locked(&gl->gl_spin));
+
 	printk(KERN_INFO "recursion %016llx, %u\n", gl->gl_name.ln_number,
 	       gl->gl_name.ln_type);
 
@@ -502,6 +506,8 @@ static void do_unrecurse(struct gfs2_holder *gh)
 	struct gfs2_holder *tmp_gh, *last_gh = NULL;
 	int found = 0;
 
+	BUG_ON(!spin_is_locked(&gl->gl_spin));
+
 	if (gfs2_assert_warn(sdp, gh->gh_owner))
 		return;
 
@@ -676,7 +682,6 @@ static int rq_greedy(struct gfs2_holder *gh)
  * @gl: the glock
  *
  */
-
 static void run_queue(struct gfs2_glock *gl)
 {
 	struct gfs2_holder *gh;
@@ -779,6 +784,7 @@ void gfs2_glmutex_unlock(struct gfs2_glock *gl)
 	spin_lock(&gl->gl_spin);
 	clear_bit(GLF_LOCK, &gl->gl_flags);
 	run_queue(gl);
+	BUG_ON(!spin_is_locked(&gl->gl_spin));
 	spin_unlock(&gl->gl_spin);
 }
 
@@ -1244,7 +1250,7 @@ static int recurse_check(struct gfs2_holder *existing, struct gfs2_holder *new,
 
 	return 0;
 
- fail:
+fail:
 	print_symbol(KERN_WARNING "GFS2: Existing holder from %s\n",
 		     existing->gh_ip);
 	print_symbol(KERN_WARNING "GFS2: New holder from %s\n", new->gh_ip);
@@ -1263,6 +1269,8 @@ static void add_to_queue(struct gfs2_holder *gh)
 	struct gfs2_glock *gl = gh->gh_gl;
 	struct gfs2_holder *existing;
 
+	BUG_ON(!gh->gh_owner);
+
 	if (!gh->gh_owner)
 		goto out;
 
@@ -1331,7 +1339,7 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
 	if (!(gh->gh_flags & GL_ASYNC)) {
 		error = glock_wait_internal(gh);
 		if (error == GLR_CANCELED) {
-			msleep(1000);
+			msleep(100);
 			goto restart;
 		}
 	}
@@ -1360,7 +1368,7 @@ int gfs2_glock_poll(struct gfs2_holder *gh)
 	else if (list_empty(&gh->gh_list)) {
 		if (gh->gh_error == GLR_CANCELED) {
 			spin_unlock(&gl->gl_spin);
-			msleep(1000);
+			msleep(100);
 			if (gfs2_glock_nq(gh))
 				return 1;
 			return 0;
@@ -1386,7 +1394,7 @@ int gfs2_glock_wait(struct gfs2_holder *gh)
 
 	error = glock_wait_internal(gh);
 	if (error == GLR_CANCELED) {
-		msleep(1000);
+		msleep(100);
 		gh->gh_flags &= ~GL_ASYNC;
 		error = gfs2_glock_nq(gh);
 	}
@@ -2217,10 +2225,12 @@ static void clear_glock(struct gfs2_glock *gl)
 	if (!list_empty(&gl->gl_reclaim)) {
 		list_del_init(&gl->gl_reclaim);
 		atomic_dec(&sdp->sd_reclaim_count);
+		spin_unlock(&sdp->sd_reclaim_lock);
 		released = gfs2_glock_put(gl);
 		gfs2_assert(sdp, !released);
+	} else {
+		spin_unlock(&sdp->sd_reclaim_lock);
 	}
-	spin_unlock(&sdp->sd_reclaim_lock);
 
 	if (gfs2_glmutex_trylock(gl)) {
 		if (gl->gl_ops == &gfs2_inode_glops) {
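
A few hedged notes on the remaining changes. The BUG_ON(spin_is_locked(...))
and BUG_ON(!spin_is_locked(...)) additions are debugging assertions that
record the locking contract at each boundary: gl_spin must not be held when
the glock is freed, and must be held inside handle_recurse(), do_unrecurse()
and on return from run_queue(). One caveat: on uniprocessor (!CONFIG_SMP)
builds without spinlock debugging, spin_is_locked() always returns 0, so the
"must be held" form cannot fire there. Later kernels added
lockdep_assert_held() for this purpose; a sketch of the equivalent check,
assuming a lockdep-enabled kernel and an illustrative function name:

#include <linux/lockdep.h>

static void do_unrecurse_checked(struct gfs2_glock *gl)
{
	lockdep_assert_held(&gl->gl_spin);	/* WARNs if the lock is not held */
	/* ... body that relies on gl->gl_spin being held ... */
}

The msleep(1000) -> msleep(100) changes are a separate tweak riding along in
the same commit: they shorten the back-off before retrying a lock request
that came back GLR_CANCELED.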