Diffstat (limited to 'fs/gfs2/inode.c')
-rw-r--r--  fs/gfs2/inode.c  95
1 file changed, 45 insertions(+), 50 deletions(-)
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index fb5a4d06e926..9084d6037a0c 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -287,7 +287,7 @@ int gfs2_inode_refresh(struct gfs2_inode *ip)
 
 static int inode_create(struct gfs2_glock *i_gl, const struct gfs2_inum *inum,
                         struct gfs2_glock *io_gl, unsigned int io_state,
-                        struct gfs2_inode **ipp)
+                        struct gfs2_inode **ipp, int need_lock)
 {
         struct gfs2_sbd *sdp = i_gl->gl_sbd;
         struct gfs2_inode *ip;
@@ -312,11 +312,13 @@ static int inode_create(struct gfs2_glock *i_gl, const struct gfs2_inum *inum,
 
         ip->i_greedy = gfs2_tune_get(sdp, gt_greedy_default);
 
-        error = gfs2_glock_nq_init(io_gl,
-                                   io_state, GL_LOCAL_EXCL | GL_EXACT,
-                                   &ip->i_iopen_gh);
-        if (error)
-                goto fail;
+        if (need_lock) {
+                error = gfs2_glock_nq_init(io_gl,
+                                           io_state, GL_LOCAL_EXCL | GL_EXACT,
+                                           &ip->i_iopen_gh);
+                if (error)
+                        goto fail;
+        }
         ip->i_iopen_gh.gh_owner = NULL;
 
         spin_lock(&io_gl->gl_spin);
@@ -376,7 +378,7 @@ int gfs2_inode_get(struct gfs2_glock *i_gl, const struct gfs2_inum *inum,
         error = gfs2_glock_get(sdp, inum->no_addr, &gfs2_iopen_glops,
                                CREATE, &io_gl);
         if (!error) {
-                error = inode_create(i_gl, inum, io_gl, LM_ST_SHARED, ipp);
+                error = inode_create(i_gl, inum, io_gl, LM_ST_SHARED, ipp, 1);
                 gfs2_glock_put(io_gl);
         }
 
@@ -398,21 +400,23 @@ void gfs2_inode_put(struct gfs2_inode *ip)
         atomic_dec(&ip->i_count);
 }
 
-void gfs2_inode_destroy(struct gfs2_inode *ip)
+void gfs2_inode_destroy(struct gfs2_inode *ip, int unlock)
 {
         struct gfs2_sbd *sdp = ip->i_sbd;
-        struct gfs2_glock *io_gl = ip->i_iopen_gh.gh_gl;
         struct gfs2_glock *i_gl = ip->i_gl;
 
         gfs2_assert_warn(sdp, !atomic_read(&ip->i_count));
-        gfs2_assert(sdp, io_gl->gl_object == i_gl);
-
-        spin_lock(&io_gl->gl_spin);
-        io_gl->gl_object = NULL;
-        spin_unlock(&io_gl->gl_spin);
-        gfs2_glock_put(i_gl);
-
-        gfs2_glock_dq_uninit(&ip->i_iopen_gh);
+        if (unlock) {
+                struct gfs2_glock *io_gl = ip->i_iopen_gh.gh_gl;
+                gfs2_assert(sdp, io_gl->gl_object == i_gl);
+
+                spin_lock(&io_gl->gl_spin);
+                io_gl->gl_object = NULL;
+                spin_unlock(&io_gl->gl_spin);
+                gfs2_glock_put(i_gl);
+
+                gfs2_glock_dq_uninit(&ip->i_iopen_gh);
+        }
 
         gfs2_meta_cache_flush(ip);
         kmem_cache_free(gfs2_inode_cachep, ip);
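
The need_lock/unlock flags added above let a single helper serve both callers that expect it to acquire and release the iopen glock and callers that already hold it (the deallocation path later in this patch passes 0 for both). A minimal standalone sketch of that ownership-flag pattern, written with POSIX mutexes purely for illustration (none of the names below are GFS2 API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t io_lock = PTHREAD_MUTEX_INITIALIZER;

/* Analogue of inode_create(..., int need_lock): take io_lock only when
 * the caller does not already hold it. */
static void create_object(int need_lock)
{
        if (need_lock)
                pthread_mutex_lock(&io_lock);
        /* ... initialise the object while io_lock is held ... */
        printf("created (need_lock=%d)\n", need_lock);
}

/* Analogue of gfs2_inode_destroy(..., int unlock): drop io_lock only
 * when this function owns it. */
static void destroy_object(int unlock)
{
        /* ... tear the object down ... */
        printf("destroying (unlock=%d)\n", unlock);
        if (unlock)
                pthread_mutex_unlock(&io_lock);
}

int main(void)
{
        /* Ordinary path: the helpers own the locking. */
        create_object(1);
        destroy_object(1);

        /* Deallocation-style path: the caller holds io_lock throughout. */
        pthread_mutex_lock(&io_lock);
        create_object(0);
        destroy_object(0);
        pthread_mutex_unlock(&io_lock);
        return 0;
}

Either way, exactly one party is responsible for each lock and unlock, which is what lets inode_dealloc() below keep the iopen holder it was handed instead of dropping and re-taking it.
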
@@ -493,6 +497,13 @@ static int dinode_dealloc(struct gfs2_inode *ip, struct gfs2_unlinked *ul)
  * @inum: the inode number to deallocate
  * @io_gh: a holder for the iopen glock for this inode
  *
+ * N.B. When we enter this we already hold the iopen glock and getting
+ * the glock for the inode means that we are grabbing the locks in the
+ * "wrong" order, so we must only do a try lock operation and fail if we
+ * don't get the lock. That's ok, since if we fail it means someone else
+ * is still using the inode and thus we shouldn't be deallocating it
+ * anyway.
+ *
  * Returns: errno
  */
 
@@ -503,33 +514,21 @@ static int inode_dealloc(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul,
         struct gfs2_holder i_gh;
         int error;
 
-        error = gfs2_glock_nq_num(sdp,
-                                  ul->ul_ut.ut_inum.no_addr, &gfs2_inode_glops,
-                                  LM_ST_EXCLUSIVE, 0, &i_gh);
-        if (error)
-                return error;
-
-        /* We reacquire the iopen lock here to avoid a race with the NFS server
-           calling gfs2_read_inode() with the inode number of a inode we're in
-           the process of deallocating. And we can't keep our hold on the lock
-           from inode_dealloc_init() for deadlock reasons. */
-
-        gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY, io_gh);
-        error = gfs2_glock_nq(io_gh);
-        switch (error) {
+        error = gfs2_glock_nq_num(sdp, ul->ul_ut.ut_inum.no_addr,
+                                  &gfs2_inode_glops, LM_ST_EXCLUSIVE,
+                                  LM_FLAG_TRY_1CB, &i_gh);
+        switch(error) {
         case 0:
                 break;
         case GLR_TRYFAILED:
-                error = 1;
+                return 1; /* or back off and relock in different order? */
         default:
-                goto out;
+                return error;
         }
 
         gfs2_assert_warn(sdp, !i_gh.gh_gl->gl_object);
         error = inode_create(i_gh.gh_gl, &ul->ul_ut.ut_inum, io_gh->gh_gl,
-                             LM_ST_EXCLUSIVE, &ip);
-
-        gfs2_glock_dq(io_gh);
+                             LM_ST_EXCLUSIVE, &ip, 0);
 
         if (error)
                 goto out;
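
The N.B. above and the switch on GLR_TRYFAILED are the standard answer to taking locks in the "wrong" order: never block on the second lock, only try it, and treat failure as "the object is busy, back off". A self-contained illustration of the same back-off pattern using POSIX mutexes (illustrative names only, not GFS2 code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t iopen_lock = PTHREAD_MUTEX_INITIALIZER; /* held on entry */
static pthread_mutex_t inode_lock = PTHREAD_MUTEX_INITIALIZER; /* normally taken first */

/* Returns 0 if the deallocation ran, 1 if it backed off. */
static int try_dealloc(void)
{
        pthread_mutex_lock(&iopen_lock);

        /* Out-of-order acquisition: blocking here could deadlock against a
         * thread that holds inode_lock and is waiting for iopen_lock, so
         * only try-lock it, mirroring LM_FLAG_TRY_1CB / GLR_TRYFAILED. */
        if (pthread_mutex_trylock(&inode_lock) != 0) {
                pthread_mutex_unlock(&iopen_lock);
                return 1; /* someone else still uses the inode; skip dealloc */
        }

        /* ... deallocate while holding both locks ... */

        pthread_mutex_unlock(&inode_lock);
        pthread_mutex_unlock(&iopen_lock);
        return 0;
}

int main(void)
{
        printf("try_dealloc() -> %d\n", try_dealloc());
        return 0;
}
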
@@ -568,13 +567,13 @@ static int inode_dealloc(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul,
         if (error)
                 goto out_iput;
 
- out_iput:
+out_iput:
         gfs2_glmutex_lock(i_gh.gh_gl);
         gfs2_inode_put(ip);
-        gfs2_inode_destroy(ip);
+        gfs2_inode_destroy(ip, 0);
         gfs2_glmutex_unlock(i_gh.gh_gl);
 
- out:
+out:
         gfs2_glock_dq_uninit(&i_gh);
 
         return error;
@@ -589,14 +588,13 @@ static int inode_dealloc(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul,
 
 static int try_inode_dealloc(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
 {
-        struct gfs2_holder io_gh;
         int error = 0;
+        struct gfs2_holder iogh;
 
         gfs2_try_toss_inode(sdp, &ul->ul_ut.ut_inum);
-
-        error = gfs2_glock_nq_num(sdp,
-                                  ul->ul_ut.ut_inum.no_addr, &gfs2_iopen_glops,
-                                  LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB, &io_gh);
+        error = gfs2_glock_nq_num(sdp, ul->ul_ut.ut_inum.no_addr,
+                                  &gfs2_iopen_glops, LM_ST_EXCLUSIVE,
+                                  LM_FLAG_TRY_1CB, &iogh);
         switch (error) {
         case 0:
                 break;
@@ -606,9 +604,8 @@ static int try_inode_dealloc(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
                 return error;
         }
 
-        gfs2_glock_dq(&io_gh);
-        error = inode_dealloc(sdp, ul, &io_gh);
-        gfs2_holder_uninit(&io_gh);
+        error = inode_dealloc(sdp, ul, &iogh);
+        gfs2_glock_dq_uninit(&iogh);
 
         return error;
 }
@@ -634,9 +631,7 @@ static int inode_dealloc_uninit(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
         if (error)
                 goto out;
 
-        error = gfs2_trans_begin(sdp,
-                                 RES_RG_BIT + RES_UNLINKED + RES_STATFS,
-                                 0);
+        error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_UNLINKED + RES_STATFS, 0);
         if (error)
                 goto out_gunlock;
 