diff options
field | value | date
---|---|---
author | Christoph Hellwig <hch@infradead.org> | 2007-08-30 03:21:22 -0400
committer | Tim Shimmin <tes@chook.melbourne.sgi.com> | 2007-10-15 22:16:35 -0400
commit | 743944967021f3759d3540b0dfbc7ee7215bc4b0 (patch) | |
tree | 6a7eed892bf6f6fd70d2072cd88cb536325cc4b8 /fs/xfs/linux-2.6/xfs_super.c | |
parent | bd186aa901c183d6e25257711b6c64b42a90dde0 (diff) |
[XFS] move syncing related members from struct bhv_vfs to struct xfs_mount
SGI-PV: 969608
SGI-Modid: xfs-linux-melb:xfs-kern:29508a
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: David Chinner <dgc@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
Diffstat (limited to 'fs/xfs/linux-2.6/xfs_super.c')
-rw-r--r-- | fs/xfs/linux-2.6/xfs_super.c | 121 |
1 file changed, 57 insertions(+), 64 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index e275b7a82bc1..3ce9426f57d8 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c | |||
@@ -457,9 +457,9 @@ xfs_fs_clear_inode( | |||
457 | */ | 457 | */ |
458 | STATIC void | 458 | STATIC void |
459 | xfs_syncd_queue_work( | 459 | xfs_syncd_queue_work( |
460 | struct bhv_vfs *vfs, | 460 | struct xfs_mount *mp, |
461 | void *data, | 461 | void *data, |
462 | void (*syncer)(bhv_vfs_t *, void *)) | 462 | void (*syncer)(struct xfs_mount *, void *)) |
463 | { | 463 | { |
464 | struct bhv_vfs_sync_work *work; | 464 | struct bhv_vfs_sync_work *work; |
465 | 465 | ||
@@ -467,11 +467,11 @@ xfs_syncd_queue_work( | |||
467 | INIT_LIST_HEAD(&work->w_list); | 467 | INIT_LIST_HEAD(&work->w_list); |
468 | work->w_syncer = syncer; | 468 | work->w_syncer = syncer; |
469 | work->w_data = data; | 469 | work->w_data = data; |
470 | work->w_vfs = vfs; | 470 | work->w_mount = mp; |
471 | spin_lock(&vfs->vfs_sync_lock); | 471 | spin_lock(&mp->m_sync_lock); |
472 | list_add_tail(&work->w_list, &vfs->vfs_sync_list); | 472 | list_add_tail(&work->w_list, &mp->m_sync_list); |
473 | spin_unlock(&vfs->vfs_sync_lock); | 473 | spin_unlock(&mp->m_sync_lock); |
474 | wake_up_process(vfs->vfs_sync_task); | 474 | wake_up_process(mp->m_sync_task); |
475 | } | 475 | } |
476 | 476 | ||
477 | /* | 477 | /* |
@@ -482,22 +482,22 @@ xfs_syncd_queue_work( | |||
482 | */ | 482 | */ |
483 | STATIC void | 483 | STATIC void |
484 | xfs_flush_inode_work( | 484 | xfs_flush_inode_work( |
485 | bhv_vfs_t *vfs, | 485 | struct xfs_mount *mp, |
486 | void *inode) | 486 | void *arg) |
487 | { | 487 | { |
488 | filemap_flush(((struct inode *)inode)->i_mapping); | 488 | struct inode *inode = arg; |
489 | iput((struct inode *)inode); | 489 | filemap_flush(inode->i_mapping); |
490 | iput(inode); | ||
490 | } | 491 | } |
491 | 492 | ||
492 | void | 493 | void |
493 | xfs_flush_inode( | 494 | xfs_flush_inode( |
494 | xfs_inode_t *ip) | 495 | xfs_inode_t *ip) |
495 | { | 496 | { |
496 | struct inode *inode = vn_to_inode(XFS_ITOV(ip)); | 497 | struct inode *inode = ip->i_vnode; |
497 | struct bhv_vfs *vfs = XFS_MTOVFS(ip->i_mount); | ||
498 | 498 | ||
499 | igrab(inode); | 499 | igrab(inode); |
500 | xfs_syncd_queue_work(vfs, inode, xfs_flush_inode_work); | 500 | xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work); |
501 | delay(msecs_to_jiffies(500)); | 501 | delay(msecs_to_jiffies(500)); |
502 | } | 502 | } |
503 | 503 | ||
@@ -507,11 +507,12 @@ xfs_flush_inode( | |||
507 | */ | 507 | */ |
508 | STATIC void | 508 | STATIC void |
509 | xfs_flush_device_work( | 509 | xfs_flush_device_work( |
510 | bhv_vfs_t *vfs, | 510 | struct xfs_mount *mp, |
511 | void *inode) | 511 | void *arg) |
512 | { | 512 | { |
513 | sync_blockdev(vfs->vfs_super->s_bdev); | 513 | struct inode *inode = arg; |
514 | iput((struct inode *)inode); | 514 | sync_blockdev(mp->m_vfsp->vfs_super->s_bdev); |
515 | iput(inode); | ||
515 | } | 516 | } |
516 | 517 | ||
517 | void | 518 | void |
@@ -519,34 +520,33 @@ xfs_flush_device( | |||
519 | xfs_inode_t *ip) | 520 | xfs_inode_t *ip) |
520 | { | 521 | { |
521 | struct inode *inode = vn_to_inode(XFS_ITOV(ip)); | 522 | struct inode *inode = vn_to_inode(XFS_ITOV(ip)); |
522 | struct bhv_vfs *vfs = XFS_MTOVFS(ip->i_mount); | ||
523 | 523 | ||
524 | igrab(inode); | 524 | igrab(inode); |
525 | xfs_syncd_queue_work(vfs, inode, xfs_flush_device_work); | 525 | xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work); |
526 | delay(msecs_to_jiffies(500)); | 526 | delay(msecs_to_jiffies(500)); |
527 | xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC); | 527 | xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC); |
528 | } | 528 | } |
529 | 529 | ||
530 | STATIC void | 530 | STATIC void |
531 | vfs_sync_worker( | 531 | xfs_sync_worker( |
532 | bhv_vfs_t *vfsp, | 532 | struct xfs_mount *mp, |
533 | void *unused) | 533 | void *unused) |
534 | { | 534 | { |
535 | int error; | 535 | int error; |
536 | 536 | ||
537 | if (!(XFS_VFSTOM(vfsp)->m_flags & XFS_MOUNT_RDONLY)) | 537 | if (!(mp->m_flags & XFS_MOUNT_RDONLY)) |
538 | error = xfs_sync(XFS_VFSTOM(vfsp), SYNC_FSDATA | SYNC_BDFLUSH | \ | 538 | error = xfs_sync(mp, SYNC_FSDATA | SYNC_BDFLUSH | SYNC_ATTR | |
539 | SYNC_ATTR | SYNC_REFCACHE | SYNC_SUPER); | 539 | SYNC_REFCACHE | SYNC_SUPER); |
540 | vfsp->vfs_sync_seq++; | 540 | mp->m_sync_seq++; |
541 | wake_up(&vfsp->vfs_wait_single_sync_task); | 541 | wake_up(&mp->m_wait_single_sync_task); |
542 | } | 542 | } |
543 | 543 | ||
544 | STATIC int | 544 | STATIC int |
545 | xfssyncd( | 545 | xfssyncd( |
546 | void *arg) | 546 | void *arg) |
547 | { | 547 | { |
548 | struct xfs_mount *mp = arg; | ||
548 | long timeleft; | 549 | long timeleft; |
549 | bhv_vfs_t *vfsp = (bhv_vfs_t *) arg; | ||
550 | bhv_vfs_sync_work_t *work, *n; | 550 | bhv_vfs_sync_work_t *work, *n; |
551 | LIST_HEAD (tmp); | 551 | LIST_HEAD (tmp); |
552 | 552 | ||
@@ -556,31 +556,31 @@ xfssyncd( | |||
556 | timeleft = schedule_timeout_interruptible(timeleft); | 556 | timeleft = schedule_timeout_interruptible(timeleft); |
557 | /* swsusp */ | 557 | /* swsusp */ |
558 | try_to_freeze(); | 558 | try_to_freeze(); |
559 | if (kthread_should_stop() && list_empty(&vfsp->vfs_sync_list)) | 559 | if (kthread_should_stop() && list_empty(&mp->m_sync_list)) |
560 | break; | 560 | break; |
561 | 561 | ||
562 | spin_lock(&vfsp->vfs_sync_lock); | 562 | spin_lock(&mp->m_sync_lock); |
563 | /* | 563 | /* |
564 | * We can get woken by laptop mode, to do a sync - | 564 | * We can get woken by laptop mode, to do a sync - |
565 | * that's the (only!) case where the list would be | 565 | * that's the (only!) case where the list would be |
566 | * empty with time remaining. | 566 | * empty with time remaining. |
567 | */ | 567 | */ |
568 | if (!timeleft || list_empty(&vfsp->vfs_sync_list)) { | 568 | if (!timeleft || list_empty(&mp->m_sync_list)) { |
569 | if (!timeleft) | 569 | if (!timeleft) |
570 | timeleft = xfs_syncd_centisecs * | 570 | timeleft = xfs_syncd_centisecs * |
571 | msecs_to_jiffies(10); | 571 | msecs_to_jiffies(10); |
572 | INIT_LIST_HEAD(&vfsp->vfs_sync_work.w_list); | 572 | INIT_LIST_HEAD(&mp->m_sync_work.w_list); |
573 | list_add_tail(&vfsp->vfs_sync_work.w_list, | 573 | list_add_tail(&mp->m_sync_work.w_list, |
574 | &vfsp->vfs_sync_list); | 574 | &mp->m_sync_list); |
575 | } | 575 | } |
576 | list_for_each_entry_safe(work, n, &vfsp->vfs_sync_list, w_list) | 576 | list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list) |
577 | list_move(&work->w_list, &tmp); | 577 | list_move(&work->w_list, &tmp); |
578 | spin_unlock(&vfsp->vfs_sync_lock); | 578 | spin_unlock(&mp->m_sync_lock); |
579 | 579 | ||
580 | list_for_each_entry_safe(work, n, &tmp, w_list) { | 580 | list_for_each_entry_safe(work, n, &tmp, w_list) { |
581 | (*work->w_syncer)(vfsp, work->w_data); | 581 | (*work->w_syncer)(mp, work->w_data); |
582 | list_del(&work->w_list); | 582 | list_del(&work->w_list); |
583 | if (work == &vfsp->vfs_sync_work) | 583 | if (work == &mp->m_sync_work) |
584 | continue; | 584 | continue; |
585 | kmem_free(work, sizeof(struct bhv_vfs_sync_work)); | 585 | kmem_free(work, sizeof(struct bhv_vfs_sync_work)); |
586 | } | 586 | } |
@@ -589,25 +589,6 @@ xfssyncd( | |||
589 | return 0; | 589 | return 0; |
590 | } | 590 | } |
591 | 591 | ||
592 | STATIC int | ||
593 | xfs_fs_start_syncd( | ||
594 | bhv_vfs_t *vfsp) | ||
595 | { | ||
596 | vfsp->vfs_sync_work.w_syncer = vfs_sync_worker; | ||
597 | vfsp->vfs_sync_work.w_vfs = vfsp; | ||
598 | vfsp->vfs_sync_task = kthread_run(xfssyncd, vfsp, "xfssyncd"); | ||
599 | if (IS_ERR(vfsp->vfs_sync_task)) | ||
600 | return -PTR_ERR(vfsp->vfs_sync_task); | ||
601 | return 0; | ||
602 | } | ||
603 | |||
604 | STATIC void | ||
605 | xfs_fs_stop_syncd( | ||
606 | bhv_vfs_t *vfsp) | ||
607 | { | ||
608 | kthread_stop(vfsp->vfs_sync_task); | ||
609 | } | ||
610 | |||
611 | STATIC void | 592 | STATIC void |
612 | xfs_fs_put_super( | 593 | xfs_fs_put_super( |
613 | struct super_block *sb) | 594 | struct super_block *sb) |
@@ -616,7 +597,8 @@ xfs_fs_put_super( | |||
616 | struct xfs_mount *mp = XFS_M(sb); | 597 | struct xfs_mount *mp = XFS_M(sb); |
617 | int error; | 598 | int error; |
618 | 599 | ||
619 | xfs_fs_stop_syncd(vfsp); | 600 | kthread_stop(mp->m_sync_task); |
601 | |||
620 | xfs_sync(mp, SYNC_ATTR | SYNC_DELWRI); | 602 | xfs_sync(mp, SYNC_ATTR | SYNC_DELWRI); |
621 | error = xfs_unmount(mp, 0, NULL); | 603 | error = xfs_unmount(mp, 0, NULL); |
622 | if (error) { | 604 | if (error) { |
@@ -641,7 +623,6 @@ xfs_fs_sync_super( | |||
641 | struct super_block *sb, | 623 | struct super_block *sb, |
642 | int wait) | 624 | int wait) |
643 | { | 625 | { |
644 | bhv_vfs_t *vfsp = vfs_from_sb(sb); | ||
645 | struct xfs_mount *mp = XFS_M(sb); | 626 | struct xfs_mount *mp = XFS_M(sb); |
646 | int error; | 627 | int error; |
647 | int flags; | 628 | int flags; |
@@ -663,22 +644,22 @@ xfs_fs_sync_super( | |||
663 | sb->s_dirt = 0; | 644 | sb->s_dirt = 0; |
664 | 645 | ||
665 | if (unlikely(laptop_mode)) { | 646 | if (unlikely(laptop_mode)) { |
666 | int prev_sync_seq = vfsp->vfs_sync_seq; | 647 | int prev_sync_seq = mp->m_sync_seq; |
667 | 648 | ||
668 | /* | 649 | /* |
669 | * The disk must be active because we're syncing. | 650 | * The disk must be active because we're syncing. |
670 | * We schedule xfssyncd now (now that the disk is | 651 | * We schedule xfssyncd now (now that the disk is |
671 | * active) instead of later (when it might not be). | 652 | * active) instead of later (when it might not be). |
672 | */ | 653 | */ |
673 | wake_up_process(vfsp->vfs_sync_task); | 654 | wake_up_process(mp->m_sync_task); |
674 | /* | 655 | /* |
675 | * We have to wait for the sync iteration to complete. | 656 | * We have to wait for the sync iteration to complete. |
676 | * If we don't, the disk activity caused by the sync | 657 | * If we don't, the disk activity caused by the sync |
677 | * will come after the sync is completed, and that | 658 | * will come after the sync is completed, and that |
678 | * triggers another sync from laptop mode. | 659 | * triggers another sync from laptop mode. |
679 | */ | 660 | */ |
680 | wait_event(vfsp->vfs_wait_single_sync_task, | 661 | wait_event(mp->m_wait_single_sync_task, |
681 | vfsp->vfs_sync_seq != prev_sync_seq); | 662 | mp->m_sync_seq != prev_sync_seq); |
682 | } | 663 | } |
683 | 664 | ||
684 | return -error; | 665 | return -error; |
@@ -790,6 +771,11 @@ xfs_fs_fill_super( | |||
790 | int error; | 771 | int error; |
791 | 772 | ||
792 | mp = xfs_mount_init(); | 773 | mp = xfs_mount_init(); |
774 | |||
775 | INIT_LIST_HEAD(&mp->m_sync_list); | ||
776 | spin_lock_init(&mp->m_sync_lock); | ||
777 | init_waitqueue_head(&mp->m_wait_single_sync_task); | ||
778 | |||
793 | mp->m_vfsp = vfsp; | 779 | mp->m_vfsp = vfsp; |
794 | vfsp->vfs_mount = mp; | 780 | vfsp->vfs_mount = mp; |
795 | 781 | ||
@@ -834,8 +820,15 @@ xfs_fs_fill_super( | |||
834 | error = EINVAL; | 820 | error = EINVAL; |
835 | goto fail_vnrele; | 821 | goto fail_vnrele; |
836 | } | 822 | } |
837 | if ((error = xfs_fs_start_syncd(vfsp))) | 823 | |
824 | mp->m_sync_work.w_syncer = xfs_sync_worker; | ||
825 | mp->m_sync_work.w_mount = mp; | ||
826 | mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd"); | ||
827 | if (IS_ERR(mp->m_sync_task)) { | ||
828 | error = -PTR_ERR(mp->m_sync_task); | ||
838 | goto fail_vnrele; | 829 | goto fail_vnrele; |
830 | } | ||
831 | |||
839 | vn_trace_exit(XFS_I(sb->s_root->d_inode), __FUNCTION__, | 832 | vn_trace_exit(XFS_I(sb->s_root->d_inode), __FUNCTION__, |
840 | (inst_t *)__return_address); | 833 | (inst_t *)__return_address); |
841 | 834 | ||