author     Ingo Molnar <mingo@elte.hu>    2010-07-21 15:43:03 -0400
committer  Ingo Molnar <mingo@elte.hu>    2010-07-21 15:43:06 -0400
commit     9dcdbf7a33d9018ac5d45debcf261be648bdd56a (patch)
tree       bbcc1a018f11ff76cd7ce174ef3ffe2c02da07ee /fs/xfs/linux-2.6/xfs_sync.c
parent     cc5edb0eb9ce892b530e34a5d110382483587942 (diff)
parent     cd5b8f8755a89a57fc8c408d284b8b613f090345 (diff)
Merge branch 'linus' into perf/core
Merge reason: Pick up the latest perf fixes.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'fs/xfs/linux-2.6/xfs_sync.c')
 -rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c | 130
 1 file changed, 77 insertions(+), 53 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index ef7f0218bccb..a51a07c3a70c 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -144,6 +144,41 @@ restart:
 	return last_error;
 }
 
+/*
+ * Select the next per-ag structure to iterate during the walk. The reclaim
+ * walk is optimised only to walk AGs with reclaimable inodes in them.
+ */
+static struct xfs_perag *
+xfs_inode_ag_iter_next_pag(
+	struct xfs_mount	*mp,
+	xfs_agnumber_t		*first,
+	int			tag)
+{
+	struct xfs_perag	*pag = NULL;
+
+	if (tag == XFS_ICI_RECLAIM_TAG) {
+		int found;
+		int ref;
+
+		spin_lock(&mp->m_perag_lock);
+		found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
+				(void **)&pag, *first, 1, tag);
+		if (found <= 0) {
+			spin_unlock(&mp->m_perag_lock);
+			return NULL;
+		}
+		*first = pag->pag_agno + 1;
+		/* open coded pag reference increment */
+		ref = atomic_inc_return(&pag->pag_ref);
+		spin_unlock(&mp->m_perag_lock);
+		trace_xfs_perag_get_reclaim(mp, pag->pag_agno, ref, _RET_IP_);
+	} else {
+		pag = xfs_perag_get(mp, *first);
+		(*first)++;
+	}
+	return pag;
+}
+
 int
 xfs_inode_ag_iterator(
 	struct xfs_mount	*mp,
@@ -154,16 +189,15 @@ xfs_inode_ag_iterator(
 	int			exclusive,
 	int			*nr_to_scan)
 {
+	struct xfs_perag	*pag;
 	int			error = 0;
 	int			last_error = 0;
 	xfs_agnumber_t		ag;
 	int			nr;
 
 	nr = nr_to_scan ? *nr_to_scan : INT_MAX;
-	for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
-		struct xfs_perag	*pag;
-
-		pag = xfs_perag_get(mp, ag);
+	ag = 0;
+	while ((pag = xfs_inode_ag_iter_next_pag(mp, &ag, tag))) {
 		error = xfs_inode_ag_walk(mp, pag, execute, flags, tag,
 						exclusive, &nr);
 		xfs_perag_put(pag);
@@ -640,6 +674,17 @@ __xfs_inode_set_reclaim_tag(
 	radix_tree_tag_set(&pag->pag_ici_root,
 			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
 			   XFS_ICI_RECLAIM_TAG);
+
+	if (!pag->pag_ici_reclaimable) {
+		/* propagate the reclaim tag up into the perag radix tree */
+		spin_lock(&ip->i_mount->m_perag_lock);
+		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
+				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
+				XFS_ICI_RECLAIM_TAG);
+		spin_unlock(&ip->i_mount->m_perag_lock);
+		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
+							-1, _RET_IP_);
+	}
 	pag->pag_ici_reclaimable++;
 }
 
@@ -674,6 +719,16 @@ __xfs_inode_clear_reclaim_tag(
 	radix_tree_tag_clear(&pag->pag_ici_root,
 			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
 	pag->pag_ici_reclaimable--;
+	if (!pag->pag_ici_reclaimable) {
+		/* clear the reclaim tag from the perag radix tree */
+		spin_lock(&ip->i_mount->m_perag_lock);
+		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
+				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
+				XFS_ICI_RECLAIM_TAG);
+		spin_unlock(&ip->i_mount->m_perag_lock);
+		trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
+							-1, _RET_IP_);
+	}
 }
 
 /*
@@ -828,83 +883,52 @@ xfs_reclaim_inodes(
 
 /*
  * Shrinker infrastructure.
- *
- * This is all far more complex than it needs to be. It adds a global list of
- * mounts because the shrinkers can only call a global context. We need to make
- * the shrinkers pass a context to avoid the need for global state.
  */
-static LIST_HEAD(xfs_mount_list);
-static struct rw_semaphore xfs_mount_list_lock;
-
 static int
 xfs_reclaim_inode_shrink(
+	struct shrinker	*shrink,
 	int		nr_to_scan,
 	gfp_t		gfp_mask)
 {
 	struct xfs_mount *mp;
 	struct xfs_perag *pag;
 	xfs_agnumber_t	ag;
-	int		reclaimable = 0;
+	int		reclaimable;
 
+	mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
 	if (nr_to_scan) {
 		if (!(gfp_mask & __GFP_FS))
 			return -1;
 
-		down_read(&xfs_mount_list_lock);
-		list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
-			xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
+		xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
 				XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan);
-			if (nr_to_scan <= 0)
-				break;
-		}
-		up_read(&xfs_mount_list_lock);
-	}
+		/* if we don't exhaust the scan, don't bother coming back */
+		if (nr_to_scan > 0)
+			return -1;
+	}
 
-	down_read(&xfs_mount_list_lock);
-	list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
-		for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
-			pag = xfs_perag_get(mp, ag);
-			reclaimable += pag->pag_ici_reclaimable;
-			xfs_perag_put(pag);
-		}
+	reclaimable = 0;
+	ag = 0;
+	while ((pag = xfs_inode_ag_iter_next_pag(mp, &ag,
+					XFS_ICI_RECLAIM_TAG))) {
+		reclaimable += pag->pag_ici_reclaimable;
+		xfs_perag_put(pag);
 	}
-	up_read(&xfs_mount_list_lock);
 	return reclaimable;
 }
 
-static struct shrinker xfs_inode_shrinker = {
-	.shrink = xfs_reclaim_inode_shrink,
-	.seeks = DEFAULT_SEEKS,
-};
-
-void __init
-xfs_inode_shrinker_init(void)
-{
-	init_rwsem(&xfs_mount_list_lock);
-	register_shrinker(&xfs_inode_shrinker);
-}
-
-void
-xfs_inode_shrinker_destroy(void)
-{
-	ASSERT(list_empty(&xfs_mount_list));
-	unregister_shrinker(&xfs_inode_shrinker);
-}
-
 void
 xfs_inode_shrinker_register(
 	struct xfs_mount	*mp)
{
-	down_write(&xfs_mount_list_lock);
-	list_add_tail(&mp->m_mplist, &xfs_mount_list);
-	up_write(&xfs_mount_list_lock);
+	mp->m_inode_shrink.shrink = xfs_reclaim_inode_shrink;
+	mp->m_inode_shrink.seeks = DEFAULT_SEEKS;
+	register_shrinker(&mp->m_inode_shrink);
 }
 
 void
 xfs_inode_shrinker_unregister(
 	struct xfs_mount	*mp)
 {
-	down_write(&xfs_mount_list_lock);
-	list_del(&mp->m_mplist);
-	up_write(&xfs_mount_list_lock);
+	unregister_shrinker(&mp->m_inode_shrink);
 }
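
The heart of the xfs_sync.c change above is moving the inode-reclaim shrinker from a single global object fed by the now-deleted xfs_mount_list to a per-filesystem shrinker embedded in struct xfs_mount as m_inode_shrink, with the callback recovering its mount via container_of(). Below is a minimal, self-contained userspace sketch of that pattern, assuming simplified stand-ins: the struct shrinker, struct mount, and reclaim_inode_shrink() definitions here are illustrative only, not the real Linux or XFS APIs.

#include <stddef.h>
#include <stdio.h>

/* container_of(): recover the enclosing structure from a member pointer */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* stand-in for struct shrinker: just a callback slot (illustrative only) */
struct shrinker {
	int (*shrink)(struct shrinker *shrink, int nr_to_scan);
};

/* stand-in for struct xfs_mount with the shrinker embedded in it */
struct mount {
	const char	*name;
	int		reclaimable;	/* like the summed pag_ici_reclaimable */
	struct shrinker	inode_shrink;	/* like m_inode_shrink */
};

/* like xfs_reclaim_inode_shrink(): derive the mount from the shrinker */
static int reclaim_inode_shrink(struct shrinker *shrink, int nr_to_scan)
{
	struct mount *mp = container_of(shrink, struct mount, inode_shrink);

	if (nr_to_scan) {
		int scanned = nr_to_scan < mp->reclaimable ?
				nr_to_scan : mp->reclaimable;
		mp->reclaimable -= scanned;	/* pretend we reclaimed them */
	}
	return mp->reclaimable;			/* report what is left */
}

int main(void)
{
	struct mount m = { .name = "fs0", .reclaimable = 10 };

	/* like xfs_inode_shrinker_register(): wire up the embedded shrinker */
	m.inode_shrink.shrink = reclaim_inode_shrink;

	/* the "VM" calls back with only the shrinker pointer as context */
	printf("%s: reclaimable left = %d\n", m.name,
	       m.inode_shrink.shrink(&m.inode_shrink, 4));
	return 0;
}

Because each mount owns its shrinker, registration and unregistration reduce to register_shrinker()/unregister_shrinker() on the embedded object, which is why the global mount list and its rw_semaphore disappear in the patch.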
