diff options
| author | Dave Chinner <dchinner@redhat.com> | 2016-05-18 00:20:08 -0400 |
|---|---|---|
| committer | Dave Chinner <david@fromorbit.com> | 2016-05-18 00:20:08 -0400 |
| commit | ad438c4038968e5ca5248f851212634e474983e8 (patch) | |
| tree | 28d6b8ba7400608dc1a403187b0f8482f6aa706c | |
| parent | 545c0889d26d47e1139c527002eb131343d13b63 (diff) | |
xfs: move reclaim tagging functions
Rearrange the inode tagging functions so that they are higher up in
xfs_icache.c and so there is no need for forward prototypes to be
defined. This is purely code movement, no other change.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
| -rw-r--r-- | fs/xfs/xfs_icache.c | 234 |
1 file changed, 116 insertions, 118 deletions
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index 789f8c32e65f..99ee6eee5e0b 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c | |||
| @@ -37,8 +37,6 @@ | |||
| 37 | #include <linux/kthread.h> | 37 | #include <linux/kthread.h> |
| 38 | #include <linux/freezer.h> | 38 | #include <linux/freezer.h> |
| 39 | 39 | ||
| 40 | STATIC void xfs_inode_clear_reclaim_tag(struct xfs_perag *pag, xfs_ino_t ino); | ||
| 41 | |||
| 42 | /* | 40 | /* |
| 43 | * Allocate and initialise an xfs_inode. | 41 | * Allocate and initialise an xfs_inode. |
| 44 | */ | 42 | */ |
| @@ -144,6 +142,122 @@ xfs_inode_free( | |||
| 144 | } | 142 | } |
| 145 | 143 | ||
| 146 | /* | 144 | /* |
| 145 | * Queue a new inode reclaim pass if there are reclaimable inodes and there | ||
| 146 | * isn't a reclaim pass already in progress. By default it runs every 5s based | ||
| 147 | * on the xfs periodic sync default of 30s. Perhaps this should have it's own | ||
| 148 | * tunable, but that can be done if this method proves to be ineffective or too | ||
| 149 | * aggressive. | ||
| 150 | */ | ||
| 151 | static void | ||
| 152 | xfs_reclaim_work_queue( | ||
| 153 | struct xfs_mount *mp) | ||
| 154 | { | ||
| 155 | |||
| 156 | rcu_read_lock(); | ||
| 157 | if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) { | ||
| 158 | queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work, | ||
| 159 | msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10)); | ||
| 160 | } | ||
| 161 | rcu_read_unlock(); | ||
| 162 | } | ||
| 163 | |||
| 164 | /* | ||
| 165 | * This is a fast pass over the inode cache to try to get reclaim moving on as | ||
| 166 | * many inodes as possible in a short period of time. It kicks itself every few | ||
| 167 | * seconds, as well as being kicked by the inode cache shrinker when memory | ||
| 168 | * goes low. It scans as quickly as possible avoiding locked inodes or those | ||
| 169 | * already being flushed, and once done schedules a future pass. | ||
| 170 | */ | ||
| 171 | void | ||
| 172 | xfs_reclaim_worker( | ||
| 173 | struct work_struct *work) | ||
| 174 | { | ||
| 175 | struct xfs_mount *mp = container_of(to_delayed_work(work), | ||
| 176 | struct xfs_mount, m_reclaim_work); | ||
| 177 | |||
| 178 | xfs_reclaim_inodes(mp, SYNC_TRYLOCK); | ||
| 179 | xfs_reclaim_work_queue(mp); | ||
| 180 | } | ||
| 181 | |||
| 182 | static void | ||
| 183 | xfs_perag_set_reclaim_tag( | ||
| 184 | struct xfs_perag *pag) | ||
| 185 | { | ||
| 186 | struct xfs_mount *mp = pag->pag_mount; | ||
| 187 | |||
| 188 | ASSERT(spin_is_locked(&pag->pag_ici_lock)); | ||
| 189 | if (pag->pag_ici_reclaimable++) | ||
| 190 | return; | ||
| 191 | |||
| 192 | /* propagate the reclaim tag up into the perag radix tree */ | ||
| 193 | spin_lock(&mp->m_perag_lock); | ||
| 194 | radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, | ||
| 195 | XFS_ICI_RECLAIM_TAG); | ||
| 196 | spin_unlock(&mp->m_perag_lock); | ||
| 197 | |||
| 198 | /* schedule periodic background inode reclaim */ | ||
| 199 | xfs_reclaim_work_queue(mp); | ||
| 200 | |||
| 201 | trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_); | ||
| 202 | } | ||
| 203 | |||
| 204 | static void | ||
| 205 | xfs_perag_clear_reclaim_tag( | ||
| 206 | struct xfs_perag *pag) | ||
| 207 | { | ||
| 208 | struct xfs_mount *mp = pag->pag_mount; | ||
| 209 | |||
| 210 | ASSERT(spin_is_locked(&pag->pag_ici_lock)); | ||
| 211 | if (--pag->pag_ici_reclaimable) | ||
| 212 | return; | ||
| 213 | |||
| 214 | /* clear the reclaim tag from the perag radix tree */ | ||
| 215 | spin_lock(&mp->m_perag_lock); | ||
| 216 | radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, | ||
| 217 | XFS_ICI_RECLAIM_TAG); | ||
| 218 | spin_unlock(&mp->m_perag_lock); | ||
| 219 | trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_); | ||
| 220 | } | ||
| 221 | |||
| 222 | |||
| 223 | /* | ||
| 224 | * We set the inode flag atomically with the radix tree tag. | ||
| 225 | * Once we get tag lookups on the radix tree, this inode flag | ||
| 226 | * can go away. | ||
| 227 | */ | ||
| 228 | void | ||
| 229 | xfs_inode_set_reclaim_tag( | ||
| 230 | struct xfs_inode *ip) | ||
| 231 | { | ||
| 232 | struct xfs_mount *mp = ip->i_mount; | ||
| 233 | struct xfs_perag *pag; | ||
| 234 | |||
| 235 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); | ||
| 236 | spin_lock(&pag->pag_ici_lock); | ||
| 237 | spin_lock(&ip->i_flags_lock); | ||
| 238 | |||
| 239 | radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino), | ||
| 240 | XFS_ICI_RECLAIM_TAG); | ||
| 241 | xfs_perag_set_reclaim_tag(pag); | ||
| 242 | __xfs_iflags_set(ip, XFS_IRECLAIMABLE); | ||
| 243 | |||
| 244 | spin_unlock(&ip->i_flags_lock); | ||
| 245 | spin_unlock(&pag->pag_ici_lock); | ||
| 246 | xfs_perag_put(pag); | ||
| 247 | } | ||
| 248 | |||
| 249 | STATIC void | ||
| 250 | xfs_inode_clear_reclaim_tag( | ||
| 251 | struct xfs_perag *pag, | ||
| 252 | xfs_ino_t ino) | ||
| 253 | { | ||
| 254 | radix_tree_tag_clear(&pag->pag_ici_root, | ||
| 255 | XFS_INO_TO_AGINO(pag->pag_mount, ino), | ||
| 256 | XFS_ICI_RECLAIM_TAG); | ||
| 257 | xfs_perag_clear_reclaim_tag(pag); | ||
| 258 | } | ||
| 259 | |||
| 260 | /* | ||
| 147 | * When we recycle a reclaimable inode, we need to re-initialise the VFS inode | 261 | * When we recycle a reclaimable inode, we need to re-initialise the VFS inode |
| 148 | * part of the structure. This is made more complex by the fact we store | 262 | * part of the structure. This is made more complex by the fact we store |
| 149 | * information about the on-disk values in the VFS inode and so we can't just | 263 | * information about the on-disk values in the VFS inode and so we can't just |
| @@ -729,122 +843,6 @@ xfs_inode_ag_iterator_tag( | |||
| 729 | } | 843 | } |
| 730 | 844 | ||
| 731 | /* | 845 | /* |
| 732 | * Queue a new inode reclaim pass if there are reclaimable inodes and there | ||
| 733 | * isn't a reclaim pass already in progress. By default it runs every 5s based | ||
| 734 | * on the xfs periodic sync default of 30s. Perhaps this should have it's own | ||
| 735 | * tunable, but that can be done if this method proves to be ineffective or too | ||
| 736 | * aggressive. | ||
| 737 | */ | ||
| 738 | static void | ||
| 739 | xfs_reclaim_work_queue( | ||
| 740 | struct xfs_mount *mp) | ||
| 741 | { | ||
| 742 | |||
| 743 | rcu_read_lock(); | ||
| 744 | if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) { | ||
| 745 | queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work, | ||
| 746 | msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10)); | ||
| 747 | } | ||
| 748 | rcu_read_unlock(); | ||
| 749 | } | ||
| 750 | |||
| 751 | /* | ||
| 752 | * This is a fast pass over the inode cache to try to get reclaim moving on as | ||
| 753 | * many inodes as possible in a short period of time. It kicks itself every few | ||
| 754 | * seconds, as well as being kicked by the inode cache shrinker when memory | ||
| 755 | * goes low. It scans as quickly as possible avoiding locked inodes or those | ||
| 756 | * already being flushed, and once done schedules a future pass. | ||
| 757 | */ | ||
| 758 | void | ||
| 759 | xfs_reclaim_worker( | ||
| 760 | struct work_struct *work) | ||
| 761 | { | ||
| 762 | struct xfs_mount *mp = container_of(to_delayed_work(work), | ||
| 763 | struct xfs_mount, m_reclaim_work); | ||
| 764 | |||
| 765 | xfs_reclaim_inodes(mp, SYNC_TRYLOCK); | ||
| 766 | xfs_reclaim_work_queue(mp); | ||
| 767 | } | ||
| 768 | |||
| 769 | static void | ||
| 770 | xfs_perag_set_reclaim_tag( | ||
| 771 | struct xfs_perag *pag) | ||
| 772 | { | ||
| 773 | struct xfs_mount *mp = pag->pag_mount; | ||
| 774 | |||
| 775 | ASSERT(spin_is_locked(&pag->pag_ici_lock)); | ||
| 776 | if (pag->pag_ici_reclaimable++) | ||
| 777 | return; | ||
| 778 | |||
| 779 | /* propagate the reclaim tag up into the perag radix tree */ | ||
| 780 | spin_lock(&mp->m_perag_lock); | ||
| 781 | radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, | ||
| 782 | XFS_ICI_RECLAIM_TAG); | ||
| 783 | spin_unlock(&mp->m_perag_lock); | ||
| 784 | |||
| 785 | /* schedule periodic background inode reclaim */ | ||
| 786 | xfs_reclaim_work_queue(mp); | ||
| 787 | |||
| 788 | trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_); | ||
| 789 | } | ||
| 790 | |||
| 791 | static void | ||
| 792 | xfs_perag_clear_reclaim_tag( | ||
| 793 | struct xfs_perag *pag) | ||
| 794 | { | ||
| 795 | struct xfs_mount *mp = pag->pag_mount; | ||
| 796 | |||
| 797 | ASSERT(spin_is_locked(&pag->pag_ici_lock)); | ||
| 798 | if (--pag->pag_ici_reclaimable) | ||
| 799 | return; | ||
| 800 | |||
| 801 | /* clear the reclaim tag from the perag radix tree */ | ||
| 802 | spin_lock(&mp->m_perag_lock); | ||
| 803 | radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, | ||
| 804 | XFS_ICI_RECLAIM_TAG); | ||
| 805 | spin_unlock(&mp->m_perag_lock); | ||
| 806 | trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_); | ||
| 807 | } | ||
| 808 | |||
| 809 | |||
| 810 | /* | ||
| 811 | * We set the inode flag atomically with the radix tree tag. | ||
| 812 | * Once we get tag lookups on the radix tree, this inode flag | ||
| 813 | * can go away. | ||
| 814 | */ | ||
| 815 | void | ||
| 816 | xfs_inode_set_reclaim_tag( | ||
| 817 | struct xfs_inode *ip) | ||
| 818 | { | ||
| 819 | struct xfs_mount *mp = ip->i_mount; | ||
| 820 | struct xfs_perag *pag; | ||
| 821 | |||
| 822 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); | ||
| 823 | spin_lock(&pag->pag_ici_lock); | ||
| 824 | spin_lock(&ip->i_flags_lock); | ||
| 825 | |||
| 826 | radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino), | ||
| 827 | XFS_ICI_RECLAIM_TAG); | ||
| 828 | xfs_perag_set_reclaim_tag(pag); | ||
| 829 | __xfs_iflags_set(ip, XFS_IRECLAIMABLE); | ||
| 830 | |||
| 831 | spin_unlock(&ip->i_flags_lock); | ||
| 832 | spin_unlock(&pag->pag_ici_lock); | ||
| 833 | xfs_perag_put(pag); | ||
| 834 | } | ||
| 835 | |||
| 836 | STATIC void | ||
| 837 | xfs_inode_clear_reclaim_tag( | ||
| 838 | struct xfs_perag *pag, | ||
| 839 | xfs_ino_t ino) | ||
| 840 | { | ||
| 841 | radix_tree_tag_clear(&pag->pag_ici_root, | ||
| 842 | XFS_INO_TO_AGINO(pag->pag_mount, ino), | ||
| 843 | XFS_ICI_RECLAIM_TAG); | ||
| 844 | xfs_perag_clear_reclaim_tag(pag); | ||
| 845 | } | ||
| 846 | |||
| 847 | /* | ||
| 848 | * Grab the inode for reclaim exclusively. | 846 | * Grab the inode for reclaim exclusively. |
| 849 | * Return 0 if we grabbed it, non-zero otherwise. | 847 | * Return 0 if we grabbed it, non-zero otherwise. |
| 850 | */ | 848 | */ |
