Diffstat (limited to 'fs/xfs/xfs_mount.c')
-rw-r--r--	fs/xfs/xfs_mount.c | 25 +++++++++++++++++--------
1 file changed, 17 insertions(+), 8 deletions(-)
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 00c7a876807d..14fc6e9e1816 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -199,6 +199,8 @@ xfs_uuid_unmount(
 
 /*
  * Reference counting access wrappers to the perag structures.
+ * Because we never free per-ag structures, the only thing we
+ * have to protect against changes is the tree structure itself.
  */
 struct xfs_perag *
 xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno)
@@ -206,13 +208,13 @@ xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno)
 	struct xfs_perag	*pag;
 	int			ref = 0;
 
-	spin_lock(&mp->m_perag_lock);
+	rcu_read_lock();
 	pag = radix_tree_lookup(&mp->m_perag_tree, agno);
 	if (pag) {
 		ASSERT(atomic_read(&pag->pag_ref) >= 0);
 		ref = atomic_inc_return(&pag->pag_ref);
 	}
-	spin_unlock(&mp->m_perag_lock);
+	rcu_read_unlock();
 	trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
 	return pag;
 }
@@ -227,10 +229,18 @@ xfs_perag_put(struct xfs_perag *pag)
 	trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
 }
 
+STATIC void
+__xfs_free_perag(
+	struct rcu_head	*head)
+{
+	struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);
+
+	ASSERT(atomic_read(&pag->pag_ref) == 0);
+	kmem_free(pag);
+}
+
 /*
- * Free up the resources associated with a mount structure. Assume that
- * the structure was initially zeroed, so we can tell which fields got
- * initialized.
+ * Free up the per-ag resources associated with the mount structure.
  */
 STATIC void
 xfs_free_perag(
@@ -242,10 +252,9 @@ xfs_free_perag(
 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
 		spin_lock(&mp->m_perag_lock);
 		pag = radix_tree_delete(&mp->m_perag_tree, agno);
-		ASSERT(pag);
-		ASSERT(atomic_read(&pag->pag_ref) == 0);
 		spin_unlock(&mp->m_perag_lock);
-		kmem_free(pag);
+		ASSERT(pag);
+		call_rcu(&pag->rcu_head, __xfs_free_perag);
 	}
 }
 
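The change above follows the standard RCU lookup/deferred-free idiom: readers walk the radix tree under rcu_read_lock() instead of taking m_perag_lock, the writer still serialises tree modification with the spinlock, and the memory is only reclaimed via call_rcu() after a grace period, once any reader that could still have found the old pointer has left its read-side critical section. The sketch below shows that idiom in isolation; it is a minimal, hypothetical example (the demo_* names, demo_tree and demo_lock are illustrative, not XFS code), written as kernel-style C and assuming a flat radix tree keyed by an integer index.

/* Standalone sketch of the pattern; not part of the XFS patch. */
#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_item {
	atomic_t	ref;		/* analogous to pag_ref */
	struct rcu_head	rcu_head;	/* used by call_rcu() for deferred free */
};

static RADIX_TREE(demo_tree, GFP_ATOMIC);	/* analogous to m_perag_tree */
static DEFINE_SPINLOCK(demo_lock);		/* analogous to m_perag_lock */

/*
 * Lockless lookup: the tree may be modified concurrently, but an item we
 * find cannot be freed until we leave the RCU read-side section, so taking
 * a reference on it here is safe.
 */
static struct demo_item *demo_get(unsigned long index)
{
	struct demo_item *item;

	rcu_read_lock();
	item = radix_tree_lookup(&demo_tree, index);
	if (item)
		atomic_inc(&item->ref);
	rcu_read_unlock();
	return item;
}

/* RCU callback: runs after every pre-existing reader has finished. */
static void demo_free_rcu(struct rcu_head *head)
{
	struct demo_item *item = container_of(head, struct demo_item, rcu_head);

	/*
	 * As in __xfs_free_perag(), the caller must guarantee that no
	 * long-term references remain by now; RCU only covers readers that
	 * were inside rcu_read_lock() when the item was unlinked.
	 */
	WARN_ON(atomic_read(&item->ref) != 0);
	kfree(item);
}

/*
 * Removal: the spinlock only serialises modification of the tree itself;
 * object lifetime is handled by deferring the free past a grace period.
 */
static void demo_remove(unsigned long index)
{
	struct demo_item *item;

	spin_lock(&demo_lock);
	item = radix_tree_delete(&demo_tree, index);
	spin_unlock(&demo_lock);
	if (item)
		call_rcu(&item->rcu_head, demo_free_rcu);
}

This is also why the assertions move in xfs_free_perag() above: m_perag_lock now only guards radix_tree_delete() against concurrent tree modification, while the pag_ref == 0 check belongs in __xfs_free_perag(), the point at which the structure is actually freed once the grace period has elapsed.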