Diffstat (limited to 'fs/xfs/xfs_mount.c')
 fs/xfs/xfs_mount.c | 25 +++++++++++++++---------
 1 file changed, 17 insertions(+), 8 deletions(-)
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 00c7a876807d..14fc6e9e1816 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -199,6 +199,8 @@ xfs_uuid_unmount(
 
 /*
  * Reference counting access wrappers to the perag structures.
+ * Because we never free per-ag structures, the only thing we
+ * have to protect against changes is the tree structure itself.
  */
 struct xfs_perag *
 xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno)
@@ -206,13 +208,13 @@ xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno)
 	struct xfs_perag	*pag;
 	int			ref = 0;
 
-	spin_lock(&mp->m_perag_lock);
+	rcu_read_lock();
 	pag = radix_tree_lookup(&mp->m_perag_tree, agno);
 	if (pag) {
 		ASSERT(atomic_read(&pag->pag_ref) >= 0);
 		ref = atomic_inc_return(&pag->pag_ref);
 	}
-	spin_unlock(&mp->m_perag_lock);
+	rcu_read_unlock();
 	trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
 	return pag;
 }
@@ -227,10 +229,18 @@ xfs_perag_put(struct xfs_perag *pag)
 	trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
 }
 
+STATIC void
+__xfs_free_perag(
+	struct rcu_head	*head)
+{
+	struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);
+
+	ASSERT(atomic_read(&pag->pag_ref) == 0);
+	kmem_free(pag);
+}
+
 /*
- * Free up the resources associated with a mount structure.  Assume that
- * the structure was initially zeroed, so we can tell which fields got
- * initialized.
+ * Free up the per-ag resources associated with the mount structure.
  */
 STATIC void
 xfs_free_perag(
@@ -242,10 +252,9 @@ xfs_free_perag(
 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
 		spin_lock(&mp->m_perag_lock);
 		pag = radix_tree_delete(&mp->m_perag_tree, agno);
-		ASSERT(pag);
-		ASSERT(atomic_read(&pag->pag_ref) == 0);
 		spin_unlock(&mp->m_perag_lock);
-		kmem_free(pag);
+		ASSERT(pag);
+		call_rcu(&pag->rcu_head, __xfs_free_perag);
 	}
 }
 
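The core of the change above is a standard RCU scheme for lockless lookup of objects indexed in a radix tree: readers walk the tree under rcu_read_lock() and take a reference on the object they find, modifications to the tree stay serialised by the existing spinlock, and freeing is deferred through call_rcu() so a concurrent reader can never touch freed memory. The following is a minimal, self-contained sketch of that pattern for illustration only; it is not taken from the patch, and all identifiers (my_obj, my_tree, my_obj_get, my_obj_delete) are hypothetical.

/* Illustrative sketch only -- not XFS code.  Assumes a kernel build
 * environment; the names below are made up for this example. */
#include <linux/atomic.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_obj {
	atomic_t		ref;		/* lookup references */
	struct rcu_head		rcu_head;	/* deferred free */
};

static RADIX_TREE(my_tree, GFP_ATOMIC);		/* index -> my_obj */
static DEFINE_SPINLOCK(my_tree_lock);		/* serialises tree updates */

/*
 * Lockless lookup: the tree is only modified under my_tree_lock and
 * entries are freed via call_rcu(), so an RCU read-side critical
 * section is enough to keep the object alive while a reference is
 * taken.
 */
static struct my_obj *
my_obj_get(unsigned long index)
{
	struct my_obj	*obj;

	rcu_read_lock();
	obj = radix_tree_lookup(&my_tree, index);
	if (obj)
		atomic_inc(&obj->ref);
	rcu_read_unlock();
	return obj;
}

/* RCU callback: runs after all pre-existing readers have finished. */
static void
__my_obj_free(struct rcu_head *head)
{
	struct my_obj	*obj = container_of(head, struct my_obj, rcu_head);

	kfree(obj);
}

/* Remove the entry under the lock, then defer the actual free. */
static void
my_obj_delete(unsigned long index)
{
	struct my_obj	*obj;

	spin_lock(&my_tree_lock);
	obj = radix_tree_delete(&my_tree, index);
	spin_unlock(&my_tree_lock);
	if (obj)
		call_rcu(&obj->rcu_head, __my_obj_free);
}

The same ordering appears in the patch: radix_tree_delete() still runs under m_perag_lock, while the immediate kmem_free() is replaced by call_rcu(), which is what allows xfs_perag_get() to drop the spinlock in favour of rcu_read_lock().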