aboutsummaryrefslogtreecommitdiffstats
path: root/fs/xfs/xfs_mount.c
diff options
context:
space:
mode:
authorDave Chinner <dchinner@redhat.com>2010-09-21 20:47:20 -0400
committerAlex Elder <aelder@sgi.com>2010-10-18 16:07:44 -0400
commite176579e70118ed7cfdb60f963628fe0ca771f3d (patch)
treee2d252fcd692aa4e3907300521e2af81c974f875 /fs/xfs/xfs_mount.c
parentbd32d25a7cf7242512e77e70bab63df4402ab91c (diff)
xfs: lockless per-ag lookups
When we start taking a reference to the per-ag for every cached buffer in the system, kernel lockstat profiling on an 8-way create workload shows the mp->m_perag_lock has higher acquisition rates than the inode lock and has significantly more contention. That is, it becomes the highest contended lock in the system. The perag lookup is trivial to convert to lock-less RCU lookups because perag structures never go away. Hence the only thing we need to protect against is tree structure changes during a grow. This can be done simply by replacing the locking in xfs_perag_get() with RCU read locking. This removes the mp->m_perag_lock completely from this path. Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Alex Elder <aelder@sgi.com>
Diffstat (limited to 'fs/xfs/xfs_mount.c')
-rw-r--r--fs/xfs/xfs_mount.c25
1 file changed, 17 insertions, 8 deletions
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 00c7a876807d..14fc6e9e1816 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -199,6 +199,8 @@ xfs_uuid_unmount(
199 199
200/* 200/*
201 * Reference counting access wrappers to the perag structures. 201 * Reference counting access wrappers to the perag structures.
202 * Because we never free per-ag structures, the only thing we
203 * have to protect against changes is the tree structure itself.
202 */ 204 */
203struct xfs_perag * 205struct xfs_perag *
204xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno) 206xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno)
@@ -206,13 +208,13 @@ xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno)
206 struct xfs_perag *pag; 208 struct xfs_perag *pag;
207 int ref = 0; 209 int ref = 0;
208 210
209 spin_lock(&mp->m_perag_lock); 211 rcu_read_lock();
210 pag = radix_tree_lookup(&mp->m_perag_tree, agno); 212 pag = radix_tree_lookup(&mp->m_perag_tree, agno);
211 if (pag) { 213 if (pag) {
212 ASSERT(atomic_read(&pag->pag_ref) >= 0); 214 ASSERT(atomic_read(&pag->pag_ref) >= 0);
213 ref = atomic_inc_return(&pag->pag_ref); 215 ref = atomic_inc_return(&pag->pag_ref);
214 } 216 }
215 spin_unlock(&mp->m_perag_lock); 217 rcu_read_unlock();
216 trace_xfs_perag_get(mp, agno, ref, _RET_IP_); 218 trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
217 return pag; 219 return pag;
218} 220}
@@ -227,10 +229,18 @@ xfs_perag_put(struct xfs_perag *pag)
227 trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_); 229 trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
228} 230}
229 231
232STATIC void
233__xfs_free_perag(
234 struct rcu_head *head)
235{
236 struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);
237
238 ASSERT(atomic_read(&pag->pag_ref) == 0);
239 kmem_free(pag);
240}
241
230/* 242/*
231 * Free up the resources associated with a mount structure. Assume that 243 * Free up the per-ag resources associated with the mount structure.
232 * the structure was initially zeroed, so we can tell which fields got
233 * initialized.
234 */ 244 */
235STATIC void 245STATIC void
236xfs_free_perag( 246xfs_free_perag(
@@ -242,10 +252,9 @@ xfs_free_perag(
242 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) { 252 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
243 spin_lock(&mp->m_perag_lock); 253 spin_lock(&mp->m_perag_lock);
244 pag = radix_tree_delete(&mp->m_perag_tree, agno); 254 pag = radix_tree_delete(&mp->m_perag_tree, agno);
245 ASSERT(pag);
246 ASSERT(atomic_read(&pag->pag_ref) == 0);
247 spin_unlock(&mp->m_perag_lock); 255 spin_unlock(&mp->m_perag_lock);
248 kmem_free(pag); 256 ASSERT(pag);
257 call_rcu(&pag->rcu_head, __xfs_free_perag);
249 } 258 }
250} 259}
251 260