author		Christoph Hellwig <hch@infradead.org>	2012-04-23 01:58:39 -0400
committer	Ben Myers <bpm@sgi.com>			2012-05-14 17:20:31 -0400
commit		43ff2122e6492bcc88b065c433453dce88223b30 (patch)
tree		0f762cfb753edd73402b8830e0927d9efba30c61 /fs/xfs/xfs_dquot_item.c
parent		960c60af8b9481595e68875e79b2602e73169c29 (diff)
xfs: on-stack delayed write buffer lists
Queue delwri buffers on a local on-stack list instead of a per-buftarg one,
and write back the buffers per-process instead of by waking up xfsbufd.

This is now easily doable given that we have very few places left that write
delwri buffers:

 - log recovery:
	Only done at mount time, and already forcing out the buffers
	synchronously using xfs_flush_buftarg

 - quotacheck:
	Same story.

 - dquot reclaim:
	Writes out dirty dquots on the LRU under memory pressure.  We might
	want to look into doing more of this via xfsaild, but it's already
	more optimal than the synchronous inode reclaim that writes each
	buffer synchronously.

 - xfsaild:
	This is the main beneficiary of the change.  By keeping a local list
	of buffers to write we reduce latency of writing out buffers, and
	more importantly we can remove all the delwri list promotions which
	were hitting the buffer cache hard under sustained metadata loads.

The implementation is very straightforward: xfs_buf_delwri_queue now gets a
new list_head pointer that it adds the delwri buffers to, and all callers
need to eventually submit the list using xfs_buf_delwri_submit or
xfs_buf_delwri_submit_nowait.  Buffers that already are on a delwri list are
skipped in xfs_buf_delwri_queue, assuming they already are on another delwri
list.

The biggest change to pass down the buffer list was done to the AIL pushing.
Now that we operate on buffers, the trylock, push and pushbuf log item
methods are merged into a single push routine, which tries to lock the item
and, if possible, adds the buffer that needs writeback to the buffer list.
This leads to much simpler code than the previous split, but requires the
individual IOP_PUSH instances to unlock and reacquire the AIL lock around
calls to blocking routines.

Given that xfsailds now also handle writing out buffers, the conditions for
log forcing and the sleep times needed some small changes.  The most
important one is that we consider an AIL busy as long as we still have
buffers to push, and the other one is that we do increment the pushed LSN
for buffers that are under flushing at this moment, but still count them
towards the stuck items for restart purposes.  Without this we could hammer
on stuck items without ever forcing the log and not make progress under
heavy random delete workloads on fast flash storage devices.

[ Dave Chinner:
	- rebase on previous patches.
	- improved comments for XBF_DELWRI_Q handling
	- fix XBF_ASYNC handling in queue submission (test 106 failure)
	- rename delwri submit function buffer list parameters for clarity
	- xfs_efd_item_push() should return XFS_ITEM_PINNED ]

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
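As a rough illustration of the caller pattern described above (an editor's
sketch, not code from this commit: the helper name, its arguments, and the
way the buffers are obtained are hypothetical; only xfs_buf_delwri_queue()
and xfs_buf_delwri_submit() are the interfaces introduced by the patch):

/*
 * Editor's sketch of the new on-stack delwri calling convention.  Assumes
 * the post-patch prototypes bool xfs_buf_delwri_queue(struct xfs_buf *,
 * struct list_head *) and int xfs_buf_delwri_submit(struct list_head *).
 */
STATIC int
example_write_buffers(			/* hypothetical caller */
	struct xfs_buf		**bps,
	int			nbufs)
{
	LIST_HEAD(buffer_list);		/* local on-stack delwri list */
	int			i;

	for (i = 0; i < nbufs; i++) {
		/*
		 * Queue each buffer on the local list; a buffer that is
		 * already on another delwri list is skipped here.
		 */
		xfs_buf_delwri_queue(bps[i], &buffer_list);
	}

	/* Write back everything queued above and wait for the I/O. */
	return xfs_buf_delwri_submit(&buffer_list);
}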
Diffstat (limited to 'fs/xfs/xfs_dquot_item.c')
-rw-r--r--	fs/xfs/xfs_dquot_item.c	161
1 file changed, 36 insertions, 125 deletions
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index 8d8295814272..9c5d58d24e54 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -108,46 +108,6 @@ xfs_qm_dquot_logitem_unpin(
 		wake_up(&dqp->q_pinwait);
 }
 
-/*
- * Given the logitem, this writes the corresponding dquot entry to disk
- * asynchronously. This is called with the dquot entry securely locked;
- * we simply get xfs_qm_dqflush() to do the work, and unlock the dquot
- * at the end.
- */
-STATIC void
-xfs_qm_dquot_logitem_push(
-	struct xfs_log_item	*lip)
-{
-	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;
-	struct xfs_buf		*bp = NULL;
-	int			error;
-
-	ASSERT(XFS_DQ_IS_LOCKED(dqp));
-	ASSERT(!completion_done(&dqp->q_flush));
-	ASSERT(atomic_read(&dqp->q_pincount) == 0);
-
-	/*
-	 * Since we were able to lock the dquot's flush lock and
-	 * we found it on the AIL, the dquot must be dirty.  This
-	 * is because the dquot is removed from the AIL while still
-	 * holding the flush lock in xfs_dqflush_done().  Thus, if
-	 * we found it in the AIL and were able to obtain the flush
-	 * lock without sleeping, then there must not have been
-	 * anyone in the process of flushing the dquot.
-	 */
-	error = xfs_qm_dqflush(dqp, &bp);
-	if (error) {
-		xfs_warn(dqp->q_mount, "%s: push error %d on dqp %p",
-			__func__, error, dqp);
-		goto out_unlock;
-	}
-
-	xfs_buf_delwri_queue(bp);
-	xfs_buf_relse(bp);
-out_unlock:
-	xfs_dqunlock(dqp);
-}
-
 STATIC xfs_lsn_t
 xfs_qm_dquot_logitem_committed(
 	struct xfs_log_item	*lip,
@@ -179,67 +139,15 @@ xfs_qm_dqunpin_wait(
 	wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0));
 }
 
-/*
- * This is called when IOP_TRYLOCK returns XFS_ITEM_PUSHBUF to indicate that
- * the dquot is locked by us, but the flush lock isn't. So, here we are
- * going to see if the relevant dquot buffer is incore, waiting on DELWRI.
- * If so, we want to push it out to help us take this item off the AIL as soon
- * as possible.
- *
- * We must not be holding the AIL lock at this point. Calling incore() to
- * search the buffer cache can be a time consuming thing, and AIL lock is a
- * spinlock.
- */
-STATIC bool
-xfs_qm_dquot_logitem_pushbuf(
-	struct xfs_log_item	*lip)
-{
-	struct xfs_dq_logitem	*qlip = DQUOT_ITEM(lip);
-	struct xfs_dquot	*dqp = qlip->qli_dquot;
-	struct xfs_buf		*bp;
-	bool			ret = true;
-
-	ASSERT(XFS_DQ_IS_LOCKED(dqp));
-
-	/*
-	 * If flushlock isn't locked anymore, chances are that the
-	 * inode flush completed and the inode was taken off the AIL.
-	 * So, just get out.
-	 */
-	if (completion_done(&dqp->q_flush) ||
-	    !(lip->li_flags & XFS_LI_IN_AIL)) {
-		xfs_dqunlock(dqp);
-		return true;
-	}
-
-	bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno,
-			dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
-	xfs_dqunlock(dqp);
-	if (!bp)
-		return true;
-	if (XFS_BUF_ISDELAYWRITE(bp))
-		xfs_buf_delwri_promote(bp);
-	if (xfs_buf_ispinned(bp))
-		ret = false;
-	xfs_buf_relse(bp);
-	return ret;
-}
-
-/*
- * This is called to attempt to lock the dquot associated with this
- * dquot log item.  Don't sleep on the dquot lock or the flush lock.
- * If the flush lock is already held, indicating that the dquot has
- * been or is in the process of being flushed, then see if we can
- * find the dquot's buffer in the buffer cache without sleeping.  If
- * we can and it is marked delayed write, then we want to send it out.
- * We delay doing so until the push routine, though, to avoid sleeping
- * in any device strategy routines.
- */
 STATIC uint
-xfs_qm_dquot_logitem_trylock(
-	struct xfs_log_item	*lip)
+xfs_qm_dquot_logitem_push(
+	struct xfs_log_item	*lip,
+	struct list_head	*buffer_list)
 {
 	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;
+	struct xfs_buf		*bp = NULL;
+	uint			rval = XFS_ITEM_SUCCESS;
+	int			error;
 
 	if (atomic_read(&dqp->q_pincount) > 0)
 		return XFS_ITEM_PINNED;
@@ -252,20 +160,36 @@ xfs_qm_dquot_logitem_trylock(
 	 * taking the quota lock.
 	 */
 	if (atomic_read(&dqp->q_pincount) > 0) {
-		xfs_dqunlock(dqp);
-		return XFS_ITEM_PINNED;
+		rval = XFS_ITEM_PINNED;
+		goto out_unlock;
 	}
 
+	/*
+	 * Someone else is already flushing the dquot.  Nothing we can do
+	 * here but wait for the flush to finish and remove the item from
+	 * the AIL.
+	 */
 	if (!xfs_dqflock_nowait(dqp)) {
-		/*
-		 * dquot has already been flushed to the backing buffer,
-		 * leave it locked, pushbuf routine will unlock it.
-		 */
-		return XFS_ITEM_PUSHBUF;
+		rval = XFS_ITEM_FLUSHING;
+		goto out_unlock;
+	}
+
+	spin_unlock(&lip->li_ailp->xa_lock);
+
+	error = xfs_qm_dqflush(dqp, &bp);
+	if (error) {
+		xfs_warn(dqp->q_mount, "%s: push error %d on dqp %p",
+			__func__, error, dqp);
+	} else {
+		if (!xfs_buf_delwri_queue(bp, buffer_list))
+			rval = XFS_ITEM_FLUSHING;
+		xfs_buf_relse(bp);
 	}
 
-	ASSERT(lip->li_flags & XFS_LI_IN_AIL);
-	return XFS_ITEM_SUCCESS;
+	spin_lock(&lip->li_ailp->xa_lock);
+out_unlock:
+	xfs_dqunlock(dqp);
+	return rval;
 }
 
 /*
@@ -316,11 +240,9 @@ static const struct xfs_item_ops xfs_dquot_item_ops = {
 	.iop_format	= xfs_qm_dquot_logitem_format,
 	.iop_pin	= xfs_qm_dquot_logitem_pin,
 	.iop_unpin	= xfs_qm_dquot_logitem_unpin,
-	.iop_trylock	= xfs_qm_dquot_logitem_trylock,
 	.iop_unlock	= xfs_qm_dquot_logitem_unlock,
 	.iop_committed	= xfs_qm_dquot_logitem_committed,
 	.iop_push	= xfs_qm_dquot_logitem_push,
-	.iop_pushbuf	= xfs_qm_dquot_logitem_pushbuf,
 	.iop_committing = xfs_qm_dquot_logitem_committing
 };
 
@@ -415,11 +337,13 @@ xfs_qm_qoff_logitem_unpin(
 }
 
 /*
- * Quotaoff items have no locking, so just return success.
+ * There isn't much you can do to push a quotaoff item.  It is simply
+ * stuck waiting for the log to be flushed to disk.
  */
 STATIC uint
-xfs_qm_qoff_logitem_trylock(
-	struct xfs_log_item	*lip)
+xfs_qm_qoff_logitem_push(
+	struct xfs_log_item	*lip,
+	struct list_head	*buffer_list)
 {
 	return XFS_ITEM_LOCKED;
 }
@@ -446,17 +370,6 @@ xfs_qm_qoff_logitem_committed(
 	return lsn;
 }
 
-/*
- * There isn't much you can do to push on an quotaoff item.  It is simply
- * stuck waiting for the log to be flushed to disk.
- */
-STATIC void
-xfs_qm_qoff_logitem_push(
-	struct xfs_log_item	*lip)
-{
-}
-
-
 STATIC xfs_lsn_t
 xfs_qm_qoffend_logitem_committed(
 	struct xfs_log_item	*lip,
@@ -504,7 +417,6 @@ static const struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
 	.iop_format	= xfs_qm_qoff_logitem_format,
 	.iop_pin	= xfs_qm_qoff_logitem_pin,
 	.iop_unpin	= xfs_qm_qoff_logitem_unpin,
-	.iop_trylock	= xfs_qm_qoff_logitem_trylock,
 	.iop_unlock	= xfs_qm_qoff_logitem_unlock,
 	.iop_committed	= xfs_qm_qoffend_logitem_committed,
 	.iop_push	= xfs_qm_qoff_logitem_push,
@@ -519,7 +431,6 @@ static const struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
 	.iop_format	= xfs_qm_qoff_logitem_format,
 	.iop_pin	= xfs_qm_qoff_logitem_pin,
 	.iop_unpin	= xfs_qm_qoff_logitem_unpin,
-	.iop_trylock	= xfs_qm_qoff_logitem_trylock,
 	.iop_unlock	= xfs_qm_qoff_logitem_unlock,
 	.iop_committed	= xfs_qm_qoff_logitem_committed,
 	.iop_push	= xfs_qm_qoff_logitem_push,