Diffstat (limited to 'fs/xfs')
-rw-r--r--	fs/xfs/linux-2.6/kmem.h		|  2
-rw-r--r--	fs/xfs/linux-2.6/xfs_aops.c	|  9
-rw-r--r--	fs/xfs/linux-2.6/xfs_globals.c	|  2
-rw-r--r--	fs/xfs/linux-2.6/xfs_super.c	|  4
-rw-r--r--	fs/xfs/quota/xfs_qm.c		|  3
-rw-r--r--	fs/xfs/support/debug.h		| 10
-rw-r--r--	fs/xfs/xfs_da_btree.c		|  1
-rw-r--r--	fs/xfs/xfs_filestream.c		| 10
-rw-r--r--	fs/xfs/xfs_log.c		| 12
-rw-r--r--	fs/xfs/xfs_log_recover.c	| 12
-rw-r--r--	fs/xfs/xfs_mru_cache.c		| 72
-rw-r--r--	fs/xfs/xfs_mru_cache.h		|  6
-rw-r--r--	fs/xfs/xfs_vnodeops.c		| 20
13 files changed, 77 insertions(+), 86 deletions(-)
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h
index b4acc7f3c374..e6ea293f303c 100644
--- a/fs/xfs/linux-2.6/kmem.h
+++ b/fs/xfs/linux-2.6/kmem.h
@@ -103,7 +103,7 @@ extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
 static inline int
 kmem_shake_allow(gfp_t gfp_mask)
 {
-	return (gfp_mask & __GFP_WAIT);
+	return (gfp_mask & __GFP_WAIT) != 0;
 }
 
 #endif /* __XFS_SUPPORT_KMEM_H__ */
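
The "!= 0" form makes the predicate return a clean 0/1 instead of the raw flag bit. A minimal user-space model of the before/after behaviour (the flag value and function names below are illustrative stand-ins, not the kernel's):

#include <stdio.h>

#define GFP_WAIT_FLAG	0x10u	/* illustrative stand-in for __GFP_WAIT */

/* Old style: returns the raw bit value (16 here) when the flag is set. */
static int shake_allow_raw(unsigned int gfp_mask)
{
	return gfp_mask & GFP_WAIT_FLAG;
}

/* New style: normalizes the test to 0 or 1. */
static int shake_allow_bool(unsigned int gfp_mask)
{
	return (gfp_mask & GFP_WAIT_FLAG) != 0;
}

int main(void)
{
	unsigned int mask = 0x30u;

	printf("raw: %d  bool: %d\n", shake_allow_raw(mask), shake_allow_bool(mask));
	return 0;	/* prints "raw: 16  bool: 1" */
}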
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index fd4105d662e0..5f152f60d74d 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -181,6 +181,7 @@ xfs_setfilesize(
 		ip->i_d.di_size = isize;
 		ip->i_update_core = 1;
 		ip->i_update_size = 1;
+		mark_inode_dirty_sync(vn_to_inode(ioend->io_vnode));
 	}
 
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -652,7 +653,7 @@ xfs_probe_cluster(
 
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page	*page = pvec.pages[i];
-			size_t		pg_offset, len = 0;
+			size_t		pg_offset, pg_len = 0;
 
 			if (tindex == tlast) {
 				pg_offset =
@@ -665,16 +666,16 @@ xfs_probe_cluster(
 				pg_offset = PAGE_CACHE_SIZE;
 
 			if (page->index == tindex && !TestSetPageLocked(page)) {
-				len = xfs_probe_page(page, pg_offset, mapped);
+				pg_len = xfs_probe_page(page, pg_offset, mapped);
 				unlock_page(page);
 			}
 
-			if (!len) {
+			if (!pg_len) {
 				done = 1;
 				break;
 			}
 
-			total += len;
+			total += pg_len;
 			tindex++;
 		}
 
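
The rename to pg_len makes the per-page length's role explicit and avoids reusing the short name len; reusing a name in a nested scope silently shadows the outer variable, a classic hazard. A stand-alone illustration (values hypothetical):

#include <stdio.h>

int main(void)
{
	size_t len = 100;	/* outer variable, e.g. a total length */
	size_t sum = 0;

	for (int i = 0; i < 3; i++) {
		size_t len = 10;	/* shadows the outer len inside the loop */
		sum += len;		/* uses the inner one: easy to misread */
	}
	printf("outer len still %zu, sum %zu\n", len, sum);	/* 100, 30 */
	return 0;
}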
diff --git a/fs/xfs/linux-2.6/xfs_globals.c b/fs/xfs/linux-2.6/xfs_globals.c
index bb72c3d4141f..81565dea9af7 100644
--- a/fs/xfs/linux-2.6/xfs_globals.c
+++ b/fs/xfs/linux-2.6/xfs_globals.c
@@ -46,7 +46,7 @@ xfs_param_t xfs_params = {
 	.inherit_nosym	= {	0,		0,		1	},
 	.rotorstep	= {	1,		1,		255	},
 	.inherit_nodfrg	= {	0,		1,		1	},
-	.fstrm_timer	= {	1,		50,		3600*100},
+	.fstrm_timer	= {	1,		30*100,		3600*100},
 };
 
 /*
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 4528f9a3f304..491d1f4f202d 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -415,8 +415,10 @@ xfs_fs_write_inode(
 
 	if (vp) {
 		vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
-		if (sync)
+		if (sync) {
+			filemap_fdatawait(inode->i_mapping);
 			flags |= FLUSH_SYNC;
+		}
 		error = bhv_vop_iflush(vp, flags);
 		if (error == EAGAIN)
 			error = sync? bhv_vop_iflush(vp, flags | FLUSH_LOG) : 0;
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 2d274b23ade5..6ff0f4de1630 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -120,7 +120,8 @@ xfs_Gqm_init(void)
 	 * Initialize the dquot hash tables.
 	 */
 	udqhash = kmem_zalloc_greedy(&hsize,
-				     XFS_QM_HASHSIZE_LOW, XFS_QM_HASHSIZE_HIGH,
+				     XFS_QM_HASHSIZE_LOW * sizeof(xfs_dqhash_t),
+				     XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t),
 				     KM_SLEEP | KM_MAYFAIL | KM_LARGE);
 	gdqhash = kmem_zalloc(hsize, KM_SLEEP | KM_LARGE);
 	hsize /= sizeof(xfs_dqhash_t);
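
kmem_zalloc_greedy() takes its bounds in bytes, so the hash-size constants are now scaled by sizeof(xfs_dqhash_t); the old call passed raw entry counts and under-sized the table. A rough user-space sketch of the greedy-allocation idea (zalloc_greedy and its fallback policy are assumptions for illustration, not the kernel implementation):

#include <stdio.h>
#include <stdlib.h>

struct dqhash { void *chain; };	/* stand-in for xfs_dqhash_t */

/*
 * Greedy zeroed allocation: try the largest size first and halve toward
 * the minimum, reporting the size actually obtained. The bounds are BYTE
 * counts -- which is why the caller above now scales the hash-size
 * constants by sizeof(xfs_dqhash_t).
 */
static void *zalloc_greedy(size_t *size, size_t minsize, size_t maxsize)
{
	size_t sz = maxsize;

	for (;;) {
		void *p = calloc(1, sz);

		if (p) {
			*size = sz;
			return p;
		}
		if (sz == minsize)
			break;
		sz = sz / 2 > minsize ? sz / 2 : minsize;
	}
	*size = 0;
	return NULL;
}

int main(void)
{
	size_t hsize;
	void *udqhash = zalloc_greedy(&hsize, 64 * sizeof(struct dqhash),
				      4096 * sizeof(struct dqhash));

	if (udqhash) {
		/* Mirrors "hsize /= sizeof(xfs_dqhash_t)" in the hunk. */
		printf("%zu bytes -> %zu buckets\n",
		       hsize, hsize / sizeof(struct dqhash));
		free(udqhash);
	}
	return 0;
}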
diff --git a/fs/xfs/support/debug.h b/fs/xfs/support/debug.h
index a27a7c8c0526..855da0408647 100644
--- a/fs/xfs/support/debug.h
+++ b/fs/xfs/support/debug.h
@@ -34,10 +34,10 @@ extern void cmn_err(int, char *, ...)
 extern void assfail(char *expr, char *f, int l);
 
 #define ASSERT_ALWAYS(expr)	\
-	(unlikely((expr) != 0) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
+	(unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
 
 #ifndef DEBUG
-# define ASSERT(expr)	((void)0)
+#define ASSERT(expr)	((void)0)
 
 #ifndef STATIC
 # define STATIC static noinline
@@ -49,8 +49,10 @@ extern void assfail(char *expr, char *f, int l);
 
 #else /* DEBUG */
 
-# define ASSERT(expr)	ASSERT_ALWAYS(expr)
-# include <linux/random.h>
+#include <linux/random.h>
+
+#define ASSERT(expr)	\
+	(unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
 
 #ifndef STATIC
 # define STATIC noinline
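
In DEBUG builds ASSERT() now expands directly to the expression form rather than going through ASSERT_ALWAYS(). A self-contained user-space analogue of the two macros (unlikely() omitted, assfail() stubbed; a sketch, not the kernel's header):

#include <stdio.h>
#include <stdlib.h>

static void assfail(const char *expr, const char *file, int line)
{
	fprintf(stderr, "Assertion failed: %s, file: %s, line: %d\n",
		expr, file, line);
	abort();
}

/* Always-on form; an expression, so it is usable where statements are not. */
#define ASSERT_ALWAYS(expr)	\
	((expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))

#ifndef DEBUG
#define ASSERT(expr)	((void)0)		/* compiled out */
#else
#define ASSERT(expr)	\
	((expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
#endif

int main(void)
{
	int x = 1;

	ASSERT_ALWAYS(x == 1);	/* checked in every build */
	ASSERT(x > 0);		/* checked only when built with -DDEBUG */
	return 0;
}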
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index aea37df4aa62..26d09e2e1a7f 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -1975,7 +1975,6 @@ xfs_da_do_buf(
 		error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED);
 		if (unlikely(error == EFSCORRUPTED)) {
 			if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
-				int	i;
 				cmn_err(CE_ALERT, "xfs_da_do_buf: bno %lld\n",
 					(long long)bno);
 				cmn_err(CE_ALERT, "dir: inode %lld\n",
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index ce2278611bb7..36d8f6aa11af 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -350,9 +350,10 @@ _xfs_filestream_update_ag(
 /* xfs_fstrm_free_func(): callback for freeing cached stream items. */
 void
 xfs_fstrm_free_func(
-	xfs_ino_t	ino,
-	fstrm_item_t	*item)
+	unsigned long	ino,
+	void		*data)
 {
+	fstrm_item_t	*item = (fstrm_item_t *)data;
 	xfs_inode_t	*ip = item->ip;
 	int		ref;
 
@@ -438,7 +439,7 @@ xfs_filestream_mount(
 	grp_count = 10;
 
 	err = xfs_mru_cache_create(&mp->m_filestream, lifetime, grp_count,
-			     (xfs_mru_cache_free_func_t)xfs_fstrm_free_func);
+			     xfs_fstrm_free_func);
 
 	return err;
 }
@@ -467,8 +468,7 @@ void
 xfs_filestream_flush(
 	xfs_mount_t	*mp)
 {
-	/* point in time flush, so keep the reaper running */
-	xfs_mru_cache_flush(mp->m_filestream, 1);
+	xfs_mru_cache_flush(mp->m_filestream);
 }
 
 /*
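
The earlier code cast xfs_fstrm_free_func() to xfs_mru_cache_free_func_t at the call site; calling through a function pointer of a mismatched type is undefined behaviour in C. The fix above declares the callback with the exact expected signature and converts the void * argument inside. A minimal model of that pattern (types and names are illustrative stand-ins):

#include <stdio.h>
#include <stdlib.h>

/* Callback type the cache expects (mirrors xfs_mru_cache_free_func_t). */
typedef void (*free_func_t)(unsigned long key, void *data);

struct stream_item {	/* stand-in for fstrm_item_t */
	int	ref;
};

/*
 * Matching the expected signature exactly and converting the void *
 * inside is well-defined; casting the function pointer itself, as the
 * removed code did, is not when the types differ.
 */
static void stream_free_func(unsigned long key, void *data)
{
	struct stream_item *item = data;

	printf("freeing item for key %lu (ref %d)\n", key, item->ref);
	free(item);
}

int main(void)
{
	free_func_t		free_func = stream_free_func;	/* no cast */
	struct stream_item	*item = malloc(sizeof(*item));

	if (!item)
		return 1;
	item->ref = 1;
	free_func(42UL, item);
	return 0;
}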
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 9d4c4fbeb3ee..9bfb69e1e885 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -2185,13 +2185,13 @@ xlog_state_do_callback(
 			}
 			cb = iclog->ic_callback;
 
-			while (cb != 0) {
+			while (cb) {
 				iclog->ic_callback_tail = &(iclog->ic_callback);
 				iclog->ic_callback = NULL;
 				LOG_UNLOCK(log, s);
 
 				/* perform callbacks in the order given */
-				for (; cb != 0; cb = cb_next) {
+				for (; cb; cb = cb_next) {
 					cb_next = cb->cb_next;
 					cb->cb_func(cb->cb_arg, aborted);
 				}
@@ -2202,7 +2202,7 @@ xlog_state_do_callback(
 			loopdidcallbacks++;
 			funcdidcallbacks++;
 
-			ASSERT(iclog->ic_callback == 0);
+			ASSERT(iclog->ic_callback == NULL);
 			if (!(iclog->ic_state & XLOG_STATE_IOERROR))
 				iclog->ic_state = XLOG_STATE_DIRTY;
 
@@ -3242,10 +3242,10 @@ xlog_ticket_put(xlog_t *log,
 #else
 	/* When we debug, it is easier if tickets are cycled */
 	ticket->t_next     = NULL;
-	if (log->l_tail != 0) {
+	if (log->l_tail) {
 		log->l_tail->t_next = ticket;
 	} else {
-		ASSERT(log->l_freelist == 0);
+		ASSERT(log->l_freelist == NULL);
 		log->l_freelist = ticket;
 	}
 	log->l_tail = ticket;
@@ -3463,7 +3463,7 @@ xlog_verify_iclog(xlog_t *log,
 	s = LOG_LOCK(log);
 	icptr = log->l_iclog;
 	for (i=0; i < log->l_iclog_bufs; i++) {
-		if (icptr == 0)
+		if (icptr == NULL)
 			xlog_panic("xlog_verify_iclog: invalid ptr");
 		icptr = icptr->ic_next;
 	}
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index fddbb091a86f..8ae6e8e5f3db 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1366,7 +1366,7 @@ xlog_recover_add_to_cont_trans(
 	int			old_len;
 
 	item = trans->r_itemq;
-	if (item == 0) {
+	if (item == NULL) {
 		/* finish copying rest of trans header */
 		xlog_recover_add_item(&trans->r_itemq);
 		ptr = (xfs_caddr_t) &trans->r_theader +
@@ -1412,7 +1412,7 @@ xlog_recover_add_to_trans(
 	if (!len)
 		return 0;
 	item = trans->r_itemq;
-	if (item == 0) {
+	if (item == NULL) {
 		ASSERT(*(uint *)dp == XFS_TRANS_HEADER_MAGIC);
 		if (len == sizeof(xfs_trans_header_t))
 			xlog_recover_add_item(&trans->r_itemq);
@@ -1467,12 +1467,12 @@ xlog_recover_unlink_tid(
 	xlog_recover_t		*tp;
 	int			found = 0;
 
-	ASSERT(trans != 0);
+	ASSERT(trans != NULL);
 	if (trans == *q) {
 		*q = (*q)->r_next;
 	} else {
 		tp = *q;
-		while (tp != 0) {
+		while (tp) {
 			if (tp->r_next == trans) {
 				found = 1;
 				break;
@@ -1495,7 +1495,7 @@ xlog_recover_insert_item_backq(
 	xlog_recover_item_t	**q,
 	xlog_recover_item_t	*item)
 {
-	if (*q == 0) {
+	if (*q == NULL) {
 		item->ri_prev = item->ri_next = item;
 		*q = item;
 	} else {
@@ -1899,7 +1899,7 @@ xlog_recover_do_reg_buffer(
 			break;
 		nbits = xfs_contig_bits(data_map, map_size, bit);
 		ASSERT(nbits > 0);
-		ASSERT(item->ri_buf[i].i_addr != 0);
+		ASSERT(item->ri_buf[i].i_addr != NULL);
 		ASSERT(item->ri_buf[i].i_len % XFS_BLI_CHUNK == 0);
 		ASSERT(XFS_BUF_COUNT(bp) >=
 		       ((uint)bit << XFS_BLI_SHIFT)+(nbits<<XFS_BLI_SHIFT));
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index 7deb9e3cbbd3..e0b358c1c533 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -206,8 +206,11 @@ _xfs_mru_cache_list_insert(
 	 */
 	if (!_xfs_mru_cache_migrate(mru, now)) {
 		mru->time_zero = now;
-		if (!mru->next_reap)
-			mru->next_reap = mru->grp_count * mru->grp_time;
+		if (!mru->queued) {
+			mru->queued = 1;
+			queue_delayed_work(xfs_mru_reap_wq, &mru->work,
+					   mru->grp_count * mru->grp_time);
+		}
 	} else {
 		grp = (now - mru->time_zero) / mru->grp_time;
 		grp = (mru->lru_grp + grp) % mru->grp_count;
@@ -271,29 +274,26 @@ _xfs_mru_cache_reap(
 	struct work_struct	*work)
 {
 	xfs_mru_cache_t		*mru = container_of(work, xfs_mru_cache_t, work.work);
-	unsigned long		now;
+	unsigned long		now, next;
 
 	ASSERT(mru && mru->lists);
 	if (!mru || !mru->lists)
 		return;
 
 	mutex_spinlock(&mru->lock);
-	now = jiffies;
-	if (mru->reap_all ||
-	    (mru->next_reap && time_after(now, mru->next_reap))) {
-		if (mru->reap_all)
-			now += mru->grp_count * mru->grp_time * 2;
-		mru->next_reap = _xfs_mru_cache_migrate(mru, now);
-		_xfs_mru_cache_clear_reap_list(mru);
+	next = _xfs_mru_cache_migrate(mru, jiffies);
+	_xfs_mru_cache_clear_reap_list(mru);
+
+	mru->queued = next;
+	if ((mru->queued > 0)) {
+		now = jiffies;
+		if (next <= now)
+			next = 0;
+		else
+			next -= now;
+		queue_delayed_work(xfs_mru_reap_wq, &mru->work, next);
 	}
 
-	/*
-	 * the process that triggered the reap_all is responsible
-	 * for restating the periodic reap if it is required.
-	 */
-	if (!mru->reap_all)
-		queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time);
-	mru->reap_all = 0;
 	mutex_spinunlock(&mru->lock, 0);
 }
 
@@ -352,7 +352,7 @@ xfs_mru_cache_create(
 
 	/* An extra list is needed to avoid reaping up to a grp_time early. */
 	mru->grp_count = grp_count + 1;
-	mru->lists = kmem_alloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP);
+	mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP);
 
 	if (!mru->lists) {
 		err = ENOMEM;
@@ -374,11 +374,6 @@ xfs_mru_cache_create(
 	mru->grp_time  = grp_time;
 	mru->free_func = free_func;
 
-	/* start up the reaper event */
-	mru->next_reap = 0;
-	mru->reap_all = 0;
-	queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time);
-
 	*mrup = mru;
 
 exit:
@@ -394,35 +389,25 @@ exit:
  * Call xfs_mru_cache_flush() to flush out all cached entries, calling their
  * free functions as they're deleted. When this function returns, the caller is
  * guaranteed that all the free functions for all the elements have finished
- * executing.
- *
- * While we are flushing, we stop the periodic reaper event from triggering.
- * Normally, we want to restart this periodic event, but if we are shutting
- * down the cache we do not want it restarted. hence the restart parameter
- * where 0 = do not restart reaper and 1 = restart reaper.
+ * executing and the reaper is not running.
  */
 void
 xfs_mru_cache_flush(
-	xfs_mru_cache_t		*mru,
-	int			restart)
+	xfs_mru_cache_t		*mru)
 {
 	if (!mru || !mru->lists)
 		return;
 
-	cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
-
 	mutex_spinlock(&mru->lock);
-	mru->reap_all = 1;
-	mutex_spinunlock(&mru->lock, 0);
+	if (mru->queued) {
+		mutex_spinunlock(&mru->lock, 0);
+		cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
+		mutex_spinlock(&mru->lock);
+	}
 
-	queue_work(xfs_mru_reap_wq, &mru->work.work);
-	flush_workqueue(xfs_mru_reap_wq);
+	_xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time);
+	_xfs_mru_cache_clear_reap_list(mru);
 
-	mutex_spinlock(&mru->lock);
-	WARN_ON_ONCE(mru->reap_all != 0);
-	mru->reap_all = 0;
-	if (restart)
-		queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time);
 	mutex_spinunlock(&mru->lock, 0);
 }
 
@@ -433,8 +418,7 @@ xfs_mru_cache_destroy(
 	if (!mru || !mru->lists)
 		return;
 
-	/* we don't want the reaper to restart here */
-	xfs_mru_cache_flush(mru, 0);
+	xfs_mru_cache_flush(mru);
 
 	kmem_free(mru->lists, mru->grp_count * sizeof(*mru->lists));
 	kmem_free(mru, sizeof(*mru));
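
The reaper is now armed on demand: the first insert queues the delayed work, and the reaper re-queues itself only while _xfs_mru_cache_migrate() reports a future expiry, so an idle cache schedules no work at all (a zero "next" clears mru->queued and the work is not rearmed). The rearm-delay computation from the reap hunk, extracted as a small testable function (time modelled as a plain counter standing in for jiffies):

#include <stdio.h>

/*
 * "next" is the absolute expiry time returned by the migrate step
 * (0 = cache empty), "now" is the current time. Returns the relative
 * delay to requeue with, clamped to zero when the deadline has passed.
 */
static unsigned long reap_delay(unsigned long next, unsigned long now)
{
	return next <= now ? 0 : next - now;
}

int main(void)
{
	printf("%lu\n", reap_delay(1500, 1000));	/* 500: sleep to expiry */
	printf("%lu\n", reap_delay(900, 1000));		/* 0: run again now */
	return 0;
}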
diff --git a/fs/xfs/xfs_mru_cache.h b/fs/xfs/xfs_mru_cache.h
index 624fd10ee8e5..dd58ea1bbebe 100644
--- a/fs/xfs/xfs_mru_cache.h
+++ b/fs/xfs/xfs_mru_cache.h
@@ -32,11 +32,9 @@ typedef struct xfs_mru_cache
 	unsigned int		grp_time;  /* Time period spanned by grps.  */
 	unsigned int		lru_grp;   /* Group containing time zero.   */
 	unsigned long		time_zero; /* Time first element was added. */
-	unsigned long		next_reap; /* Time that the reaper should
-					      next do something. */
-	unsigned int		reap_all;  /* if set, reap all lists */
 	xfs_mru_cache_free_func_t free_func; /* Function pointer for freeing. */
 	struct delayed_work	work;      /* Workqueue data for reaping.   */
+	unsigned int		queued;	   /* work has been queued */
 } xfs_mru_cache_t;
 
 int xfs_mru_cache_init(void);
@@ -44,7 +42,7 @@ void xfs_mru_cache_uninit(void);
 int xfs_mru_cache_create(struct xfs_mru_cache **mrup, unsigned int lifetime_ms,
 			     unsigned int grp_count,
 			     xfs_mru_cache_free_func_t free_func);
-void xfs_mru_cache_flush(xfs_mru_cache_t *mru, int restart);
+void xfs_mru_cache_flush(xfs_mru_cache_t *mru);
 void xfs_mru_cache_destroy(struct xfs_mru_cache *mru);
 int xfs_mru_cache_insert(struct xfs_mru_cache *mru, unsigned long key,
 		     void *value);
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 1a5ad8cd97b0..603459229904 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -1082,6 +1082,9 @@ xfs_fsync(
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 		return XFS_ERROR(EIO);
 
+	if (flag & FSYNC_DATA)
+		filemap_fdatawait(vn_to_inode(XFS_ITOV(ip))->i_mapping);
+
 	/*
 	 * We always need to make sure that the required inode state
 	 * is safe on disk.  The vnode might be clean but because
@@ -3769,12 +3772,16 @@ xfs_inode_flush(
 			sync_lsn = log->l_last_sync_lsn;
 			GRANT_UNLOCK(log, s);
 
-			if ((XFS_LSN_CMP(iip->ili_last_lsn, sync_lsn) <= 0))
-				return 0;
+			if ((XFS_LSN_CMP(iip->ili_last_lsn, sync_lsn) > 0)) {
+				if (flags & FLUSH_SYNC)
+					log_flags |= XFS_LOG_SYNC;
+				error = xfs_log_force(mp, iip->ili_last_lsn, log_flags);
+				if (error)
+					return error;
+			}
 
-			if (flags & FLUSH_SYNC)
-				log_flags |= XFS_LOG_SYNC;
-			return xfs_log_force(mp, iip->ili_last_lsn, log_flags);
+			if (ip->i_update_core == 0)
+				return 0;
 		}
 	}
 
@@ -3788,9 +3795,6 @@ xfs_inode_flush(
 	if (flags & FLUSH_INODE) {
 		int	flush_flags;
 
-		if (xfs_ipincount(ip))
-			return EAGAIN;
-
 		if (flags & FLUSH_SYNC) {
 			xfs_ilock(ip, XFS_ILOCK_SHARED);
 			xfs_iflock(ip);
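
The reordered xfs_inode_flush() logic forces the log first when the inode has changes past the last sync LSN, and only then short-circuits if the in-core inode is clean; the removed code could return before any inode flush once the log was forced. A stand-alone sketch of that control flow (all state and helpers below are hypothetical stand-ins, not the kernel's):

#include <stdio.h>

typedef long long lsn_t;

/* Hypothetical stand-ins for the inode/log state the hunk touches. */
static lsn_t	ili_last_lsn = 120;	/* last LSN this inode changed at */
static lsn_t	last_sync_lsn = 100;	/* last LSN synced to disk */
static int	i_update_core = 0;	/* in-core inode dirty? */

static int log_force(lsn_t lsn, int sync)
{
	printf("force log to %lld%s\n", lsn, sync ? " (sync)" : "");
	last_sync_lsn = lsn;
	return 0;
}

/*
 * New ordering: force the log if this inode has changes beyond the last
 * sync point, then return early only when the in-core inode is clean.
 */
static int inode_flush(int sync)
{
	if (ili_last_lsn > last_sync_lsn) {
		int error = log_force(ili_last_lsn, sync);

		if (error)
			return error;
	}
	if (i_update_core == 0)
		return 0;	/* nothing more to write back */
	printf("flush inode core\n");
	return 0;
}

int main(void)
{
	return inode_flush(1);
}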