Diffstat (limited to 'fs')
-rw-r--r--   fs/dcache.c           7
-rw-r--r--   fs/fs-writeback.c    15
-rw-r--r--   fs/namespace.c        2
-rw-r--r--   fs/nilfs2/segment.c  10
4 files changed, 22 insertions, 12 deletions
diff --git a/fs/dcache.c b/fs/dcache.c
index 6055d61811d3..cb4a10690868 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -3061,8 +3061,13 @@ char *d_path(const struct path *path, char *buf, int buflen)
          * thus don't need to be hashed. They also don't need a name until a
          * user wants to identify the object in /proc/pid/fd/. The little hack
          * below allows us to generate a name for these objects on demand:
+         *
+         * Some pseudo inodes are mountable. When they are mounted
+         * path->dentry == path->mnt->mnt_root. In that case don't call d_dname
+         * and instead have d_path return the mounted path.
          */
-        if (path->dentry->d_op && path->dentry->d_op->d_dname)
+        if (path->dentry->d_op && path->dentry->d_op->d_dname &&
+            (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
                 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
 
         rcu_read_lock();
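
Note: a hedged restatement of the hunk above. The helper below is hypothetical (it does not exist in the patch or in fs/dcache.c); it only spells out when d_path() still delegates to ->d_dname().

#include <linux/dcache.h>
#include <linux/path.h>
#include <linux/mount.h>

/*
 * Hypothetical helper, not part of the patch: mirrors the guard added
 * to d_path().  ->d_dname() is used only when the pseudo dentry is not
 * being viewed as the root of its own mount; a mounted pseudo inode
 * (path->dentry == path->mnt->mnt_root) falls through to the regular
 * path walk, so d_path() reports the mounted location rather than a
 * synthetic "type:[inode]" name.
 */
static bool d_path_wants_d_dname(const struct path *path)
{
        return path->dentry->d_op && path->dentry->d_op->d_dname &&
               (!IS_ROOT(path->dentry) ||
                path->dentry != path->mnt->mnt_root);
}
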
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 1f4a10ece2f1..e0259a163f98 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -516,13 +516,16 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
         }
         WARN_ON(inode->i_state & I_SYNC);
         /*
-         * Skip inode if it is clean. We don't want to mess with writeback
-         * lists in this function since flusher thread may be doing for example
-         * sync in parallel and if we move the inode, it could get skipped. So
-         * here we make sure inode is on some writeback list and leave it there
-         * unless we have completely cleaned the inode.
+         * Skip inode if it is clean and we have no outstanding writeback in
+         * WB_SYNC_ALL mode. We don't want to mess with writeback lists in this
+         * function since flusher thread may be doing for example sync in
+         * parallel and if we move the inode, it could get skipped. So here we
+         * make sure inode is on some writeback list and leave it there unless
+         * we have completely cleaned the inode.
          */
-        if (!(inode->i_state & I_DIRTY))
+        if (!(inode->i_state & I_DIRTY) &&
+            (wbc->sync_mode != WB_SYNC_ALL ||
+             !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
                 goto out;
         inode->i_state |= I_SYNC;
         spin_unlock(&inode->i_lock);
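
Note: a hedged reading of the new skip test above. The helper name is hypothetical and only restates the condition checked in writeback_single_inode().

#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>

/*
 * Hypothetical helper, not part of the patch: a clean inode may be
 * skipped only if this is not a data-integrity sync, or if none of its
 * pages are still tagged as under writeback.  In WB_SYNC_ALL mode with
 * writeback in flight we must not skip, so the sync waits for it.
 */
static bool may_skip_clean_inode(struct inode *inode,
                                 struct writeback_control *wbc)
{
        if (inode->i_state & I_DIRTY)
                return false;   /* still dirty: never skip */
        return wbc->sync_mode != WB_SYNC_ALL ||
               !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK);
}
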
diff --git a/fs/namespace.c b/fs/namespace.c
index ac2ce8a766e1..be32ebccdeb1 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2886,7 +2886,7 @@ bool fs_fully_visible(struct file_system_type *type)
                         struct inode *inode = child->mnt_mountpoint->d_inode;
                         if (!S_ISDIR(inode->i_mode))
                                 goto next;
-                        if (inode->i_nlink != 2)
+                        if (inode->i_nlink > 2)
                                 goto next;
                 }
                 visible = true;
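
Note: a hedged note on why the check loosens from "!= 2" to "> 2". The helper below is hypothetical and only illustrates the link-count reasoning.

#include <linux/fs.h>

/*
 * Hypothetical illustration, not part of the patch: on filesystems
 * that keep classic directory link counts, an empty directory has
 * i_nlink == 2 ("." plus the entry in its parent) and each
 * subdirectory adds one, so i_nlink > 2 means the mountpoint has
 * subdirectories that a covering mount would hide.  Filesystems that
 * do not maintain this convention report i_nlink == 1, which the old
 * "!= 2" test wrongly treated as hiding content.
 */
static bool mountpoint_may_hide_entries(const struct inode *inode)
{
        return S_ISDIR(inode->i_mode) && inode->i_nlink > 2;
}
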
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 9f6b486b6c01..a1a191634abc 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1440,17 +1440,19 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
 
                 nilfs_clear_logs(&sci->sc_segbufs);
 
-                err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
-                if (unlikely(err))
-                        return err;
-
                 if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
                         err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
                                                         sci->sc_freesegs,
                                                         sci->sc_nfreesegs,
                                                         NULL);
                         WARN_ON(err); /* do not happen */
+                        sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
                 }
+
+                err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
+                if (unlikely(err))
+                        return err;
+
                 nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
                 sci->sc_stage = prev_stage;
         }
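
Note: a hedged, simplified sketch of the reordered retry step. The function below is hypothetical (the real code stays inline in nilfs_segctor_collect()) and assumes the local declarations available inside fs/nilfs2/segment.c.

/*
 * Hypothetical sketch, not the patched function itself: cancel the
 * freed segments and clear NILFS_CF_SUFREED before trying to extend
 * the segment list, so an error return from
 * nilfs_segctor_extend_segments() can no longer skip the cancellation
 * and leave the sufile bookkeeping inconsistent across retries.
 */
static int nilfs_segctor_prepare_retry(struct nilfs_sc_info *sci,
                                       struct the_nilfs *nilfs, int nadd)
{
        int err;

        nilfs_clear_logs(&sci->sc_segbufs);

        if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
                err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
                                                sci->sc_freesegs,
                                                sci->sc_nfreesegs, NULL);
                WARN_ON(err); /* do not happen */
                sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
        }

        /* only after sufile state is consistent may this step fail */
        return nilfs_segctor_extend_segments(sci, nilfs, nadd);
}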