author    Ingo Molnar <mingo@elte.hu>    2009-09-04 08:44:16 -0400
committer Ingo Molnar <mingo@elte.hu>    2009-09-04 08:44:16 -0400
commit    695a461296e5df148c99ac087b9e1cb380f4db15 (patch)
tree      951893036fdc0b7bae0e17bc739ac8ffe909781d /fs
parent    c7084b35eb1a4d3353a501508baf9d3d82822c93 (diff)
parent    2b681fafcc50fea6304ed418667c9d04282acb73 (diff)
Merge branch 'amd-iommu/2.6.32' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu into core/iommu
Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/v9fs.c | 21
-rw-r--r--  fs/9p/v9fs.h | 1
-rw-r--r--  fs/9p/vfs_inode.c | 126
-rw-r--r--  fs/9p/vfs_super.c | 39
-rw-r--r--  fs/afs/file.c | 18
-rw-r--r--  fs/autofs4/expire.c | 2
-rw-r--r--  fs/binfmt_flat.c | 17
-rw-r--r--  fs/block_dev.c | 10
-rw-r--r--  fs/btrfs/async-thread.c | 4
-rw-r--r--  fs/btrfs/ctree.c | 121
-rw-r--r--  fs/btrfs/ctree.h | 27
-rw-r--r--  fs/btrfs/disk-io.c | 15
-rw-r--r--  fs/btrfs/extent-tree.c | 530
-rw-r--r--  fs/btrfs/free-space-cache.c | 1058
-rw-r--r--  fs/btrfs/free-space-cache.h | 8
-rw-r--r--  fs/btrfs/inode.c | 26
-rw-r--r--  fs/btrfs/print-tree.c | 6
-rw-r--r--  fs/btrfs/relocation.c | 12
-rw-r--r--  fs/btrfs/transaction.c | 56
-rw-r--r--  fs/btrfs/transaction.h | 1
-rw-r--r--  fs/btrfs/tree-log.c | 2
-rw-r--r--  fs/btrfs/volumes.c | 46
-rw-r--r--  fs/btrfs/zlib.c | 6
-rw-r--r--  fs/buffer.c | 7
-rw-r--r--  fs/cifs/CHANGES | 7
-rw-r--r--  fs/cifs/README | 25
-rw-r--r--  fs/cifs/cifs_dfs_ref.c | 12
-rw-r--r--  fs/cifs/cifs_unicode.c | 2
-rw-r--r--  fs/cifs/cifsfs.c | 4
-rw-r--r--  fs/cifs/connect.c | 55
-rw-r--r--  fs/compat_ioctl.c | 1
-rw-r--r--  fs/ecryptfs/keystore.c | 13
-rw-r--r--  fs/exec.c | 4
-rw-r--r--  fs/ext3/Kconfig | 32
-rw-r--r--  fs/ext3/super.c | 40
-rw-r--r--  fs/gfs2/aops.c | 39
-rw-r--r--  fs/gfs2/glock.c | 138
-rw-r--r--  fs/gfs2/glock.h | 3
-rw-r--r--  fs/gfs2/glops.c | 21
-rw-r--r--  fs/gfs2/incore.h | 2
-rw-r--r--  fs/gfs2/rgrp.c | 23
-rw-r--r--  fs/gfs2/super.c | 40
-rw-r--r--  fs/gfs2/super.h | 4
-rw-r--r--  fs/gfs2/sys.c | 20
-rw-r--r--  fs/hugetlbfs/inode.c | 20
-rw-r--r--  fs/inode.c | 40
-rw-r--r--  fs/jffs2/file.c | 2
-rw-r--r--  fs/libfs.c | 2
-rw-r--r--  fs/namespace.c | 3
-rw-r--r--  fs/nfs/direct.c | 20
-rw-r--r--  fs/nfs/nfs4state.c | 4
-rw-r--r--  fs/nfs/read.c | 6
-rw-r--r--  fs/nfs/write.c | 6
-rw-r--r--  fs/nilfs2/mdt.c | 4
-rw-r--r--  fs/nilfs2/segment.c | 16
-rw-r--r--  fs/nilfs2/super.c | 2
-rw-r--r--  fs/nilfs2/the_nilfs.h | 2
-rw-r--r--  fs/notify/inotify/inotify_fsnotify.c | 46
-rw-r--r--  fs/notify/inotify/inotify_user.c | 254
-rw-r--r--  fs/notify/notification.c | 11
-rw-r--r--  fs/ocfs2/alloc.c | 49
-rw-r--r--  fs/ocfs2/aops.c | 69
-rw-r--r--  fs/ocfs2/dcache.c | 35
-rw-r--r--  fs/ocfs2/dcache.h | 3
-rw-r--r--  fs/ocfs2/dlm/dlmast.c | 1
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmunlock.c | 4
-rw-r--r--  fs/ocfs2/file.c | 5
-rw-r--r--  fs/ocfs2/journal.c | 8
-rw-r--r--  fs/ocfs2/journal.h | 19
-rw-r--r--  fs/ocfs2/ocfs2.h | 22
-rw-r--r--  fs/ocfs2/ocfs2_lockid.h | 1
-rw-r--r--  fs/ocfs2/quota.h | 1
-rw-r--r--  fs/ocfs2/quota_global.c | 144
-rw-r--r--  fs/ocfs2/quota_local.c | 110
-rw-r--r--  fs/ocfs2/stack_o2cb.c | 3
-rw-r--r--  fs/ocfs2/super.c | 34
-rw-r--r--  fs/ocfs2/xattr.c | 3
-rw-r--r--  fs/proc/base.c | 46
-rw-r--r--  fs/proc/task_mmu.c | 1
-rw-r--r--  fs/proc/task_nommu.c | 1
-rw-r--r--  fs/quota/dquot.c | 7
-rw-r--r--  fs/ramfs/file-nommu.c | 1
-rw-r--r--  fs/select.c | 1
-rw-r--r--  fs/sysfs/dir.c | 2
-rw-r--r--  fs/udf/super.c | 12
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 8
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.c | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c | 13
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.h | 1
-rw-r--r--  fs/xfs/xfs_attr.c | 8
-rw-r--r--  fs/xfs/xfs_bmap.c | 2
-rw-r--r--  fs/xfs/xfs_btree.c | 4
-rw-r--r--  fs/xfs/xfs_da_btree.c | 6
-rw-r--r--  fs/xfs/xfs_dir2.c | 2
-rw-r--r--  fs/xfs/xfs_fsops.c | 20
-rw-r--r--  fs/xfs/xfs_iget.c | 253
-rw-r--r--  fs/xfs/xfs_inode.c | 10
-rw-r--r--  fs/xfs/xfs_inode.h | 17
-rw-r--r--  fs/xfs/xfs_log.c | 2
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 4
102 files changed, 2804 insertions, 1218 deletions
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 332b5ff02fec..f7003cfac63d 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -76,7 +76,7 @@ static const match_table_t tokens = {
  * Return 0 upon success, -ERRNO upon failure.
  */
 
-static int v9fs_parse_options(struct v9fs_session_info *v9ses)
+static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
 {
     char *options;
     substring_t args[MAX_OPT_ARGS];
@@ -90,10 +90,10 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses)
     v9ses->debug = 0;
     v9ses->cache = 0;
 
-    if (!v9ses->options)
+    if (!opts)
         return 0;
 
-    options = kstrdup(v9ses->options, GFP_KERNEL);
+    options = kstrdup(opts, GFP_KERNEL);
     if (!options) {
         P9_DPRINTK(P9_DEBUG_ERROR,
                    "failed to allocate copy of option string\n");
@@ -206,24 +206,14 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
     v9ses->uid = ~0;
     v9ses->dfltuid = V9FS_DEFUID;
     v9ses->dfltgid = V9FS_DEFGID;
-    if (data) {
-        v9ses->options = kstrdup(data, GFP_KERNEL);
-        if (!v9ses->options) {
-            P9_DPRINTK(P9_DEBUG_ERROR,
-                       "failed to allocate copy of option string\n");
-            retval = -ENOMEM;
-            goto error;
-        }
-    }
 
-    rc = v9fs_parse_options(v9ses);
+    rc = v9fs_parse_options(v9ses, data);
     if (rc < 0) {
         retval = rc;
         goto error;
     }
 
-    v9ses->clnt = p9_client_create(dev_name, v9ses->options);
-
+    v9ses->clnt = p9_client_create(dev_name, data);
     if (IS_ERR(v9ses->clnt)) {
         retval = PTR_ERR(v9ses->clnt);
         v9ses->clnt = NULL;
@@ -280,7 +270,6 @@ void v9fs_session_close(struct v9fs_session_info *v9ses)
 
     __putname(v9ses->uname);
     __putname(v9ses->aname);
-    kfree(v9ses->options);
 }
 
 /**
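
Note on the fs/9p hunks above: v9fs_parse_options() now receives the caller's
option string directly, yet it still kstrdup()s it before parsing, because
strsep()/match_token() consume the buffer destructively while the original
string is still needed afterwards for p9_client_create(). A minimal sketch of
that duplicate-then-parse pattern (generic tokens, not the actual 9p table):

    #include <linux/slab.h>
    #include <linux/string.h>

    static int parse_opts_sketch(const char *opts)
    {
        char *dup, *p, *tok;

        if (!opts)
            return 0;

        dup = kstrdup(opts, GFP_KERNEL);    /* strsep() writes NULs into it */
        if (!dup)
            return -ENOMEM;

        for (p = dup; (tok = strsep(&p, ",")) != NULL;) {
            if (!*tok)
                continue;
            /* match_token(tok, tokens, args) would go here */
        }
        kfree(dup);
        return 0;
    }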
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index a7d567192998..38762bf102a9 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -85,7 +85,6 @@ struct v9fs_session_info {
     unsigned int afid;
     unsigned int cache;
 
-    char *options;      /* copy of mount options */
     char *uname;        /* user name to mount as */
     char *aname;        /* name of remote hierarchy being mounted */
     unsigned int maxdata;   /* max data for client interface */
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 81f8bbf12f9f..06a223d50a81 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -171,7 +171,6 @@ int v9fs_uflags2omode(int uflags, int extended)
 
 /**
  * v9fs_blank_wstat - helper function to setup a 9P stat structure
- * @v9ses: 9P session info (for determining extended mode)
  * @wstat: structure to initialize
  *
  */
@@ -207,65 +206,72 @@ v9fs_blank_wstat(struct p9_wstat *wstat)
 
 struct inode *v9fs_get_inode(struct super_block *sb, int mode)
 {
+    int err;
     struct inode *inode;
     struct v9fs_session_info *v9ses = sb->s_fs_info;
 
     P9_DPRINTK(P9_DEBUG_VFS, "super block: %p mode: %o\n", sb, mode);
 
     inode = new_inode(sb);
-    if (inode) {
-        inode->i_mode = mode;
-        inode->i_uid = current_fsuid();
-        inode->i_gid = current_fsgid();
-        inode->i_blocks = 0;
-        inode->i_rdev = 0;
-        inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-        inode->i_mapping->a_ops = &v9fs_addr_operations;
-
-        switch (mode & S_IFMT) {
-        case S_IFIFO:
-        case S_IFBLK:
-        case S_IFCHR:
-        case S_IFSOCK:
-            if (!v9fs_extended(v9ses)) {
-                P9_DPRINTK(P9_DEBUG_ERROR,
-                           "special files without extended mode\n");
-                return ERR_PTR(-EINVAL);
-            }
-            init_special_inode(inode, inode->i_mode,
-                               inode->i_rdev);
-            break;
-        case S_IFREG:
-            inode->i_op = &v9fs_file_inode_operations;
-            inode->i_fop = &v9fs_file_operations;
-            break;
-        case S_IFLNK:
-            if (!v9fs_extended(v9ses)) {
-                P9_DPRINTK(P9_DEBUG_ERROR,
-                           "extended modes used w/o 9P2000.u\n");
-                return ERR_PTR(-EINVAL);
-            }
-            inode->i_op = &v9fs_symlink_inode_operations;
-            break;
-        case S_IFDIR:
-            inc_nlink(inode);
-            if (v9fs_extended(v9ses))
-                inode->i_op = &v9fs_dir_inode_operations_ext;
-            else
-                inode->i_op = &v9fs_dir_inode_operations;
-            inode->i_fop = &v9fs_dir_operations;
-            break;
-        default:
-            P9_DPRINTK(P9_DEBUG_ERROR,
-                       "BAD mode 0x%x S_IFMT 0x%x\n",
-                       mode, mode & S_IFMT);
-            return ERR_PTR(-EINVAL);
-        }
-    } else {
+    if (!inode) {
         P9_EPRINTK(KERN_WARNING, "Problem allocating inode\n");
         return ERR_PTR(-ENOMEM);
     }
+
+    inode->i_mode = mode;
+    inode->i_uid = current_fsuid();
+    inode->i_gid = current_fsgid();
+    inode->i_blocks = 0;
+    inode->i_rdev = 0;
+    inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+    inode->i_mapping->a_ops = &v9fs_addr_operations;
+
+    switch (mode & S_IFMT) {
+    case S_IFIFO:
+    case S_IFBLK:
+    case S_IFCHR:
+    case S_IFSOCK:
+        if (!v9fs_extended(v9ses)) {
+            P9_DPRINTK(P9_DEBUG_ERROR,
+                       "special files without extended mode\n");
+            err = -EINVAL;
+            goto error;
+        }
+        init_special_inode(inode, inode->i_mode, inode->i_rdev);
+        break;
+    case S_IFREG:
+        inode->i_op = &v9fs_file_inode_operations;
+        inode->i_fop = &v9fs_file_operations;
+        break;
+    case S_IFLNK:
+        if (!v9fs_extended(v9ses)) {
+            P9_DPRINTK(P9_DEBUG_ERROR,
+                       "extended modes used w/o 9P2000.u\n");
+            err = -EINVAL;
+            goto error;
+        }
+        inode->i_op = &v9fs_symlink_inode_operations;
+        break;
+    case S_IFDIR:
+        inc_nlink(inode);
+        if (v9fs_extended(v9ses))
+            inode->i_op = &v9fs_dir_inode_operations_ext;
+        else
+            inode->i_op = &v9fs_dir_inode_operations;
+        inode->i_fop = &v9fs_dir_operations;
+        break;
+    default:
+        P9_DPRINTK(P9_DEBUG_ERROR, "BAD mode 0x%x S_IFMT 0x%x\n",
+                   mode, mode & S_IFMT);
+        err = -EINVAL;
+        goto error;
+    }
+
     return inode;
+
+error:
+    iput(inode);
+    return ERR_PTR(err);
 }
 
 /*
@@ -338,30 +344,25 @@ v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
 
     ret = NULL;
     st = p9_client_stat(fid);
-    if (IS_ERR(st)) {
-        err = PTR_ERR(st);
-        st = NULL;
-        goto error;
-    }
+    if (IS_ERR(st))
+        return ERR_CAST(st);
 
     umode = p9mode2unixmode(v9ses, st->mode);
     ret = v9fs_get_inode(sb, umode);
     if (IS_ERR(ret)) {
         err = PTR_ERR(ret);
-        ret = NULL;
         goto error;
     }
 
     v9fs_stat2inode(st, ret, sb);
     ret->i_ino = v9fs_qid2ino(&st->qid);
+    p9stat_free(st);
     kfree(st);
     return ret;
 
 error:
+    p9stat_free(st);
     kfree(st);
-    if (ret)
-        iput(ret);
-
     return ERR_PTR(err);
 }
 
@@ -403,9 +404,9 @@ v9fs_open_created(struct inode *inode, struct file *file)
  * @v9ses: session information
  * @dir: directory that dentry is being created in
  * @dentry: dentry that is being created
+ * @extension: 9p2000.u extension string to support devices, etc.
  * @perm: create permissions
  * @mode: open mode
- * @extension: 9p2000.u extension string to support devices, etc.
  *
  */
 static struct p9_fid *
@@ -470,7 +471,10 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
     dentry->d_op = &v9fs_dentry_operations;
 
     d_instantiate(dentry, inode);
-    v9fs_fid_add(dentry, fid);
+    err = v9fs_fid_add(dentry, fid);
+    if (err < 0)
+        goto error;
+
     return ofid;
 
 error:
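
The v9fs_get_inode() rewrite above converts nested success/failure branches
into the kernel's usual early-return-plus-goto shape, so the partially set up
inode is always released through a single iput(). A condensed sketch of that
shape (types and helpers reduced to the bare minimum):

    struct inode *get_inode_sketch(struct super_block *sb, int mode)
    {
        struct inode *inode;
        int err;

        inode = new_inode(sb);
        if (!inode)
            return ERR_PTR(-ENOMEM);    /* nothing to unwind yet */

        if (!S_ISREG(mode) && !S_ISDIR(mode) && !S_ISLNK(mode)) {
            err = -EINVAL;              /* unsupported type */
            goto error;
        }

        inode->i_mode = mode;
        return inode;

    error:
        iput(inode);                    /* single unwind point */
        return ERR_PTR(err);
    }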
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 38d695d66a0b..8961f1a8f668 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -81,7 +81,7 @@ static int v9fs_set_super(struct super_block *s, void *data)
 
 static void
 v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
-        int flags)
+        int flags, void *data)
 {
     sb->s_maxbytes = MAX_LFS_FILESIZE;
     sb->s_blocksize_bits = fls(v9ses->maxdata - 1);
@@ -91,6 +91,8 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
 
     sb->s_flags = flags | MS_ACTIVE | MS_SYNCHRONOUS | MS_DIRSYNC |
                   MS_NOATIME;
+
+    save_mount_options(sb, data);
 }
 
 /**
@@ -113,14 +115,11 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
     struct v9fs_session_info *v9ses = NULL;
     struct p9_wstat *st = NULL;
     int mode = S_IRWXUGO | S_ISVTX;
-    uid_t uid = current_fsuid();
-    gid_t gid = current_fsgid();
     struct p9_fid *fid;
     int retval = 0;
 
     P9_DPRINTK(P9_DEBUG_VFS, " \n");
 
-    st = NULL;
     v9ses = kzalloc(sizeof(struct v9fs_session_info), GFP_KERNEL);
     if (!v9ses)
         return -ENOMEM;
@@ -142,7 +141,7 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
         retval = PTR_ERR(sb);
         goto free_stat;
     }
-    v9fs_fill_super(sb, v9ses, flags);
+    v9fs_fill_super(sb, v9ses, flags, data);
 
     inode = v9fs_get_inode(sb, S_IFDIR | mode);
     if (IS_ERR(inode)) {
@@ -150,9 +149,6 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
         goto release_sb;
     }
 
-    inode->i_uid = uid;
-    inode->i_gid = gid;
-
     root = d_alloc_root(inode);
     if (!root) {
         iput(inode);
@@ -173,10 +169,8 @@ P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
     simple_set_mnt(mnt, sb);
     return 0;
 
-release_sb:
-    deactivate_locked_super(sb);
-
 free_stat:
+    p9stat_free(st);
     kfree(st);
 
 clunk_fid:
@@ -185,7 +179,12 @@ clunk_fid:
 close_session:
     v9fs_session_close(v9ses);
     kfree(v9ses);
+    return retval;
 
+release_sb:
+    p9stat_free(st);
+    kfree(st);
+    deactivate_locked_super(sb);
     return retval;
 }
 
@@ -207,24 +206,10 @@ static void v9fs_kill_super(struct super_block *s)
 
     v9fs_session_close(v9ses);
     kfree(v9ses);
+    s->s_fs_info = NULL;
     P9_DPRINTK(P9_DEBUG_VFS, "exiting kill_super\n");
 }
 
-/**
- * v9fs_show_options - Show mount options in /proc/mounts
- * @m: seq_file to write to
- * @mnt: mount descriptor
- *
- */
-
-static int v9fs_show_options(struct seq_file *m, struct vfsmount *mnt)
-{
-    struct v9fs_session_info *v9ses = mnt->mnt_sb->s_fs_info;
-
-    seq_printf(m, "%s", v9ses->options);
-    return 0;
-}
-
 static void
 v9fs_umount_begin(struct super_block *sb)
 {
@@ -237,7 +222,7 @@ v9fs_umount_begin(struct super_block *sb)
 static const struct super_operations v9fs_super_ops = {
     .statfs = simple_statfs,
     .clear_inode = v9fs_clear_inode,
-    .show_options = v9fs_show_options,
+    .show_options = generic_show_options,
     .umount_begin = v9fs_umount_begin,
 };
 
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 0149dab365e7..681c2a7b013f 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -134,9 +134,16 @@ static int afs_readpage(struct file *file, struct page *page)
 
     inode = page->mapping->host;
 
-    ASSERT(file != NULL);
-    key = file->private_data;
-    ASSERT(key != NULL);
+    if (file) {
+        key = file->private_data;
+        ASSERT(key != NULL);
+    } else {
+        key = afs_request_key(AFS_FS_S(inode->i_sb)->volume->cell);
+        if (IS_ERR(key)) {
+            ret = PTR_ERR(key);
+            goto error_nokey;
+        }
+    }
 
     _enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index);
 
@@ -207,12 +214,17 @@ static int afs_readpage(struct file *file, struct page *page)
         unlock_page(page);
     }
 
+    if (!file)
+        key_put(key);
     _leave(" = 0");
     return 0;
 
 error:
     SetPageError(page);
     unlock_page(page);
+    if (!file)
+        key_put(key);
+error_nokey:
     _leave(" = %d", ret);
     return ret;
 }
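
In the afs_readpage() hunks above, key ownership now depends on who supplied
it: a key borrowed from file->private_data must not be released, while one
obtained through afs_request_key() must be key_put() on every exit path, which
is why the new error_nokey label lands after the conditional put. In sketch
form (get_cell_key() is a hypothetical stand-in for the afs_request_key()
call, not a real API):

    static int readpage_sketch(struct file *file, struct inode *inode)
    {
        struct key *key;

        if (file) {
            key = file->private_data;   /* borrowed, not ours to put */
        } else {
            key = get_cell_key(inode);  /* hypothetical helper */
            if (IS_ERR(key))
                return PTR_ERR(key);    /* error_nokey: nothing to put */
        }

        /* ... issue the read using key ... */

        if (!file)
            key_put(key);               /* drop only the ref we took */
        return 0;
    }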
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index aa39ae83f019..3da18d453488 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -77,7 +77,7 @@ static int autofs4_mount_busy(struct vfsmount *mnt, struct dentry *dentry)
     }
 
     /* Update the expiry counter if fs is busy */
-    if (!may_umount_tree(mnt)) {
+    if (!may_umount_tree(path.mnt)) {
         struct autofs_info *ino = autofs4_dentry_ino(top);
         ino->last_used = jiffies;
         goto done;
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 697f6b5f1313..e92f229e3c6e 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -828,15 +828,22 @@ static int load_flat_shared_library(int id, struct lib_info *libs)
     if (IS_ERR(bprm.file))
         return res;
 
+    bprm.cred = prepare_exec_creds();
+    res = -ENOMEM;
+    if (!bprm.cred)
+        goto out;
+
     res = prepare_binprm(&bprm);
 
     if (res <= (unsigned long)-4096)
         res = load_flat_file(&bprm, libs, id, NULL);
-    if (bprm.file) {
-        allow_write_access(bprm.file);
-        fput(bprm.file);
-        bprm.file = NULL;
-    }
+
+    abort_creds(bprm.cred);
+
+out:
+    allow_write_access(bprm.file);
+    fput(bprm.file);
+
     return(res);
 }
 
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 3a6d4fb2a329..94dfda24c06e 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -564,6 +564,16 @@ struct block_device *bdget(dev_t dev)
 
 EXPORT_SYMBOL(bdget);
 
+/**
+ * bdgrab -- Grab a reference to an already referenced block device
+ * @bdev:   Block device to grab a reference to.
+ */
+struct block_device *bdgrab(struct block_device *bdev)
+{
+    atomic_inc(&bdev->bd_inode->i_count);
+    return bdev;
+}
+
 long nr_blockdev_pages(void)
 {
     struct block_device *bdev;
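
bdgrab(), introduced above, takes an extra reference on a block device that
is already referenced by bumping the backing inode's i_count. Unlike bdget()
it performs no lookup and cannot sleep, so it is usable under a spinlock or
in atomic context. A hedged usage sketch (helper names are illustrative):

    /* Pin a bdev we already hold across a later async completion. */
    static struct block_device *pin_bdev(struct block_device *bdev)
    {
        /* caller must already hold a reference, e.g. from bdget() */
        return bdgrab(bdev);    /* just an atomic_inc(), never sleeps */
    }

    static void unpin_bdev(struct block_device *bdev)
    {
        bdput(bdev);            /* drop the reference taken above */
    }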
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 6e4f6c50a120..019e8af449ab 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -424,11 +424,11 @@ int btrfs_requeue_work(struct btrfs_work *work)
      * list
      */
     if (worker->idle) {
-        spin_lock_irqsave(&worker->workers->lock, flags);
+        spin_lock(&worker->workers->lock);
         worker->idle = 0;
         list_move_tail(&worker->worker_list,
                        &worker->workers->worker_list);
-        spin_unlock_irqrestore(&worker->workers->lock, flags);
+        spin_unlock(&worker->workers->lock);
     }
     if (!worker->working) {
         wake = 1;
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 60a45f3a4e91..3fdcc0512d3a 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -557,19 +557,7 @@ static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
 
     btrfs_disk_key_to_cpu(&k1, disk);
 
-    if (k1.objectid > k2->objectid)
-        return 1;
-    if (k1.objectid < k2->objectid)
-        return -1;
-    if (k1.type > k2->type)
-        return 1;
-    if (k1.type < k2->type)
-        return -1;
-    if (k1.offset > k2->offset)
-        return 1;
-    if (k1.offset < k2->offset)
-        return -1;
-    return 0;
+    return btrfs_comp_cpu_keys(&k1, k2);
 }
 
 /*
@@ -1052,9 +1040,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
         BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
         return 0;
 
-    if (btrfs_header_nritems(mid) > 2)
-        return 0;
-
     if (btrfs_header_nritems(mid) < 2)
         err_on_enospc = 1;
 
@@ -1701,6 +1686,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
     struct extent_buffer *b;
     int slot;
     int ret;
+    int err;
     int level;
     int lowest_unlock = 1;
     u8 lowest_level = 0;
@@ -1737,8 +1723,6 @@ again:
         p->locks[level] = 1;
 
         if (cow) {
-            int wret;
-
             /*
              * if we don't really need to cow this block
              * then we don't want to set the path blocking,
@@ -1749,12 +1733,12 @@ again:
 
             btrfs_set_path_blocking(p);
 
-            wret = btrfs_cow_block(trans, root, b,
-                                   p->nodes[level + 1],
-                                   p->slots[level + 1], &b);
-            if (wret) {
+            err = btrfs_cow_block(trans, root, b,
+                                  p->nodes[level + 1],
+                                  p->slots[level + 1], &b);
+            if (err) {
                 free_extent_buffer(b);
-                ret = wret;
+                ret = err;
                 goto done;
             }
         }
@@ -1793,41 +1777,45 @@ cow_done:
         ret = bin_search(b, key, level, &slot);
 
         if (level != 0) {
-            if (ret && slot > 0)
+            int dec = 0;
+            if (ret && slot > 0) {
+                dec = 1;
                 slot -= 1;
+            }
             p->slots[level] = slot;
-            ret = setup_nodes_for_search(trans, root, p, b, level,
+            err = setup_nodes_for_search(trans, root, p, b, level,
                                          ins_len);
-            if (ret == -EAGAIN)
+            if (err == -EAGAIN)
                 goto again;
-            else if (ret)
+            if (err) {
+                ret = err;
                 goto done;
+            }
             b = p->nodes[level];
             slot = p->slots[level];
 
             unlock_up(p, level, lowest_unlock);
 
-            /* this is only true while dropping a snapshot */
             if (level == lowest_level) {
-                ret = 0;
+                if (dec)
+                    p->slots[level]++;
                 goto done;
             }
 
-            ret = read_block_for_search(trans, root, p,
+            err = read_block_for_search(trans, root, p,
                                         &b, level, slot, key);
-            if (ret == -EAGAIN)
+            if (err == -EAGAIN)
                 goto again;
-
-            if (ret == -EIO)
+            if (err) {
+                ret = err;
                 goto done;
+            }
 
             if (!p->skip_locking) {
-                int lret;
-
                 btrfs_clear_path_blocking(p, NULL);
-                lret = btrfs_try_spin_lock(b);
+                err = btrfs_try_spin_lock(b);
 
-                if (!lret) {
+                if (!err) {
                     btrfs_set_path_blocking(p);
                     btrfs_tree_lock(b);
                     btrfs_clear_path_blocking(p, b);
@@ -1837,16 +1825,14 @@ cow_done:
             p->slots[level] = slot;
             if (ins_len > 0 &&
                 btrfs_leaf_free_space(root, b) < ins_len) {
-                int sret;
-
                 btrfs_set_path_blocking(p);
-                sret = split_leaf(trans, root, key,
+                err = split_leaf(trans, root, key,
                                   p, ins_len, ret == 0);
                 btrfs_clear_path_blocking(p, NULL);
 
-                BUG_ON(sret > 0);
-                if (sret) {
-                    ret = sret;
+                BUG_ON(err > 0);
+                if (err) {
+                    ret = err;
                     goto done;
                 }
             }
@@ -3807,7 +3793,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
     }
 
     /* delete the leaf if it is mostly empty */
-    if (used < BTRFS_LEAF_DATA_SIZE(root) / 2) {
+    if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
         /* push_leaf_left fixes the path.
          * make sure the path still points to our leaf
          * for possible call to del_ptr below
@@ -4042,10 +4028,9 @@ out:
  * calling this function.
  */
 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
-            struct btrfs_key *key, int lowest_level,
+            struct btrfs_key *key, int level,
             int cache_only, u64 min_trans)
 {
-    int level = lowest_level;
     int slot;
     struct extent_buffer *c;
 
@@ -4058,11 +4043,40 @@ int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
     c = path->nodes[level];
 next:
     if (slot >= btrfs_header_nritems(c)) {
-        level++;
-        if (level == BTRFS_MAX_LEVEL)
+        int ret;
+        int orig_lowest;
+        struct btrfs_key cur_key;
+        if (level + 1 >= BTRFS_MAX_LEVEL ||
+            !path->nodes[level + 1])
             return 1;
-        continue;
+
+        if (path->locks[level + 1]) {
+            level++;
+            continue;
+        }
+
+        slot = btrfs_header_nritems(c) - 1;
+        if (level == 0)
+            btrfs_item_key_to_cpu(c, &cur_key, slot);
+        else
+            btrfs_node_key_to_cpu(c, &cur_key, slot);
+
+        orig_lowest = path->lowest_level;
+        btrfs_release_path(root, path);
+        path->lowest_level = level;
+        ret = btrfs_search_slot(NULL, root, &cur_key, path,
+                                0, 0);
+        path->lowest_level = orig_lowest;
+        if (ret < 0)
+            return ret;
+
+        c = path->nodes[level];
+        slot = path->slots[level];
+        if (ret == 0)
+            slot++;
+        goto next;
     }
+
     if (level == 0)
         btrfs_item_key_to_cpu(c, key, slot);
     else {
@@ -4146,7 +4160,8 @@ again:
      * advance the path if there are now more items available.
      */
     if (nritems > 0 && path->slots[0] < nritems - 1) {
-        path->slots[0]++;
+        if (ret == 0)
+            path->slots[0]++;
         ret = 0;
         goto done;
     }
@@ -4278,10 +4293,10 @@ int btrfs_previous_item(struct btrfs_root *root,
         path->slots[0]--;
 
         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
-        if (found_key.type == type)
-            return 0;
         if (found_key.objectid < min_objectid)
             break;
+        if (found_key.type == type)
+            return 0;
         if (found_key.objectid == min_objectid &&
             found_key.type < type)
             break;
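
The comp_keys() hunk at the top of the fs/btrfs/ctree.c section now converts
the on-disk key once and defers to btrfs_comp_cpu_keys(), deleting a duplicated
copy of the comparison. The shared helper's logic is the same lexicographic
objectid/type/offset compare that the removed lines implemented:

    /* Compare two CPU-order btrfs keys: objectid, then type, then offset. */
    static int comp_cpu_keys_sketch(const struct btrfs_key *k1,
                                    const struct btrfs_key *k2)
    {
        if (k1->objectid > k2->objectid)
            return 1;
        if (k1->objectid < k2->objectid)
            return -1;
        if (k1->type > k2->type)
            return 1;
        if (k1->type < k2->type)
            return -1;
        if (k1->offset > k2->offset)
            return 1;
        if (k1->offset < k2->offset)
            return -1;
        return 0;
    }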
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 98a873838717..837435ce84ca 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -481,7 +481,7 @@ struct btrfs_shared_data_ref {
 
 struct btrfs_extent_inline_ref {
     u8 type;
-    u64 offset;
+    __le64 offset;
 } __attribute__ ((__packed__));
 
 /* old style backrefs item */
@@ -689,6 +689,7 @@ struct btrfs_space_info {
     struct list_head block_groups;
     spinlock_t lock;
     struct rw_semaphore groups_sem;
+    atomic_t caching_threads;
 };
 
 /*
@@ -707,6 +708,9 @@ struct btrfs_free_cluster {
     /* first extent starting offset */
     u64 window_start;
 
+    /* if this cluster simply points at a bitmap in the block group */
+    bool points_to_bitmap;
+
     struct btrfs_block_group_cache *block_group;
     /*
      * when a cluster is allocated from a block group, we put the
@@ -716,24 +720,37 @@ struct btrfs_free_cluster {
     struct list_head block_group_list;
 };
 
+enum btrfs_caching_type {
+    BTRFS_CACHE_NO       = 0,
+    BTRFS_CACHE_STARTED  = 1,
+    BTRFS_CACHE_FINISHED = 2,
+};
+
 struct btrfs_block_group_cache {
     struct btrfs_key key;
     struct btrfs_block_group_item item;
+    struct btrfs_fs_info *fs_info;
     spinlock_t lock;
-    struct mutex cache_mutex;
     u64 pinned;
     u64 reserved;
     u64 flags;
-    int cached;
+    u64 sectorsize;
+    int extents_thresh;
+    int free_extents;
+    int total_bitmaps;
     int ro;
     int dirty;
 
+    /* cache tracking stuff */
+    wait_queue_head_t caching_q;
+    int cached;
+
     struct btrfs_space_info *space_info;
 
     /* free space cache stuff */
     spinlock_t tree_lock;
-    struct rb_root free_space_bytes;
     struct rb_root free_space_offset;
+    u64 free_space;
 
     /* block group cache stuff */
     struct rb_node cache_node;
@@ -808,6 +825,7 @@ struct btrfs_fs_info {
     struct mutex drop_mutex;
     struct mutex volume_mutex;
     struct mutex tree_reloc_mutex;
+    struct rw_semaphore extent_commit_sem;
 
     /*
      * this protects the ordered operations list only while we are
@@ -1988,6 +2006,7 @@ void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode,
             u64 bytes);
 void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
             u64 bytes);
+void btrfs_free_pinned_extents(struct btrfs_fs_info *info);
 /* ctree.c */
 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
              int level, int *slot);
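
The btrfs_caching_type enum above replaces the old boolean cached flag with a
small state machine that the extent-tree.c hunks below drive from
BTRFS_CACHE_NO to BTRFS_CACHE_STARTED (when the caching kthread is spawned)
to BTRFS_CACHE_FINISHED (when the scan completes), always under cache->lock.
Roughly:

    static int start_caching_sketch(struct btrfs_block_group_cache *cache)
    {
        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_NO) {  /* already started or done */
            spin_unlock(&cache->lock);
            return 0;
        }
        cache->cached = BTRFS_CACHE_STARTED;
        spin_unlock(&cache->lock);
        /* ... kthread_run(caching_kthread, cache, ...) ... */
        return 0;
    }

    static void finish_caching_sketch(struct btrfs_block_group_cache *cache)
    {
        spin_lock(&cache->lock);
        cache->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&cache->lock);
        wake_up(&cache->caching_q);     /* unblock waiting allocators */
    }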
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index d28d29c95f7c..e83be2e4602c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1639,6 +1639,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
     mutex_init(&fs_info->cleaner_mutex);
     mutex_init(&fs_info->volume_mutex);
     mutex_init(&fs_info->tree_reloc_mutex);
+    init_rwsem(&fs_info->extent_commit_sem);
 
     btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
     btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
@@ -1799,6 +1800,11 @@ struct btrfs_root *open_ctree(struct super_block *sb,
                        btrfs_super_chunk_root(disk_super),
                        blocksize, generation);
     BUG_ON(!chunk_root->node);
+    if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
+        printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
+               sb->s_id);
+        goto fail_chunk_root;
+    }
     btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
     chunk_root->commit_root = btrfs_root_node(chunk_root);
 
@@ -1826,6 +1832,11 @@ struct btrfs_root *open_ctree(struct super_block *sb,
                       blocksize, generation);
     if (!tree_root->node)
         goto fail_chunk_root;
+    if (!test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
+        printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
+               sb->s_id);
+        goto fail_tree_root;
+    }
     btrfs_set_root_node(&tree_root->root_item, tree_root->node);
     tree_root->commit_root = btrfs_root_node(tree_root);
 
@@ -2322,6 +2333,9 @@ int close_ctree(struct btrfs_root *root)
         printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
     }
 
+    fs_info->closing = 2;
+    smp_mb();
+
     if (fs_info->delalloc_bytes) {
         printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
                (unsigned long long)fs_info->delalloc_bytes);
@@ -2343,6 +2357,7 @@ int close_ctree(struct btrfs_root *root)
     free_extent_buffer(root->fs_info->csum_root->commit_root);
 
     btrfs_free_block_groups(root->fs_info);
+    btrfs_free_pinned_extents(root->fs_info);
 
     del_fs_roots(fs_info);
 
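
The close_ctree() hunk above pairs with the caching kthread added in
extent-tree.c below: unmount publishes fs_info->closing = 2 behind a memory
barrier, and the kthread re-reads the flag (with its own barrier) on each
loop iteration so it can bail out promptly. The handshake, in sketch form:

    static void signal_close_sketch(struct btrfs_fs_info *fs_info)
    {
        fs_info->closing = 2;
        smp_mb();       /* pairs with the reader's barrier below */
    }

    static bool should_abort_caching_sketch(struct btrfs_fs_info *fs_info)
    {
        smp_mb();       /* see the store in signal_close_sketch() */
        return fs_info->closing > 1;
    }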
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index a5aca3997d42..72a2b9c28e9f 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -21,6 +21,7 @@
21#include <linux/blkdev.h> 21#include <linux/blkdev.h>
22#include <linux/sort.h> 22#include <linux/sort.h>
23#include <linux/rcupdate.h> 23#include <linux/rcupdate.h>
24#include <linux/kthread.h>
24#include "compat.h" 25#include "compat.h"
25#include "hash.h" 26#include "hash.h"
26#include "ctree.h" 27#include "ctree.h"
@@ -61,6 +62,13 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
61 struct btrfs_root *extent_root, u64 alloc_bytes, 62 struct btrfs_root *extent_root, u64 alloc_bytes,
62 u64 flags, int force); 63 u64 flags, int force);
63 64
65static noinline int
66block_group_cache_done(struct btrfs_block_group_cache *cache)
67{
68 smp_mb();
69 return cache->cached == BTRFS_CACHE_FINISHED;
70}
71
64static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) 72static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
65{ 73{
66 return (cache->flags & bits) == bits; 74 return (cache->flags & bits) == bits;
@@ -146,20 +154,70 @@ block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
146} 154}
147 155
148/* 156/*
157 * We always set EXTENT_LOCKED for the super mirror extents so we don't
158 * overwrite them, so those bits need to be unset. Also, if we are unmounting
159 * with pinned extents still sitting there because we had a block group caching,
160 * we need to clear those now, since we are done.
161 */
162void btrfs_free_pinned_extents(struct btrfs_fs_info *info)
163{
164 u64 start, end, last = 0;
165 int ret;
166
167 while (1) {
168 ret = find_first_extent_bit(&info->pinned_extents, last,
169 &start, &end,
170 EXTENT_LOCKED|EXTENT_DIRTY);
171 if (ret)
172 break;
173
174 clear_extent_bits(&info->pinned_extents, start, end,
175 EXTENT_LOCKED|EXTENT_DIRTY, GFP_NOFS);
176 last = end+1;
177 }
178}
179
180static int remove_sb_from_cache(struct btrfs_root *root,
181 struct btrfs_block_group_cache *cache)
182{
183 struct btrfs_fs_info *fs_info = root->fs_info;
184 u64 bytenr;
185 u64 *logical;
186 int stripe_len;
187 int i, nr, ret;
188
189 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
190 bytenr = btrfs_sb_offset(i);
191 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
192 cache->key.objectid, bytenr,
193 0, &logical, &nr, &stripe_len);
194 BUG_ON(ret);
195 while (nr--) {
196 try_lock_extent(&fs_info->pinned_extents,
197 logical[nr],
198 logical[nr] + stripe_len - 1, GFP_NOFS);
199 }
200 kfree(logical);
201 }
202
203 return 0;
204}
205
206/*
149 * this is only called by cache_block_group, since we could have freed extents 207 * this is only called by cache_block_group, since we could have freed extents
150 * we need to check the pinned_extents for any extents that can't be used yet 208 * we need to check the pinned_extents for any extents that can't be used yet
151 * since their free space will be released as soon as the transaction commits. 209 * since their free space will be released as soon as the transaction commits.
152 */ 210 */
153static int add_new_free_space(struct btrfs_block_group_cache *block_group, 211static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
154 struct btrfs_fs_info *info, u64 start, u64 end) 212 struct btrfs_fs_info *info, u64 start, u64 end)
155{ 213{
156 u64 extent_start, extent_end, size; 214 u64 extent_start, extent_end, size, total_added = 0;
157 int ret; 215 int ret;
158 216
159 while (start < end) { 217 while (start < end) {
160 ret = find_first_extent_bit(&info->pinned_extents, start, 218 ret = find_first_extent_bit(&info->pinned_extents, start,
161 &extent_start, &extent_end, 219 &extent_start, &extent_end,
162 EXTENT_DIRTY); 220 EXTENT_DIRTY|EXTENT_LOCKED);
163 if (ret) 221 if (ret)
164 break; 222 break;
165 223
@@ -167,6 +225,7 @@ static int add_new_free_space(struct btrfs_block_group_cache *block_group,
167 start = extent_end + 1; 225 start = extent_end + 1;
168 } else if (extent_start > start && extent_start < end) { 226 } else if (extent_start > start && extent_start < end) {
169 size = extent_start - start; 227 size = extent_start - start;
228 total_added += size;
170 ret = btrfs_add_free_space(block_group, start, 229 ret = btrfs_add_free_space(block_group, start,
171 size); 230 size);
172 BUG_ON(ret); 231 BUG_ON(ret);
@@ -178,84 +237,93 @@ static int add_new_free_space(struct btrfs_block_group_cache *block_group,
178 237
179 if (start < end) { 238 if (start < end) {
180 size = end - start; 239 size = end - start;
240 total_added += size;
181 ret = btrfs_add_free_space(block_group, start, size); 241 ret = btrfs_add_free_space(block_group, start, size);
182 BUG_ON(ret); 242 BUG_ON(ret);
183 } 243 }
184 244
185 return 0; 245 return total_added;
186}
187
188static int remove_sb_from_cache(struct btrfs_root *root,
189 struct btrfs_block_group_cache *cache)
190{
191 u64 bytenr;
192 u64 *logical;
193 int stripe_len;
194 int i, nr, ret;
195
196 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
197 bytenr = btrfs_sb_offset(i);
198 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
199 cache->key.objectid, bytenr, 0,
200 &logical, &nr, &stripe_len);
201 BUG_ON(ret);
202 while (nr--) {
203 btrfs_remove_free_space(cache, logical[nr],
204 stripe_len);
205 }
206 kfree(logical);
207 }
208 return 0;
209} 246}
210 247
211static int cache_block_group(struct btrfs_root *root, 248static int caching_kthread(void *data)
212 struct btrfs_block_group_cache *block_group)
213{ 249{
250 struct btrfs_block_group_cache *block_group = data;
251 struct btrfs_fs_info *fs_info = block_group->fs_info;
252 u64 last = 0;
214 struct btrfs_path *path; 253 struct btrfs_path *path;
215 int ret = 0; 254 int ret = 0;
216 struct btrfs_key key; 255 struct btrfs_key key;
217 struct extent_buffer *leaf; 256 struct extent_buffer *leaf;
218 int slot; 257 int slot;
219 u64 last; 258 u64 total_found = 0;
220
221 if (!block_group)
222 return 0;
223
224 root = root->fs_info->extent_root;
225 259
226 if (block_group->cached) 260 BUG_ON(!fs_info);
227 return 0;
228 261
229 path = btrfs_alloc_path(); 262 path = btrfs_alloc_path();
230 if (!path) 263 if (!path)
231 return -ENOMEM; 264 return -ENOMEM;
232 265
233 path->reada = 2; 266 atomic_inc(&block_group->space_info->caching_threads);
267 last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
234 /* 268 /*
235 * we get into deadlocks with paths held by callers of this function. 269 * We don't want to deadlock with somebody trying to allocate a new
236 * since the alloc_mutex is protecting things right now, just 270 * extent for the extent root while also trying to search the extent
237 * skip the locking here 271 * root to add free space. So we skip locking and search the commit
272 * root, since its read-only
238 */ 273 */
239 path->skip_locking = 1; 274 path->skip_locking = 1;
240 last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); 275 path->search_commit_root = 1;
276 path->reada = 2;
277
241 key.objectid = last; 278 key.objectid = last;
242 key.offset = 0; 279 key.offset = 0;
243 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY); 280 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
244 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 281again:
282 /* need to make sure the commit_root doesn't disappear */
283 down_read(&fs_info->extent_commit_sem);
284
285 ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
245 if (ret < 0) 286 if (ret < 0)
246 goto err; 287 goto err;
247 288
248 while (1) { 289 while (1) {
290 smp_mb();
291 if (block_group->fs_info->closing > 1) {
292 last = (u64)-1;
293 break;
294 }
295
249 leaf = path->nodes[0]; 296 leaf = path->nodes[0];
250 slot = path->slots[0]; 297 slot = path->slots[0];
251 if (slot >= btrfs_header_nritems(leaf)) { 298 if (slot >= btrfs_header_nritems(leaf)) {
252 ret = btrfs_next_leaf(root, path); 299 ret = btrfs_next_leaf(fs_info->extent_root, path);
253 if (ret < 0) 300 if (ret < 0)
254 goto err; 301 goto err;
255 if (ret == 0) 302 else if (ret)
256 continue;
257 else
258 break; 303 break;
304
305 if (need_resched() ||
306 btrfs_transaction_in_commit(fs_info)) {
307 leaf = path->nodes[0];
308
309 /* this shouldn't happen, but if the
310 * leaf is empty just move on.
311 */
312 if (btrfs_header_nritems(leaf) == 0)
313 break;
314 /*
315 * we need to copy the key out so that
316 * we are sure the next search advances
317 * us forward in the btree.
318 */
319 btrfs_item_key_to_cpu(leaf, &key, 0);
320 btrfs_release_path(fs_info->extent_root, path);
321 up_read(&fs_info->extent_commit_sem);
322 schedule_timeout(1);
323 goto again;
324 }
325
326 continue;
259 } 327 }
260 btrfs_item_key_to_cpu(leaf, &key, slot); 328 btrfs_item_key_to_cpu(leaf, &key, slot);
261 if (key.objectid < block_group->key.objectid) 329 if (key.objectid < block_group->key.objectid)
@@ -266,24 +334,59 @@ static int cache_block_group(struct btrfs_root *root,
266 break; 334 break;
267 335
268 if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) { 336 if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
269 add_new_free_space(block_group, root->fs_info, last, 337 total_found += add_new_free_space(block_group,
270 key.objectid); 338 fs_info, last,
271 339 key.objectid);
272 last = key.objectid + key.offset; 340 last = key.objectid + key.offset;
273 } 341 }
342
343 if (total_found > (1024 * 1024 * 2)) {
344 total_found = 0;
345 wake_up(&block_group->caching_q);
346 }
274next: 347next:
275 path->slots[0]++; 348 path->slots[0]++;
276 } 349 }
350 ret = 0;
277 351
278 add_new_free_space(block_group, root->fs_info, last, 352 total_found += add_new_free_space(block_group, fs_info, last,
279 block_group->key.objectid + 353 block_group->key.objectid +
280 block_group->key.offset); 354 block_group->key.offset);
355
356 spin_lock(&block_group->lock);
357 block_group->cached = BTRFS_CACHE_FINISHED;
358 spin_unlock(&block_group->lock);
281 359
282 block_group->cached = 1;
283 remove_sb_from_cache(root, block_group);
284 ret = 0;
285err: 360err:
286 btrfs_free_path(path); 361 btrfs_free_path(path);
362 up_read(&fs_info->extent_commit_sem);
363 atomic_dec(&block_group->space_info->caching_threads);
364 wake_up(&block_group->caching_q);
365
366 return 0;
367}
368
369static int cache_block_group(struct btrfs_block_group_cache *cache)
370{
371 struct task_struct *tsk;
372 int ret = 0;
373
374 spin_lock(&cache->lock);
375 if (cache->cached != BTRFS_CACHE_NO) {
376 spin_unlock(&cache->lock);
377 return ret;
378 }
379 cache->cached = BTRFS_CACHE_STARTED;
380 spin_unlock(&cache->lock);
381
382 tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
383 cache->key.objectid);
384 if (IS_ERR(tsk)) {
385 ret = PTR_ERR(tsk);
386 printk(KERN_ERR "error running thread %d\n", ret);
387 BUG();
388 }
389
287 return ret; 390 return ret;
288} 391}
289 392
@@ -2387,13 +2490,29 @@ fail:
2387 2490
2388} 2491}
2389 2492
2493static struct btrfs_block_group_cache *
2494next_block_group(struct btrfs_root *root,
2495 struct btrfs_block_group_cache *cache)
2496{
2497 struct rb_node *node;
2498 spin_lock(&root->fs_info->block_group_cache_lock);
2499 node = rb_next(&cache->cache_node);
2500 btrfs_put_block_group(cache);
2501 if (node) {
2502 cache = rb_entry(node, struct btrfs_block_group_cache,
2503 cache_node);
2504 atomic_inc(&cache->count);
2505 } else
2506 cache = NULL;
2507 spin_unlock(&root->fs_info->block_group_cache_lock);
2508 return cache;
2509}
2510
2390int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, 2511int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2391 struct btrfs_root *root) 2512 struct btrfs_root *root)
2392{ 2513{
2393 struct btrfs_block_group_cache *cache, *entry; 2514 struct btrfs_block_group_cache *cache;
2394 struct rb_node *n;
2395 int err = 0; 2515 int err = 0;
2396 int werr = 0;
2397 struct btrfs_path *path; 2516 struct btrfs_path *path;
2398 u64 last = 0; 2517 u64 last = 0;
2399 2518
@@ -2402,39 +2521,35 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2402 return -ENOMEM; 2521 return -ENOMEM;
2403 2522
2404 while (1) { 2523 while (1) {
2405 cache = NULL; 2524 if (last == 0) {
2406 spin_lock(&root->fs_info->block_group_cache_lock); 2525 err = btrfs_run_delayed_refs(trans, root,
2407 for (n = rb_first(&root->fs_info->block_group_cache_tree); 2526 (unsigned long)-1);
2408 n; n = rb_next(n)) { 2527 BUG_ON(err);
2409 entry = rb_entry(n, struct btrfs_block_group_cache,
2410 cache_node);
2411 if (entry->dirty) {
2412 cache = entry;
2413 break;
2414 }
2415 } 2528 }
2416 spin_unlock(&root->fs_info->block_group_cache_lock);
2417 2529
2418 if (!cache) 2530 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2419 break; 2531 while (cache) {
2532 if (cache->dirty)
2533 break;
2534 cache = next_block_group(root, cache);
2535 }
2536 if (!cache) {
2537 if (last == 0)
2538 break;
2539 last = 0;
2540 continue;
2541 }
2420 2542
2421 cache->dirty = 0; 2543 cache->dirty = 0;
2422 last += cache->key.offset; 2544 last = cache->key.objectid + cache->key.offset;
2423 2545
2424 err = write_one_cache_group(trans, root, 2546 err = write_one_cache_group(trans, root, path, cache);
2425 path, cache); 2547 BUG_ON(err);
2426 /* 2548 btrfs_put_block_group(cache);
2427 * if we fail to write the cache group, we want
2428 * to keep it marked dirty in hopes that a later
2429 * write will work
2430 */
2431 if (err) {
2432 werr = err;
2433 continue;
2434 }
2435 } 2549 }
2550
2436 btrfs_free_path(path); 2551 btrfs_free_path(path);
2437 return werr; 2552 return 0;
2438} 2553}
2439 2554
2440int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr) 2555int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
@@ -2484,6 +2599,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
2484 found->force_alloc = 0; 2599 found->force_alloc = 0;
2485 *space_info = found; 2600 *space_info = found;
2486 list_add_rcu(&found->list, &info->space_info); 2601 list_add_rcu(&found->list, &info->space_info);
2602 atomic_set(&found->caching_threads, 0);
2487 return 0; 2603 return 0;
2488} 2604}
2489 2605
@@ -2947,13 +3063,9 @@ int btrfs_update_pinned_extents(struct btrfs_root *root,
2947 struct btrfs_block_group_cache *cache; 3063 struct btrfs_block_group_cache *cache;
2948 struct btrfs_fs_info *fs_info = root->fs_info; 3064 struct btrfs_fs_info *fs_info = root->fs_info;
2949 3065
2950 if (pin) { 3066 if (pin)
2951 set_extent_dirty(&fs_info->pinned_extents, 3067 set_extent_dirty(&fs_info->pinned_extents,
2952 bytenr, bytenr + num - 1, GFP_NOFS); 3068 bytenr, bytenr + num - 1, GFP_NOFS);
2953 } else {
2954 clear_extent_dirty(&fs_info->pinned_extents,
2955 bytenr, bytenr + num - 1, GFP_NOFS);
2956 }
2957 3069
2958 while (num > 0) { 3070 while (num > 0) {
2959 cache = btrfs_lookup_block_group(fs_info, bytenr); 3071 cache = btrfs_lookup_block_group(fs_info, bytenr);
@@ -2969,14 +3081,34 @@ int btrfs_update_pinned_extents(struct btrfs_root *root,
2969 spin_unlock(&cache->space_info->lock); 3081 spin_unlock(&cache->space_info->lock);
2970 fs_info->total_pinned += len; 3082 fs_info->total_pinned += len;
2971 } else { 3083 } else {
3084 int unpin = 0;
3085
3086 /*
3087 * in order to not race with the block group caching, we
3088 * only want to unpin the extent if we are cached. If
3089 * we aren't cached, we want to start async caching this
3090 * block group so we can free the extent the next time
3091 * around.
3092 */
2972 spin_lock(&cache->space_info->lock); 3093 spin_lock(&cache->space_info->lock);
2973 spin_lock(&cache->lock); 3094 spin_lock(&cache->lock);
2974 cache->pinned -= len; 3095 unpin = (cache->cached == BTRFS_CACHE_FINISHED);
2975 cache->space_info->bytes_pinned -= len; 3096 if (likely(unpin)) {
3097 cache->pinned -= len;
3098 cache->space_info->bytes_pinned -= len;
3099 fs_info->total_pinned -= len;
3100 }
2976 spin_unlock(&cache->lock); 3101 spin_unlock(&cache->lock);
2977 spin_unlock(&cache->space_info->lock); 3102 spin_unlock(&cache->space_info->lock);
2978 fs_info->total_pinned -= len; 3103
2979 if (cache->cached) 3104 if (likely(unpin))
3105 clear_extent_dirty(&fs_info->pinned_extents,
3106 bytenr, bytenr + len -1,
3107 GFP_NOFS);
3108 else
3109 cache_block_group(cache);
3110
3111 if (unpin)
2980 btrfs_add_free_space(cache, bytenr, len); 3112 btrfs_add_free_space(cache, bytenr, len);
2981 } 3113 }
2982 btrfs_put_block_group(cache); 3114 btrfs_put_block_group(cache);
@@ -3030,6 +3162,7 @@ int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
3030 &start, &end, EXTENT_DIRTY); 3162 &start, &end, EXTENT_DIRTY);
3031 if (ret) 3163 if (ret)
3032 break; 3164 break;
3165
3033 set_extent_dirty(copy, start, end, GFP_NOFS); 3166 set_extent_dirty(copy, start, end, GFP_NOFS);
3034 last = end + 1; 3167 last = end + 1;
3035 } 3168 }
@@ -3058,6 +3191,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
3058 3191
3059 cond_resched(); 3192 cond_resched();
3060 } 3193 }
3194
3061 return ret; 3195 return ret;
3062} 3196}
3063 3197
@@ -3436,6 +3570,45 @@ static u64 stripe_align(struct btrfs_root *root, u64 val)
3436} 3570}
3437 3571
3438/* 3572/*
3573 * when we wait for progress in the block group caching, its because
3574 * our allocation attempt failed at least once. So, we must sleep
3575 * and let some progress happen before we try again.
3576 *
3577 * This function will sleep at least once waiting for new free space to
3578 * show up, and then it will check the block group free space numbers
3579 * for our min num_bytes. Another option is to have it go ahead
3580 * and look in the rbtree for a free extent of a given size, but this
3581 * is a good start.
3582 */
3583static noinline int
3584wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
3585 u64 num_bytes)
3586{
3587 DEFINE_WAIT(wait);
3588
3589 prepare_to_wait(&cache->caching_q, &wait, TASK_UNINTERRUPTIBLE);
3590
3591 if (block_group_cache_done(cache)) {
3592 finish_wait(&cache->caching_q, &wait);
3593 return 0;
3594 }
3595 schedule();
3596 finish_wait(&cache->caching_q, &wait);
3597
3598 wait_event(cache->caching_q, block_group_cache_done(cache) ||
3599 (cache->free_space >= num_bytes));
3600 return 0;
3601}
3602
3603enum btrfs_loop_type {
3604 LOOP_CACHED_ONLY = 0,
3605 LOOP_CACHING_NOWAIT = 1,
3606 LOOP_CACHING_WAIT = 2,
3607 LOOP_ALLOC_CHUNK = 3,
3608 LOOP_NO_EMPTY_SIZE = 4,
3609};
3610
3611/*
3439 * walks the btree of allocated extents and find a hole of a given size. 3612 * walks the btree of allocated extents and find a hole of a given size.
3440 * The key ins is changed to record the hole: 3613 * The key ins is changed to record the hole:
3441 * ins->objectid == block start 3614 * ins->objectid == block start
@@ -3460,6 +3633,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
3460 struct btrfs_space_info *space_info; 3633 struct btrfs_space_info *space_info;
3461 int last_ptr_loop = 0; 3634 int last_ptr_loop = 0;
3462 int loop = 0; 3635 int loop = 0;
3636 bool found_uncached_bg = false;
3463 3637
3464 WARN_ON(num_bytes < root->sectorsize); 3638 WARN_ON(num_bytes < root->sectorsize);
3465 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY); 3639 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
@@ -3491,15 +3665,18 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
3491 search_start = max(search_start, first_logical_byte(root, 0)); 3665 search_start = max(search_start, first_logical_byte(root, 0));
3492 search_start = max(search_start, hint_byte); 3666 search_start = max(search_start, hint_byte);
3493 3667
3494 if (!last_ptr) { 3668 if (!last_ptr)
3495 empty_cluster = 0; 3669 empty_cluster = 0;
3496 loop = 1;
3497 }
3498 3670
3499 if (search_start == hint_byte) { 3671 if (search_start == hint_byte) {
3500 block_group = btrfs_lookup_block_group(root->fs_info, 3672 block_group = btrfs_lookup_block_group(root->fs_info,
3501 search_start); 3673 search_start);
3502 if (block_group && block_group_bits(block_group, data)) { 3674 /*
3675 * we don't want to use the block group if it doesn't match our
 3676 * allocation bits, or if it's not cached.
3677 */
3678 if (block_group && block_group_bits(block_group, data) &&
3679 block_group_cache_done(block_group)) {
3503 down_read(&space_info->groups_sem); 3680 down_read(&space_info->groups_sem);
3504 if (list_empty(&block_group->list) || 3681 if (list_empty(&block_group->list) ||
3505 block_group->ro) { 3682 block_group->ro) {
@@ -3522,21 +3699,35 @@ search:
3522 down_read(&space_info->groups_sem); 3699 down_read(&space_info->groups_sem);
3523 list_for_each_entry(block_group, &space_info->block_groups, list) { 3700 list_for_each_entry(block_group, &space_info->block_groups, list) {
3524 u64 offset; 3701 u64 offset;
3702 int cached;
3525 3703
3526 atomic_inc(&block_group->count); 3704 atomic_inc(&block_group->count);
3527 search_start = block_group->key.objectid; 3705 search_start = block_group->key.objectid;
3528 3706
3529have_block_group: 3707have_block_group:
3530 if (unlikely(!block_group->cached)) { 3708 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
3531 mutex_lock(&block_group->cache_mutex); 3709 /*
3532 ret = cache_block_group(root, block_group); 3710 * we want to start caching kthreads, but not too many
3533 mutex_unlock(&block_group->cache_mutex); 3711 * right off the bat so we don't overwhelm the system,
3534 if (ret) { 3712 * so only start them if there are less than 2 and we're
3535 btrfs_put_block_group(block_group); 3713 * in the initial allocation phase.
3536 break; 3714 */
3715 if (loop > LOOP_CACHING_NOWAIT ||
3716 atomic_read(&space_info->caching_threads) < 2) {
3717 ret = cache_block_group(block_group);
3718 BUG_ON(ret);
3537 } 3719 }
3538 } 3720 }
3539 3721
3722 cached = block_group_cache_done(block_group);
3723 if (unlikely(!cached)) {
3724 found_uncached_bg = true;
3725
3726 /* if we only want cached bgs, loop */
3727 if (loop == LOOP_CACHED_ONLY)
3728 goto loop;
3729 }
3730
3540 if (unlikely(block_group->ro)) 3731 if (unlikely(block_group->ro))
3541 goto loop; 3732 goto loop;
3542 3733
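The caching gate in the hunk above boils down to an atomic-counter throttle: later loop passes may always start a cacher, early ones only while fewer than two are already running. A sketch with C11 atomics in place of the kernel's atomic_t; the threshold of 2 is taken straight from the code, the rest is illustrative.

#include <stdatomic.h>
#include <stdbool.h>

enum { LOOP_CACHED_ONLY, LOOP_CACHING_NOWAIT, LOOP_CACHING_WAIT };

static atomic_int caching_threads;  /* per-space-info in the kernel */

/* decide whether this allocation pass may kick off a caching thread */
static bool may_start_cacher(int loop)
{
	return loop > LOOP_CACHING_NOWAIT ||
	       atomic_load(&caching_threads) < 2;
}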
@@ -3615,14 +3806,21 @@ refill_cluster:
3615 spin_unlock(&last_ptr->refill_lock); 3806 spin_unlock(&last_ptr->refill_lock);
3616 goto checks; 3807 goto checks;
3617 } 3808 }
3809 } else if (!cached && loop > LOOP_CACHING_NOWAIT) {
3810 spin_unlock(&last_ptr->refill_lock);
3811
3812 wait_block_group_cache_progress(block_group,
3813 num_bytes + empty_cluster + empty_size);
3814 goto have_block_group;
3618 } 3815 }
3816
3619 /* 3817 /*
3620 * at this point we either didn't find a cluster 3818 * at this point we either didn't find a cluster
3621 * or we weren't able to allocate a block from our 3819 * or we weren't able to allocate a block from our
3622 * cluster. Free the cluster we've been trying 3820 * cluster. Free the cluster we've been trying
3623 * to use, and go to the next block group 3821 * to use, and go to the next block group
3624 */ 3822 */
3625 if (loop < 2) { 3823 if (loop < LOOP_NO_EMPTY_SIZE) {
3626 btrfs_return_cluster_to_free_space(NULL, 3824 btrfs_return_cluster_to_free_space(NULL,
3627 last_ptr); 3825 last_ptr);
3628 spin_unlock(&last_ptr->refill_lock); 3826 spin_unlock(&last_ptr->refill_lock);
@@ -3633,11 +3831,17 @@ refill_cluster:
3633 3831
3634 offset = btrfs_find_space_for_alloc(block_group, search_start, 3832 offset = btrfs_find_space_for_alloc(block_group, search_start,
3635 num_bytes, empty_size); 3833 num_bytes, empty_size);
3636 if (!offset) 3834 if (!offset && (cached || (!cached &&
3835 loop == LOOP_CACHING_NOWAIT))) {
3637 goto loop; 3836 goto loop;
3837 } else if (!offset && (!cached &&
3838 loop > LOOP_CACHING_NOWAIT)) {
3839 wait_block_group_cache_progress(block_group,
3840 num_bytes + empty_size);
3841 goto have_block_group;
3842 }
3638checks: 3843checks:
3639 search_start = stripe_align(root, offset); 3844 search_start = stripe_align(root, offset);
3640
3641 /* move on to the next group */ 3845 /* move on to the next group */
3642 if (search_start + num_bytes >= search_end) { 3846 if (search_start + num_bytes >= search_end) {
3643 btrfs_add_free_space(block_group, offset, num_bytes); 3847 btrfs_add_free_space(block_group, offset, num_bytes);
@@ -3683,13 +3887,26 @@ loop:
3683 } 3887 }
3684 up_read(&space_info->groups_sem); 3888 up_read(&space_info->groups_sem);
3685 3889
3686 /* loop == 0, try to find a clustered alloc in every block group 3890 /* LOOP_CACHED_ONLY, only search fully cached block groups
3687 * loop == 1, try again after forcing a chunk allocation 3891 * LOOP_CACHING_NOWAIT, search partially cached block groups, but
 3688 * loop == 2, set empty_size and empty_cluster to 0 and try again 3892 * don't wait for them to finish caching
3893 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
3894 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
3895 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
3896 * again
3689 */ 3897 */
3690 if (!ins->objectid && loop < 3 && 3898 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
3691 (empty_size || empty_cluster || allowed_chunk_alloc)) { 3899 (found_uncached_bg || empty_size || empty_cluster ||
3692 if (loop >= 2) { 3900 allowed_chunk_alloc)) {
3901 if (found_uncached_bg) {
3902 found_uncached_bg = false;
3903 if (loop < LOOP_CACHING_WAIT) {
3904 loop++;
3905 goto search;
3906 }
3907 }
3908
3909 if (loop == LOOP_ALLOC_CHUNK) {
3693 empty_size = 0; 3910 empty_size = 0;
3694 empty_cluster = 0; 3911 empty_cluster = 0;
3695 } 3912 }
@@ -3702,7 +3919,7 @@ loop:
3702 space_info->force_alloc = 1; 3919 space_info->force_alloc = 1;
3703 } 3920 }
3704 3921
3705 if (loop < 3) { 3922 if (loop < LOOP_NO_EMPTY_SIZE) {
3706 loop++; 3923 loop++;
3707 goto search; 3924 goto search;
3708 } 3925 }
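The retry ladder above can be read as a five-state loop where each failed pass relaxes one constraint, ending with empty_size dropped entirely. A compact model, assuming a try_alloc() stub for one full pass over the block groups; only the enum values come from the patch:

#include <stdbool.h>
#include <stdint.h>

enum loop_type {
	LOOP_CACHED_ONLY,     /* fully cached block groups only */
	LOOP_CACHING_NOWAIT,  /* partially cached, don't wait */
	LOOP_CACHING_WAIT,    /* wait on groups that are caching */
	LOOP_ALLOC_CHUNK,     /* force a chunk allocation */
	LOOP_NO_EMPTY_SIZE,   /* drop empty_size/empty_cluster */
};

/* stub standing in for one full pass over the block groups */
static bool try_alloc(enum loop_type loop, uint64_t empty_size)
{
	(void)loop;
	(void)empty_size;
	return false;
}

static bool alloc_with_retries(uint64_t empty_size)
{
	for (enum loop_type loop = LOOP_CACHED_ONLY;
	     loop <= LOOP_NO_EMPTY_SIZE; loop++) {
		if (loop == LOOP_NO_EMPTY_SIZE)
			empty_size = 0;  /* last resort: any hole will do */
		if (try_alloc(loop, empty_size))
			return true;
	}
	return false;
}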
@@ -3798,7 +4015,7 @@ again:
3798 num_bytes, data, 1); 4015 num_bytes, data, 1);
3799 goto again; 4016 goto again;
3800 } 4017 }
3801 if (ret) { 4018 if (ret == -ENOSPC) {
3802 struct btrfs_space_info *sinfo; 4019 struct btrfs_space_info *sinfo;
3803 4020
3804 sinfo = __find_space_info(root->fs_info, data); 4021 sinfo = __find_space_info(root->fs_info, data);
@@ -3806,7 +4023,6 @@ again:
3806 "wanted %llu\n", (unsigned long long)data, 4023 "wanted %llu\n", (unsigned long long)data,
3807 (unsigned long long)num_bytes); 4024 (unsigned long long)num_bytes);
3808 dump_space_info(sinfo, num_bytes); 4025 dump_space_info(sinfo, num_bytes);
3809 BUG();
3810 } 4026 }
3811 4027
3812 return ret; 4028 return ret;
@@ -3844,7 +4060,9 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
3844 ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size, 4060 ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
3845 empty_size, hint_byte, search_end, ins, 4061 empty_size, hint_byte, search_end, ins,
3846 data); 4062 data);
3847 update_reserved_extents(root, ins->objectid, ins->offset, 1); 4063 if (!ret)
4064 update_reserved_extents(root, ins->objectid, ins->offset, 1);
4065
3848 return ret; 4066 return ret;
3849} 4067}
3850 4068
@@ -4006,9 +4224,9 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
4006 struct btrfs_block_group_cache *block_group; 4224 struct btrfs_block_group_cache *block_group;
4007 4225
4008 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid); 4226 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
4009 mutex_lock(&block_group->cache_mutex); 4227 cache_block_group(block_group);
4010 cache_block_group(root, block_group); 4228 wait_event(block_group->caching_q,
4011 mutex_unlock(&block_group->cache_mutex); 4229 block_group_cache_done(block_group));
4012 4230
4013 ret = btrfs_remove_free_space(block_group, ins->objectid, 4231 ret = btrfs_remove_free_space(block_group, ins->objectid,
4014 ins->offset); 4232 ins->offset);
@@ -4039,7 +4257,8 @@ static int alloc_tree_block(struct btrfs_trans_handle *trans,
4039 ret = __btrfs_reserve_extent(trans, root, num_bytes, num_bytes, 4257 ret = __btrfs_reserve_extent(trans, root, num_bytes, num_bytes,
4040 empty_size, hint_byte, search_end, 4258 empty_size, hint_byte, search_end,
4041 ins, 0); 4259 ins, 0);
4042 BUG_ON(ret); 4260 if (ret)
4261 return ret;
4043 4262
4044 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { 4263 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
4045 if (parent == 0) 4264 if (parent == 0)
@@ -6955,11 +7174,16 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
6955 &info->block_group_cache_tree); 7174 &info->block_group_cache_tree);
6956 spin_unlock(&info->block_group_cache_lock); 7175 spin_unlock(&info->block_group_cache_lock);
6957 7176
6958 btrfs_remove_free_space_cache(block_group);
6959 down_write(&block_group->space_info->groups_sem); 7177 down_write(&block_group->space_info->groups_sem);
6960 list_del(&block_group->list); 7178 list_del(&block_group->list);
6961 up_write(&block_group->space_info->groups_sem); 7179 up_write(&block_group->space_info->groups_sem);
6962 7180
7181 if (block_group->cached == BTRFS_CACHE_STARTED)
7182 wait_event(block_group->caching_q,
7183 block_group_cache_done(block_group));
7184
7185 btrfs_remove_free_space_cache(block_group);
7186
6963 WARN_ON(atomic_read(&block_group->count) != 1); 7187 WARN_ON(atomic_read(&block_group->count) != 1);
6964 kfree(block_group); 7188 kfree(block_group);
6965 7189
@@ -7025,9 +7249,19 @@ int btrfs_read_block_groups(struct btrfs_root *root)
7025 atomic_set(&cache->count, 1); 7249 atomic_set(&cache->count, 1);
7026 spin_lock_init(&cache->lock); 7250 spin_lock_init(&cache->lock);
7027 spin_lock_init(&cache->tree_lock); 7251 spin_lock_init(&cache->tree_lock);
7028 mutex_init(&cache->cache_mutex); 7252 cache->fs_info = info;
7253 init_waitqueue_head(&cache->caching_q);
7029 INIT_LIST_HEAD(&cache->list); 7254 INIT_LIST_HEAD(&cache->list);
7030 INIT_LIST_HEAD(&cache->cluster_list); 7255 INIT_LIST_HEAD(&cache->cluster_list);
7256
7257 /*
7258 * we only want to have 32k of ram per block group for keeping
7259 * track of free space, and if we pass 1/2 of that we want to
7260 * start converting things over to using bitmaps
7261 */
7262 cache->extents_thresh = ((1024 * 32) / 2) /
7263 sizeof(struct btrfs_free_space);
7264
7031 read_extent_buffer(leaf, &cache->item, 7265 read_extent_buffer(leaf, &cache->item,
7032 btrfs_item_ptr_offset(leaf, path->slots[0]), 7266 btrfs_item_ptr_offset(leaf, path->slots[0]),
7033 sizeof(cache->item)); 7267 sizeof(cache->item));
@@ -7036,6 +7270,26 @@ int btrfs_read_block_groups(struct btrfs_root *root)
7036 key.objectid = found_key.objectid + found_key.offset; 7270 key.objectid = found_key.objectid + found_key.offset;
7037 btrfs_release_path(root, path); 7271 btrfs_release_path(root, path);
7038 cache->flags = btrfs_block_group_flags(&cache->item); 7272 cache->flags = btrfs_block_group_flags(&cache->item);
7273 cache->sectorsize = root->sectorsize;
7274
7275 remove_sb_from_cache(root, cache);
7276
7277 /*
7278 * check for two cases, either we are full, and therefore
7279 * don't need to bother with the caching work since we won't
7280 * find any space, or we are empty, and we can just add all
 7281 * the space in and be done with it. This saves us a lot of
7282 * time, particularly in the full case.
7283 */
7284 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
7285 cache->cached = BTRFS_CACHE_FINISHED;
7286 } else if (btrfs_block_group_used(&cache->item) == 0) {
7287 cache->cached = BTRFS_CACHE_FINISHED;
7288 add_new_free_space(cache, root->fs_info,
7289 found_key.objectid,
7290 found_key.objectid +
7291 found_key.offset);
7292 }
7039 7293
7040 ret = update_space_info(info, cache->flags, found_key.offset, 7294 ret = update_space_info(info, cache->flags, found_key.offset,
7041 btrfs_block_group_used(&cache->item), 7295 btrfs_block_group_used(&cache->item),
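The extents_thresh arithmetic above budgets 32 KiB of tracking memory per block group and switches to bitmaps once plain extent entries would consume half of it. A worked example; the 48-byte entry size is an assumption for illustration, since sizeof(struct btrfs_free_space) varies by kernel version and architecture:

#include <stdio.h>

int main(void)
{
	const unsigned ram_budget = 32 * 1024;   /* bytes per block group */
	const unsigned entry_size = 48;          /* assumed sizeof(entry) */
	unsigned thresh = (ram_budget / 2) / entry_size;

	/* with 48-byte entries this allows ~341 extent entries before
	 * free space tracking starts converting runs into bitmaps */
	printf("extents_thresh = %u\n", thresh);
	return 0;
}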
@@ -7079,10 +7333,19 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7079 cache->key.objectid = chunk_offset; 7333 cache->key.objectid = chunk_offset;
7080 cache->key.offset = size; 7334 cache->key.offset = size;
7081 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 7335 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
7336 cache->sectorsize = root->sectorsize;
7337
7338 /*
7339 * we only want to have 32k of ram per block group for keeping track
7340 * of free space, and if we pass 1/2 of that we want to start
7341 * converting things over to using bitmaps
7342 */
7343 cache->extents_thresh = ((1024 * 32) / 2) /
7344 sizeof(struct btrfs_free_space);
7082 atomic_set(&cache->count, 1); 7345 atomic_set(&cache->count, 1);
7083 spin_lock_init(&cache->lock); 7346 spin_lock_init(&cache->lock);
7084 spin_lock_init(&cache->tree_lock); 7347 spin_lock_init(&cache->tree_lock);
7085 mutex_init(&cache->cache_mutex); 7348 init_waitqueue_head(&cache->caching_q);
7086 INIT_LIST_HEAD(&cache->list); 7349 INIT_LIST_HEAD(&cache->list);
7087 INIT_LIST_HEAD(&cache->cluster_list); 7350 INIT_LIST_HEAD(&cache->cluster_list);
7088 7351
@@ -7091,6 +7354,12 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7091 cache->flags = type; 7354 cache->flags = type;
7092 btrfs_set_block_group_flags(&cache->item, type); 7355 btrfs_set_block_group_flags(&cache->item, type);
7093 7356
7357 cache->cached = BTRFS_CACHE_FINISHED;
7358 remove_sb_from_cache(root, cache);
7359
7360 add_new_free_space(cache, root->fs_info, chunk_offset,
7361 chunk_offset + size);
7362
7094 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used, 7363 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
7095 &cache->space_info); 7364 &cache->space_info);
7096 BUG_ON(ret); 7365 BUG_ON(ret);
@@ -7149,7 +7418,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
7149 rb_erase(&block_group->cache_node, 7418 rb_erase(&block_group->cache_node,
7150 &root->fs_info->block_group_cache_tree); 7419 &root->fs_info->block_group_cache_tree);
7151 spin_unlock(&root->fs_info->block_group_cache_lock); 7420 spin_unlock(&root->fs_info->block_group_cache_lock);
7152 btrfs_remove_free_space_cache(block_group); 7421
7153 down_write(&block_group->space_info->groups_sem); 7422 down_write(&block_group->space_info->groups_sem);
7154 /* 7423 /*
7155 * we must use list_del_init so people can check to see if they 7424 * we must use list_del_init so people can check to see if they
@@ -7158,11 +7427,18 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
7158 list_del_init(&block_group->list); 7427 list_del_init(&block_group->list);
7159 up_write(&block_group->space_info->groups_sem); 7428 up_write(&block_group->space_info->groups_sem);
7160 7429
7430 if (block_group->cached == BTRFS_CACHE_STARTED)
7431 wait_event(block_group->caching_q,
7432 block_group_cache_done(block_group));
7433
7434 btrfs_remove_free_space_cache(block_group);
7435
7161 spin_lock(&block_group->space_info->lock); 7436 spin_lock(&block_group->space_info->lock);
7162 block_group->space_info->total_bytes -= block_group->key.offset; 7437 block_group->space_info->total_bytes -= block_group->key.offset;
7163 block_group->space_info->bytes_readonly -= block_group->key.offset; 7438 block_group->space_info->bytes_readonly -= block_group->key.offset;
7164 spin_unlock(&block_group->space_info->lock); 7439 spin_unlock(&block_group->space_info->lock);
7165 block_group->space_info->full = 0; 7440
7441 btrfs_clear_space_info_full(root->fs_info);
7166 7442
7167 btrfs_put_block_group(block_group); 7443 btrfs_put_block_group(block_group);
7168 btrfs_put_block_group(block_group); 7444 btrfs_put_block_group(block_group);
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 4538e48581a5..5edcee3a617f 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -16,45 +16,46 @@
16 * Boston, MA 021110-1307, USA. 16 * Boston, MA 021110-1307, USA.
17 */ 17 */
18 18
19#include <linux/pagemap.h>
19#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/math64.h>
20#include "ctree.h" 22#include "ctree.h"
21#include "free-space-cache.h" 23#include "free-space-cache.h"
22#include "transaction.h" 24#include "transaction.h"
23 25
24struct btrfs_free_space { 26#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)
25 struct rb_node bytes_index; 27#define MAX_CACHE_BYTES_PER_GIG (32 * 1024)
26 struct rb_node offset_index;
27 u64 offset;
28 u64 bytes;
29};
30 28
31static int tree_insert_offset(struct rb_root *root, u64 offset, 29static inline unsigned long offset_to_bit(u64 bitmap_start, u64 sectorsize,
32 struct rb_node *node) 30 u64 offset)
33{ 31{
34 struct rb_node **p = &root->rb_node; 32 BUG_ON(offset < bitmap_start);
35 struct rb_node *parent = NULL; 33 offset -= bitmap_start;
36 struct btrfs_free_space *info; 34 return (unsigned long)(div64_u64(offset, sectorsize));
35}
37 36
38 while (*p) { 37static inline unsigned long bytes_to_bits(u64 bytes, u64 sectorsize)
39 parent = *p; 38{
40 info = rb_entry(parent, struct btrfs_free_space, offset_index); 39 return (unsigned long)(div64_u64(bytes, sectorsize));
40}
41 41
42 if (offset < info->offset) 42static inline u64 offset_to_bitmap(struct btrfs_block_group_cache *block_group,
43 p = &(*p)->rb_left; 43 u64 offset)
44 else if (offset > info->offset) 44{
45 p = &(*p)->rb_right; 45 u64 bitmap_start;
46 else 46 u64 bytes_per_bitmap;
47 return -EEXIST;
48 }
49 47
50 rb_link_node(node, parent, p); 48 bytes_per_bitmap = BITS_PER_BITMAP * block_group->sectorsize;
51 rb_insert_color(node, root); 49 bitmap_start = offset - block_group->key.objectid;
50 bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
51 bitmap_start *= bytes_per_bitmap;
52 bitmap_start += block_group->key.objectid;
52 53
53 return 0; 54 return bitmap_start;
54} 55}
55 56
56static int tree_insert_bytes(struct rb_root *root, u64 bytes, 57static int tree_insert_offset(struct rb_root *root, u64 offset,
57 struct rb_node *node) 58 struct rb_node *node, int bitmap)
58{ 59{
59 struct rb_node **p = &root->rb_node; 60 struct rb_node **p = &root->rb_node;
60 struct rb_node *parent = NULL; 61 struct rb_node *parent = NULL;
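The new helpers fix each bitmap's coverage by simple arithmetic: with one page of bits and 4 KiB sectors, a single bitmap spans 32768 * 4096 = 128 MiB of the block group. A user-space restatement under those assumptions (PAGE_CACHE_SIZE == 4096 is an assumption; the real value is architecture-dependent):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BITMAP (4096 * 8)  /* assumes a 4 KiB page */

static unsigned long offset_to_bit(uint64_t bitmap_start, uint64_t sectorsize,
				   uint64_t offset)
{
	assert(offset >= bitmap_start);
	return (unsigned long)((offset - bitmap_start) / sectorsize);
}

static uint64_t offset_to_bitmap(uint64_t bg_start, uint64_t sectorsize,
				 uint64_t offset)
{
	uint64_t bytes_per_bitmap = (uint64_t)BITS_PER_BITMAP * sectorsize;

	/* round the group-relative offset down to a bitmap boundary */
	return (offset - bg_start) / bytes_per_bitmap * bytes_per_bitmap +
	       bg_start;
}

int main(void)
{
	uint64_t bg = 1ULL << 30, sector = 4096;
	uint64_t off = bg + 200 * 1024 * 1024;   /* 200 MiB into the group */
	uint64_t bm = offset_to_bitmap(bg, sector, off);

	/* prints the 128 MiB-aligned bitmap start and bit index 18432 */
	printf("bitmap start: %llu\n", (unsigned long long)bm);
	printf("bit index:    %lu\n", offset_to_bit(bm, sector, off));
	return 0;
}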
@@ -62,12 +63,34 @@ static int tree_insert_bytes(struct rb_root *root, u64 bytes,
62 63
63 while (*p) { 64 while (*p) {
64 parent = *p; 65 parent = *p;
65 info = rb_entry(parent, struct btrfs_free_space, bytes_index); 66 info = rb_entry(parent, struct btrfs_free_space, offset_index);
66 67
67 if (bytes < info->bytes) 68 if (offset < info->offset) {
68 p = &(*p)->rb_left; 69 p = &(*p)->rb_left;
69 else 70 } else if (offset > info->offset) {
70 p = &(*p)->rb_right; 71 p = &(*p)->rb_right;
72 } else {
73 /*
74 * we could have a bitmap entry and an extent entry
75 * share the same offset. If this is the case, we want
76 * the extent entry to always be found first if we do a
77 * linear search through the tree, since we want to have
78 * the quickest allocation time, and allocating from an
79 * extent is faster than allocating from a bitmap. So
80 * if we're inserting a bitmap and we find an entry at
81 * this offset, we want to go right, or after this entry
82 * logically. If we are inserting an extent and we've
83 * found a bitmap, we want to go left, or before
84 * logically.
85 */
86 if (bitmap) {
87 WARN_ON(info->bitmap);
88 p = &(*p)->rb_right;
89 } else {
90 WARN_ON(!info->bitmap);
91 p = &(*p)->rb_left;
92 }
93 }
71 } 94 }
72 95
73 rb_link_node(node, parent, p); 96 rb_link_node(node, parent, p);
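The tie-break above keeps an extent entry and a bitmap entry at the same offset in a fixed order — extent first — so a linear walk hits the cheaper allocation path before the bitmap. Written as a comparator, the rule reads like this sketch (struct fs_entry is illustrative, not the kernel structure):

#include <stdbool.h>
#include <stdint.h>

struct fs_entry {
	uint64_t offset;
	bool     bitmap;
};

/* returns <0 if a sorts before b, >0 if after; equal offsets are
 * split by the bitmap flag, with the bitmap sorting to the right */
static int entry_cmp(const struct fs_entry *a, const struct fs_entry *b)
{
	if (a->offset != b->offset)
		return a->offset < b->offset ? -1 : 1;
	if (a->bitmap == b->bitmap)
		return 0;
	return a->bitmap ? 1 : -1;
}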
@@ -79,110 +102,143 @@ static int tree_insert_bytes(struct rb_root *root, u64 bytes,
79/* 102/*
80 * searches the tree for the given offset. 103 * searches the tree for the given offset.
81 * 104 *
82 * fuzzy == 1: this is used for allocations where we are given a hint of where 105 * fuzzy - If this is set, then we are trying to make an allocation, and we just
83 * to look for free space. Because the hint may not be completely on an offset 106 * want a section that has at least bytes size and comes at or after the given
84 * mark, or the hint may no longer point to free space we need to fudge our 107 * offset.
85 * results a bit. So we look for free space starting at or after offset with at
86 * least bytes size. We prefer to find as close to the given offset as we can.
87 * Also if the offset is within a free space range, then we will return the free
88 * space that contains the given offset, which means we can return a free space
89 * chunk with an offset before the provided offset.
90 *
91 * fuzzy == 0: this is just a normal tree search. Give us the free space that
92 * starts at the given offset which is at least bytes size, and if its not there
93 * return NULL.
94 */ 108 */
95static struct btrfs_free_space *tree_search_offset(struct rb_root *root, 109static struct btrfs_free_space *
96 u64 offset, u64 bytes, 110tree_search_offset(struct btrfs_block_group_cache *block_group,
97 int fuzzy) 111 u64 offset, int bitmap_only, int fuzzy)
98{ 112{
99 struct rb_node *n = root->rb_node; 113 struct rb_node *n = block_group->free_space_offset.rb_node;
100 struct btrfs_free_space *entry, *ret = NULL; 114 struct btrfs_free_space *entry, *prev = NULL;
115
116 /* find entry that is closest to the 'offset' */
117 while (1) {
118 if (!n) {
119 entry = NULL;
120 break;
121 }
101 122
102 while (n) {
103 entry = rb_entry(n, struct btrfs_free_space, offset_index); 123 entry = rb_entry(n, struct btrfs_free_space, offset_index);
124 prev = entry;
104 125
105 if (offset < entry->offset) { 126 if (offset < entry->offset)
106 if (fuzzy &&
107 (!ret || entry->offset < ret->offset) &&
108 (bytes <= entry->bytes))
109 ret = entry;
110 n = n->rb_left; 127 n = n->rb_left;
111 } else if (offset > entry->offset) { 128 else if (offset > entry->offset)
112 if (fuzzy &&
113 (entry->offset + entry->bytes - 1) >= offset &&
114 bytes <= entry->bytes) {
115 ret = entry;
116 break;
117 }
118 n = n->rb_right; 129 n = n->rb_right;
119 } else { 130 else
120 if (bytes > entry->bytes) {
121 n = n->rb_right;
122 continue;
123 }
124 ret = entry;
125 break; 131 break;
126 }
127 } 132 }
128 133
129 return ret; 134 if (bitmap_only) {
130} 135 if (!entry)
136 return NULL;
137 if (entry->bitmap)
138 return entry;
131 139
132/* 140 /*
133 * return a chunk at least bytes size, as close to offset that we can get. 141 * bitmap entry and extent entry may share same offset,
134 */ 142 * in that case, bitmap entry comes after extent entry.
135static struct btrfs_free_space *tree_search_bytes(struct rb_root *root, 143 */
136 u64 offset, u64 bytes) 144 n = rb_next(n);
137{ 145 if (!n)
138 struct rb_node *n = root->rb_node; 146 return NULL;
139 struct btrfs_free_space *entry, *ret = NULL; 147 entry = rb_entry(n, struct btrfs_free_space, offset_index);
140 148 if (entry->offset != offset)
141 while (n) { 149 return NULL;
142 entry = rb_entry(n, struct btrfs_free_space, bytes_index);
143 150
144 if (bytes < entry->bytes) { 151 WARN_ON(!entry->bitmap);
152 return entry;
153 } else if (entry) {
154 if (entry->bitmap) {
145 /* 155 /*
146 * We prefer to get a hole size as close to the size we 156 * if previous extent entry covers the offset,
147 * are asking for so we don't take small slivers out of 157 * we should return it instead of the bitmap entry
148 * huge holes, but we also want to get as close to the
149 * offset as possible so we don't have a whole lot of
150 * fragmentation.
151 */ 158 */
152 if (offset <= entry->offset) { 159 n = &entry->offset_index;
153 if (!ret) 160 while (1) {
154 ret = entry; 161 n = rb_prev(n);
155 else if (entry->bytes < ret->bytes) 162 if (!n)
156 ret = entry; 163 break;
157 else if (entry->offset < ret->offset) 164 prev = rb_entry(n, struct btrfs_free_space,
158 ret = entry; 165 offset_index);
166 if (!prev->bitmap) {
167 if (prev->offset + prev->bytes > offset)
168 entry = prev;
169 break;
170 }
159 } 171 }
160 n = n->rb_left; 172 }
161 } else if (bytes > entry->bytes) { 173 return entry;
162 n = n->rb_right; 174 }
175
176 if (!prev)
177 return NULL;
178
179 /* find last entry before the 'offset' */
180 entry = prev;
181 if (entry->offset > offset) {
182 n = rb_prev(&entry->offset_index);
183 if (n) {
184 entry = rb_entry(n, struct btrfs_free_space,
185 offset_index);
186 BUG_ON(entry->offset > offset);
163 } else { 187 } else {
164 /* 188 if (fuzzy)
165 * Ok we may have multiple chunks of the wanted size, 189 return entry;
166 * so we don't want to take the first one we find, we 190 else
167 * want to take the one closest to our given offset, so 191 return NULL;
168 * keep searching just in case theres a better match.
169 */
170 n = n->rb_right;
171 if (offset > entry->offset)
172 continue;
173 else if (!ret || entry->offset < ret->offset)
174 ret = entry;
175 } 192 }
176 } 193 }
177 194
178 return ret; 195 if (entry->bitmap) {
196 n = &entry->offset_index;
197 while (1) {
198 n = rb_prev(n);
199 if (!n)
200 break;
201 prev = rb_entry(n, struct btrfs_free_space,
202 offset_index);
203 if (!prev->bitmap) {
204 if (prev->offset + prev->bytes > offset)
205 return prev;
206 break;
207 }
208 }
209 if (entry->offset + BITS_PER_BITMAP *
210 block_group->sectorsize > offset)
211 return entry;
212 } else if (entry->offset + entry->bytes > offset)
213 return entry;
214
215 if (!fuzzy)
216 return NULL;
217
218 while (1) {
219 if (entry->bitmap) {
220 if (entry->offset + BITS_PER_BITMAP *
221 block_group->sectorsize > offset)
222 break;
223 } else {
224 if (entry->offset + entry->bytes > offset)
225 break;
226 }
227
228 n = rb_next(&entry->offset_index);
229 if (!n)
230 return NULL;
231 entry = rb_entry(n, struct btrfs_free_space, offset_index);
232 }
233 return entry;
179} 234}
180 235
181static void unlink_free_space(struct btrfs_block_group_cache *block_group, 236static void unlink_free_space(struct btrfs_block_group_cache *block_group,
182 struct btrfs_free_space *info) 237 struct btrfs_free_space *info)
183{ 238{
184 rb_erase(&info->offset_index, &block_group->free_space_offset); 239 rb_erase(&info->offset_index, &block_group->free_space_offset);
185 rb_erase(&info->bytes_index, &block_group->free_space_bytes); 240 block_group->free_extents--;
241 block_group->free_space -= info->bytes;
186} 242}
187 243
188static int link_free_space(struct btrfs_block_group_cache *block_group, 244static int link_free_space(struct btrfs_block_group_cache *block_group,
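Stripped of the rbtree plumbing, the rewritten lookup above finds the last entry starting at or before the requested offset and, in fuzzy mode, falls forward to the next entry that covers it. A linear-scan model over a sorted array, which ignores the extent-before-bitmap preference for brevity; all names are illustrative:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define BITS_PER_BITMAP (4096 * 8)  /* assumed 4 KiB page, as above */

struct space { uint64_t offset, bytes; bool bitmap; };

/* a bitmap entry covers a fixed span; an extent covers its bytes */
static uint64_t span(const struct space *e, uint64_t sectorsize)
{
	return e->bitmap ? (uint64_t)BITS_PER_BITMAP * sectorsize : e->bytes;
}

static const struct space *
search_offset(const struct space *v, size_t n, uint64_t sectorsize,
	      uint64_t offset, bool fuzzy)
{
	for (size_t i = 0; i < n; i++) {
		/* entries are sorted by offset; take the first one
		 * whose coverage extends past the requested offset */
		if (v[i].offset + span(&v[i], sectorsize) > offset)
			return (fuzzy || v[i].offset <= offset) ? &v[i] : NULL;
	}
	return NULL;
}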
@@ -190,17 +246,353 @@ static int link_free_space(struct btrfs_block_group_cache *block_group,
190{ 246{
191 int ret = 0; 247 int ret = 0;
192 248
193 249 BUG_ON(!info->bitmap && !info->bytes);
194 BUG_ON(!info->bytes);
195 ret = tree_insert_offset(&block_group->free_space_offset, info->offset, 250 ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
196 &info->offset_index); 251 &info->offset_index, (info->bitmap != NULL));
197 if (ret) 252 if (ret)
198 return ret; 253 return ret;
199 254
200 ret = tree_insert_bytes(&block_group->free_space_bytes, info->bytes, 255 block_group->free_space += info->bytes;
201 &info->bytes_index); 256 block_group->free_extents++;
202 if (ret) 257 return ret;
203 return ret; 258}
259
260static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
261{
262 u64 max_bytes, possible_bytes;
263
264 /*
265 * The goal is to keep the total amount of memory used per 1gb of space
266 * at or below 32k, so we need to adjust how much memory we allow to be
267 * used by extent based free space tracking
268 */
269 max_bytes = MAX_CACHE_BYTES_PER_GIG *
270 (div64_u64(block_group->key.offset, 1024 * 1024 * 1024));
271
272 possible_bytes = (block_group->total_bitmaps * PAGE_CACHE_SIZE) +
273 (sizeof(struct btrfs_free_space) *
274 block_group->extents_thresh);
275
276 if (possible_bytes > max_bytes) {
277 int extent_bytes = max_bytes -
278 (block_group->total_bitmaps * PAGE_CACHE_SIZE);
279
280 if (extent_bytes <= 0) {
281 block_group->extents_thresh = 0;
282 return;
283 }
284
285 block_group->extents_thresh = extent_bytes /
286 (sizeof(struct btrfs_free_space));
287 }
288}
289
290static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group,
291 struct btrfs_free_space *info, u64 offset,
292 u64 bytes)
293{
294 unsigned long start, end;
295 unsigned long i;
296
297 start = offset_to_bit(info->offset, block_group->sectorsize, offset);
298 end = start + bytes_to_bits(bytes, block_group->sectorsize);
299 BUG_ON(end > BITS_PER_BITMAP);
300
301 for (i = start; i < end; i++)
302 clear_bit(i, info->bitmap);
303
304 info->bytes -= bytes;
305 block_group->free_space -= bytes;
306}
307
308static void bitmap_set_bits(struct btrfs_block_group_cache *block_group,
309 struct btrfs_free_space *info, u64 offset,
310 u64 bytes)
311{
312 unsigned long start, end;
313 unsigned long i;
314
315 start = offset_to_bit(info->offset, block_group->sectorsize, offset);
316 end = start + bytes_to_bits(bytes, block_group->sectorsize);
317 BUG_ON(end > BITS_PER_BITMAP);
318
319 for (i = start; i < end; i++)
320 set_bit(i, info->bitmap);
321
322 info->bytes += bytes;
323 block_group->free_space += bytes;
324}
325
326static int search_bitmap(struct btrfs_block_group_cache *block_group,
327 struct btrfs_free_space *bitmap_info, u64 *offset,
328 u64 *bytes)
329{
330 unsigned long found_bits = 0;
331 unsigned long bits, i;
332 unsigned long next_zero;
333
334 i = offset_to_bit(bitmap_info->offset, block_group->sectorsize,
335 max_t(u64, *offset, bitmap_info->offset));
336 bits = bytes_to_bits(*bytes, block_group->sectorsize);
337
338 for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
339 i < BITS_PER_BITMAP;
340 i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) {
341 next_zero = find_next_zero_bit(bitmap_info->bitmap,
342 BITS_PER_BITMAP, i);
343 if ((next_zero - i) >= bits) {
344 found_bits = next_zero - i;
345 break;
346 }
347 i = next_zero;
348 }
349
350 if (found_bits) {
351 *offset = (u64)(i * block_group->sectorsize) +
352 bitmap_info->offset;
353 *bytes = (u64)(found_bits) * block_group->sectorsize;
354 return 0;
355 }
356
357 return -1;
358}
359
360static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache
361 *block_group, u64 *offset,
362 u64 *bytes, int debug)
363{
364 struct btrfs_free_space *entry;
365 struct rb_node *node;
366 int ret;
367
368 if (!block_group->free_space_offset.rb_node)
369 return NULL;
370
371 entry = tree_search_offset(block_group,
372 offset_to_bitmap(block_group, *offset),
373 0, 1);
374 if (!entry)
375 return NULL;
376
377 for (node = &entry->offset_index; node; node = rb_next(node)) {
378 entry = rb_entry(node, struct btrfs_free_space, offset_index);
379 if (entry->bytes < *bytes)
380 continue;
381
382 if (entry->bitmap) {
383 ret = search_bitmap(block_group, entry, offset, bytes);
384 if (!ret)
385 return entry;
386 continue;
387 }
388
389 *offset = entry->offset;
390 *bytes = entry->bytes;
391 return entry;
392 }
393
394 return NULL;
395}
396
397static void add_new_bitmap(struct btrfs_block_group_cache *block_group,
398 struct btrfs_free_space *info, u64 offset)
399{
400 u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
401 int max_bitmaps = (int)div64_u64(block_group->key.offset +
402 bytes_per_bg - 1, bytes_per_bg);
403 BUG_ON(block_group->total_bitmaps >= max_bitmaps);
404
405 info->offset = offset_to_bitmap(block_group, offset);
406 link_free_space(block_group, info);
407 block_group->total_bitmaps++;
408
409 recalculate_thresholds(block_group);
410}
411
412static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group,
413 struct btrfs_free_space *bitmap_info,
414 u64 *offset, u64 *bytes)
415{
416 u64 end;
417 u64 search_start, search_bytes;
418 int ret;
419
420again:
421 end = bitmap_info->offset +
422 (u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1;
423
424 /*
425 * XXX - this can go away after a few releases.
426 *
427 * since the only user of btrfs_remove_free_space is the tree logging
428 * stuff, and the only way to test that is under crash conditions, we
 429 * want to have this debug stuff here just in case something's not
 430 * working. Search the bitmap for the space we are trying to use to
 431 * make sure it's actually there. If it's not there then we need to stop
432 * because something has gone wrong.
433 */
434 search_start = *offset;
435 search_bytes = *bytes;
436 ret = search_bitmap(block_group, bitmap_info, &search_start,
437 &search_bytes);
438 BUG_ON(ret < 0 || search_start != *offset);
439
440 if (*offset > bitmap_info->offset && *offset + *bytes > end) {
441 bitmap_clear_bits(block_group, bitmap_info, *offset,
442 end - *offset + 1);
443 *bytes -= end - *offset + 1;
444 *offset = end + 1;
445 } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
446 bitmap_clear_bits(block_group, bitmap_info, *offset, *bytes);
447 *bytes = 0;
448 }
449
450 if (*bytes) {
451 struct rb_node *next = rb_next(&bitmap_info->offset_index);
452 if (!bitmap_info->bytes) {
453 unlink_free_space(block_group, bitmap_info);
454 kfree(bitmap_info->bitmap);
455 kfree(bitmap_info);
456 block_group->total_bitmaps--;
457 recalculate_thresholds(block_group);
458 }
459
460 /*
461 * no entry after this bitmap, but we still have bytes to
462 * remove, so something has gone wrong.
463 */
464 if (!next)
465 return -EINVAL;
466
467 bitmap_info = rb_entry(next, struct btrfs_free_space,
468 offset_index);
469
470 /*
471 * if the next entry isn't a bitmap we need to return to let the
472 * extent stuff do its work.
473 */
474 if (!bitmap_info->bitmap)
475 return -EAGAIN;
476
477 /*
478 * Ok the next item is a bitmap, but it may not actually hold
479 * the information for the rest of this free space stuff, so
480 * look for it, and if we don't find it return so we can try
481 * everything over again.
482 */
483 search_start = *offset;
484 search_bytes = *bytes;
485 ret = search_bitmap(block_group, bitmap_info, &search_start,
486 &search_bytes);
487 if (ret < 0 || search_start != *offset)
488 return -EAGAIN;
489
490 goto again;
491 } else if (!bitmap_info->bytes) {
492 unlink_free_space(block_group, bitmap_info);
493 kfree(bitmap_info->bitmap);
494 kfree(bitmap_info);
495 block_group->total_bitmaps--;
496 recalculate_thresholds(block_group);
497 }
498
499 return 0;
500}
501
502static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
503 struct btrfs_free_space *info)
504{
505 struct btrfs_free_space *bitmap_info;
506 int added = 0;
507 u64 bytes, offset, end;
508 int ret;
509
510 /*
511 * If we are below the extents threshold then we can add this as an
512 * extent, and don't have to deal with the bitmap
513 */
514 if (block_group->free_extents < block_group->extents_thresh &&
515 info->bytes > block_group->sectorsize * 4)
516 return 0;
517
518 /*
519 * some block groups are so tiny they can't be enveloped by a bitmap, so
520 * don't even bother to create a bitmap for this
521 */
522 if (BITS_PER_BITMAP * block_group->sectorsize >
523 block_group->key.offset)
524 return 0;
525
526 bytes = info->bytes;
527 offset = info->offset;
528
529again:
530 bitmap_info = tree_search_offset(block_group,
531 offset_to_bitmap(block_group, offset),
532 1, 0);
533 if (!bitmap_info) {
534 BUG_ON(added);
535 goto new_bitmap;
536 }
537
538 end = bitmap_info->offset +
539 (u64)(BITS_PER_BITMAP * block_group->sectorsize);
540
541 if (offset >= bitmap_info->offset && offset + bytes > end) {
542 bitmap_set_bits(block_group, bitmap_info, offset,
543 end - offset);
544 bytes -= end - offset;
545 offset = end;
546 added = 0;
547 } else if (offset >= bitmap_info->offset && offset + bytes <= end) {
548 bitmap_set_bits(block_group, bitmap_info, offset, bytes);
549 bytes = 0;
550 } else {
551 BUG();
552 }
553
554 if (!bytes) {
555 ret = 1;
556 goto out;
557 } else
558 goto again;
559
560new_bitmap:
561 if (info && info->bitmap) {
562 add_new_bitmap(block_group, info, offset);
563 added = 1;
564 info = NULL;
565 goto again;
566 } else {
567 spin_unlock(&block_group->tree_lock);
568
569 /* no pre-allocated info, allocate a new one */
570 if (!info) {
571 info = kzalloc(sizeof(struct btrfs_free_space),
572 GFP_NOFS);
573 if (!info) {
574 spin_lock(&block_group->tree_lock);
575 ret = -ENOMEM;
576 goto out;
577 }
578 }
579
580 /* allocate the bitmap */
581 info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
582 spin_lock(&block_group->tree_lock);
583 if (!info->bitmap) {
584 ret = -ENOMEM;
585 goto out;
586 }
587 goto again;
588 }
589
590out:
591 if (info) {
592 if (info->bitmap)
593 kfree(info->bitmap);
594 kfree(info);
595 }
204 596
205 return ret; 597 return ret;
206} 598}
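search_bitmap() above is, at heart, a scan for a run of at least N consecutive set bits. A minimal model in plain C, with a hand-rolled test_bit() standing in for the kernel's find_next_bit()/find_next_zero_bit():

#include <stddef.h>
#include <stdbool.h>

static bool test_bit(const unsigned long *map, size_t i)
{
	return map[i / (8 * sizeof(unsigned long))] >>
	       (i % (8 * sizeof(unsigned long))) & 1;
}

/* return the start of the first run of at least 'bits' set bits in a
 * bitmap of 'nbits' bits, or -1 if no such run exists */
static long find_set_run(const unsigned long *map, size_t nbits, size_t bits)
{
	size_t run = 0;

	for (size_t i = 0; i < nbits; i++) {
		run = test_bit(map, i) ? run + 1 : 0;
		if (run >= bits)
			return (long)(i - bits + 1);
	}
	return -1;
}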
@@ -208,8 +600,8 @@ static int link_free_space(struct btrfs_block_group_cache *block_group,
208int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, 600int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
209 u64 offset, u64 bytes) 601 u64 offset, u64 bytes)
210{ 602{
211 struct btrfs_free_space *right_info; 603 struct btrfs_free_space *right_info = NULL;
212 struct btrfs_free_space *left_info; 604 struct btrfs_free_space *left_info = NULL;
213 struct btrfs_free_space *info = NULL; 605 struct btrfs_free_space *info = NULL;
214 int ret = 0; 606 int ret = 0;
215 607
@@ -227,18 +619,38 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
227 * are adding, if there is remove that struct and add a new one to 619 * are adding, if there is remove that struct and add a new one to
228 * cover the entire range 620 * cover the entire range
229 */ 621 */
230 right_info = tree_search_offset(&block_group->free_space_offset, 622 right_info = tree_search_offset(block_group, offset + bytes, 0, 0);
231 offset+bytes, 0, 0); 623 if (right_info && rb_prev(&right_info->offset_index))
232 left_info = tree_search_offset(&block_group->free_space_offset, 624 left_info = rb_entry(rb_prev(&right_info->offset_index),
233 offset-1, 0, 1); 625 struct btrfs_free_space, offset_index);
626 else
627 left_info = tree_search_offset(block_group, offset - 1, 0, 0);
628
629 /*
630 * If there was no extent directly to the left or right of this new
631 * extent then we know we're going to have to allocate a new extent, so
632 * before we do that see if we need to drop this into a bitmap
633 */
634 if ((!left_info || left_info->bitmap) &&
635 (!right_info || right_info->bitmap)) {
636 ret = insert_into_bitmap(block_group, info);
637
638 if (ret < 0) {
639 goto out;
640 } else if (ret) {
641 ret = 0;
642 goto out;
643 }
644 }
234 645
235 if (right_info) { 646 if (right_info && !right_info->bitmap) {
236 unlink_free_space(block_group, right_info); 647 unlink_free_space(block_group, right_info);
237 info->bytes += right_info->bytes; 648 info->bytes += right_info->bytes;
238 kfree(right_info); 649 kfree(right_info);
239 } 650 }
240 651
241 if (left_info && left_info->offset + left_info->bytes == offset) { 652 if (left_info && !left_info->bitmap &&
653 left_info->offset + left_info->bytes == offset) {
242 unlink_free_space(block_group, left_info); 654 unlink_free_space(block_group, left_info);
243 info->offset = left_info->offset; 655 info->offset = left_info->offset;
244 info->bytes += left_info->bytes; 656 info->bytes += left_info->bytes;
@@ -248,11 +660,11 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
248 ret = link_free_space(block_group, info); 660 ret = link_free_space(block_group, info);
249 if (ret) 661 if (ret)
250 kfree(info); 662 kfree(info);
251 663out:
252 spin_unlock(&block_group->tree_lock); 664 spin_unlock(&block_group->tree_lock);
253 665
254 if (ret) { 666 if (ret) {
255 printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret); 667 printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret);
256 BUG_ON(ret == -EEXIST); 668 BUG_ON(ret == -EEXIST);
257 } 669 }
258 670
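The merge step in btrfs_add_free_space() absorbs a right neighbor that starts exactly where the new range ends and a left neighbor whose end touches its start, skipping bitmap entries in both directions. A sketch of just that decision; struct extent and coalesce() are illustrative names:

#include <stdbool.h>
#include <stdint.h>

struct extent { uint64_t offset, bytes; bool bitmap; };

/* merge 'left'/'right' into the new range [*offset, *offset + *bytes)
 * when they are plain extents that touch it; returns how many
 * neighbors were absorbed so the caller can unlink and free them */
static int coalesce(const struct extent *left, const struct extent *right,
		    uint64_t *offset, uint64_t *bytes)
{
	int absorbed = 0;

	if (right && !right->bitmap && right->offset == *offset + *bytes) {
		*bytes += right->bytes;
		absorbed++;
	}
	if (left && !left->bitmap && left->offset + left->bytes == *offset) {
		*bytes += left->bytes;
		*offset = left->offset;
		absorbed++;
	}
	return absorbed;
}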
@@ -263,40 +675,74 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
263 u64 offset, u64 bytes) 675 u64 offset, u64 bytes)
264{ 676{
265 struct btrfs_free_space *info; 677 struct btrfs_free_space *info;
678 struct btrfs_free_space *next_info = NULL;
266 int ret = 0; 679 int ret = 0;
267 680
268 spin_lock(&block_group->tree_lock); 681 spin_lock(&block_group->tree_lock);
269 682
270 info = tree_search_offset(&block_group->free_space_offset, offset, 0, 683again:
271 1); 684 info = tree_search_offset(block_group, offset, 0, 0);
272 if (info && info->offset == offset) { 685 if (!info) {
273 if (info->bytes < bytes) { 686 /*
274 printk(KERN_ERR "Found free space at %llu, size %llu," 687 * oops didn't find an extent that matched the space we wanted
275 "trying to use %llu\n", 688 * to remove, look for a bitmap instead
276 (unsigned long long)info->offset, 689 */
277 (unsigned long long)info->bytes, 690 info = tree_search_offset(block_group,
278 (unsigned long long)bytes); 691 offset_to_bitmap(block_group, offset),
692 1, 0);
693 if (!info) {
694 WARN_ON(1);
695 goto out_lock;
696 }
697 }
698
699 if (info->bytes < bytes && rb_next(&info->offset_index)) {
700 u64 end;
701 next_info = rb_entry(rb_next(&info->offset_index),
702 struct btrfs_free_space,
703 offset_index);
704
705 if (next_info->bitmap)
706 end = next_info->offset + BITS_PER_BITMAP *
707 block_group->sectorsize - 1;
708 else
709 end = next_info->offset + next_info->bytes;
710
711 if (next_info->bytes < bytes ||
712 next_info->offset > offset || offset > end) {
713 printk(KERN_CRIT "Found free space at %llu, size %llu,"
714 " trying to use %llu\n",
715 (unsigned long long)info->offset,
716 (unsigned long long)info->bytes,
717 (unsigned long long)bytes);
279 WARN_ON(1); 718 WARN_ON(1);
280 ret = -EINVAL; 719 ret = -EINVAL;
281 spin_unlock(&block_group->tree_lock); 720 goto out_lock;
282 goto out;
283 } 721 }
284 unlink_free_space(block_group, info);
285 722
286 if (info->bytes == bytes) { 723 info = next_info;
287 kfree(info); 724 }
288 spin_unlock(&block_group->tree_lock); 725
289 goto out; 726 if (info->bytes == bytes) {
727 unlink_free_space(block_group, info);
728 if (info->bitmap) {
729 kfree(info->bitmap);
730 block_group->total_bitmaps--;
290 } 731 }
732 kfree(info);
733 goto out_lock;
734 }
291 735
736 if (!info->bitmap && info->offset == offset) {
737 unlink_free_space(block_group, info);
292 info->offset += bytes; 738 info->offset += bytes;
293 info->bytes -= bytes; 739 info->bytes -= bytes;
740 link_free_space(block_group, info);
741 goto out_lock;
742 }
294 743
295 ret = link_free_space(block_group, info); 744 if (!info->bitmap && info->offset <= offset &&
296 spin_unlock(&block_group->tree_lock); 745 info->offset + info->bytes >= offset + bytes) {
297 BUG_ON(ret);
298 } else if (info && info->offset < offset &&
299 info->offset + info->bytes >= offset + bytes) {
300 u64 old_start = info->offset; 746 u64 old_start = info->offset;
301 /* 747 /*
302 * we're freeing space in the middle of the info, 748 * we're freeing space in the middle of the info,
@@ -312,7 +758,9 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
312 info->offset = offset + bytes; 758 info->offset = offset + bytes;
313 info->bytes = old_end - info->offset; 759 info->bytes = old_end - info->offset;
314 ret = link_free_space(block_group, info); 760 ret = link_free_space(block_group, info);
315 BUG_ON(ret); 761 WARN_ON(ret);
762 if (ret)
763 goto out_lock;
316 } else { 764 } else {
317 /* the hole we're creating ends at the end 765 /* the hole we're creating ends at the end
318 * of the info struct, just free the info 766 * of the info struct, just free the info
@@ -320,32 +768,22 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
320 kfree(info); 768 kfree(info);
321 } 769 }
322 spin_unlock(&block_group->tree_lock); 770 spin_unlock(&block_group->tree_lock);
323 /* step two, insert a new info struct to cover anything 771
324 * before the hole 772 /* step two, insert a new info struct to cover
773 * anything before the hole
325 */ 774 */
326 ret = btrfs_add_free_space(block_group, old_start, 775 ret = btrfs_add_free_space(block_group, old_start,
327 offset - old_start); 776 offset - old_start);
328 BUG_ON(ret); 777 WARN_ON(ret);
329 } else { 778 goto out;
330 spin_unlock(&block_group->tree_lock);
331 if (!info) {
332 printk(KERN_ERR "couldn't find space %llu to free\n",
333 (unsigned long long)offset);
334 printk(KERN_ERR "cached is %d, offset %llu bytes %llu\n",
335 block_group->cached,
336 (unsigned long long)block_group->key.objectid,
337 (unsigned long long)block_group->key.offset);
338 btrfs_dump_free_space(block_group, bytes);
339 } else if (info) {
340 printk(KERN_ERR "hmm, found offset=%llu bytes=%llu, "
341 "but wanted offset=%llu bytes=%llu\n",
342 (unsigned long long)info->offset,
343 (unsigned long long)info->bytes,
344 (unsigned long long)offset,
345 (unsigned long long)bytes);
346 }
347 WARN_ON(1);
348 } 779 }
780
781 ret = remove_from_bitmap(block_group, info, &offset, &bytes);
782 if (ret == -EAGAIN)
783 goto again;
784 BUG_ON(ret);
785out_lock:
786 spin_unlock(&block_group->tree_lock);
349out: 787out:
350 return ret; 788 return ret;
351} 789}
@@ -361,10 +799,13 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
361 info = rb_entry(n, struct btrfs_free_space, offset_index); 799 info = rb_entry(n, struct btrfs_free_space, offset_index);
362 if (info->bytes >= bytes) 800 if (info->bytes >= bytes)
363 count++; 801 count++;
364 printk(KERN_ERR "entry offset %llu, bytes %llu\n", 802 printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
365 (unsigned long long)info->offset, 803 (unsigned long long)info->offset,
366 (unsigned long long)info->bytes); 804 (unsigned long long)info->bytes,
805 (info->bitmap) ? "yes" : "no");
367 } 806 }
807 printk(KERN_INFO "block group has cluster?: %s\n",
808 list_empty(&block_group->cluster_list) ? "no" : "yes");
368 printk(KERN_INFO "%d blocks of free space at or bigger than bytes is" 809 printk(KERN_INFO "%d blocks of free space at or bigger than bytes is"
369 "\n", count); 810 "\n", count);
370} 811}
@@ -397,26 +838,35 @@ __btrfs_return_cluster_to_free_space(
397{ 838{
398 struct btrfs_free_space *entry; 839 struct btrfs_free_space *entry;
399 struct rb_node *node; 840 struct rb_node *node;
841 bool bitmap;
400 842
401 spin_lock(&cluster->lock); 843 spin_lock(&cluster->lock);
402 if (cluster->block_group != block_group) 844 if (cluster->block_group != block_group)
403 goto out; 845 goto out;
404 846
847 bitmap = cluster->points_to_bitmap;
848 cluster->block_group = NULL;
405 cluster->window_start = 0; 849 cluster->window_start = 0;
850 list_del_init(&cluster->block_group_list);
851 cluster->points_to_bitmap = false;
852
853 if (bitmap)
854 goto out;
855
406 node = rb_first(&cluster->root); 856 node = rb_first(&cluster->root);
407 while(node) { 857 while (node) {
408 entry = rb_entry(node, struct btrfs_free_space, offset_index); 858 entry = rb_entry(node, struct btrfs_free_space, offset_index);
409 node = rb_next(&entry->offset_index); 859 node = rb_next(&entry->offset_index);
410 rb_erase(&entry->offset_index, &cluster->root); 860 rb_erase(&entry->offset_index, &cluster->root);
411 link_free_space(block_group, entry); 861 BUG_ON(entry->bitmap);
862 tree_insert_offset(&block_group->free_space_offset,
863 entry->offset, &entry->offset_index, 0);
412 } 864 }
413 list_del_init(&cluster->block_group_list);
414
415 btrfs_put_block_group(cluster->block_group);
416 cluster->block_group = NULL;
417 cluster->root.rb_node = NULL; 865 cluster->root.rb_node = NULL;
866
418out: 867out:
419 spin_unlock(&cluster->lock); 868 spin_unlock(&cluster->lock);
869 btrfs_put_block_group(block_group);
420 return 0; 870 return 0;
421} 871}
422 872
@@ -425,20 +875,28 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
425 struct btrfs_free_space *info; 875 struct btrfs_free_space *info;
426 struct rb_node *node; 876 struct rb_node *node;
427 struct btrfs_free_cluster *cluster; 877 struct btrfs_free_cluster *cluster;
428 struct btrfs_free_cluster *safe; 878 struct list_head *head;
429 879
430 spin_lock(&block_group->tree_lock); 880 spin_lock(&block_group->tree_lock);
431 881 while ((head = block_group->cluster_list.next) !=
432 list_for_each_entry_safe(cluster, safe, &block_group->cluster_list, 882 &block_group->cluster_list) {
433 block_group_list) { 883 cluster = list_entry(head, struct btrfs_free_cluster,
884 block_group_list);
434 885
435 WARN_ON(cluster->block_group != block_group); 886 WARN_ON(cluster->block_group != block_group);
436 __btrfs_return_cluster_to_free_space(block_group, cluster); 887 __btrfs_return_cluster_to_free_space(block_group, cluster);
888 if (need_resched()) {
889 spin_unlock(&block_group->tree_lock);
890 cond_resched();
891 spin_lock(&block_group->tree_lock);
892 }
437 } 893 }
438 894
439 while ((node = rb_last(&block_group->free_space_bytes)) != NULL) { 895 while ((node = rb_last(&block_group->free_space_offset)) != NULL) {
440 info = rb_entry(node, struct btrfs_free_space, bytes_index); 896 info = rb_entry(node, struct btrfs_free_space, offset_index);
441 unlink_free_space(block_group, info); 897 unlink_free_space(block_group, info);
898 if (info->bitmap)
899 kfree(info->bitmap);
442 kfree(info); 900 kfree(info);
443 if (need_resched()) { 901 if (need_resched()) {
444 spin_unlock(&block_group->tree_lock); 902 spin_unlock(&block_group->tree_lock);
@@ -446,6 +904,7 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
446 spin_lock(&block_group->tree_lock); 904 spin_lock(&block_group->tree_lock);
447 } 905 }
448 } 906 }
907
449 spin_unlock(&block_group->tree_lock); 908 spin_unlock(&block_group->tree_lock);
450} 909}
451 910
@@ -453,25 +912,35 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
453 u64 offset, u64 bytes, u64 empty_size) 912 u64 offset, u64 bytes, u64 empty_size)
454{ 913{
455 struct btrfs_free_space *entry = NULL; 914 struct btrfs_free_space *entry = NULL;
915 u64 bytes_search = bytes + empty_size;
456 u64 ret = 0; 916 u64 ret = 0;
457 917
458 spin_lock(&block_group->tree_lock); 918 spin_lock(&block_group->tree_lock);
459 entry = tree_search_offset(&block_group->free_space_offset, offset, 919 entry = find_free_space(block_group, &offset, &bytes_search, 0);
460 bytes + empty_size, 1);
461 if (!entry) 920 if (!entry)
462 entry = tree_search_bytes(&block_group->free_space_bytes, 921 goto out;
463 offset, bytes + empty_size); 922
464 if (entry) { 923 ret = offset;
924 if (entry->bitmap) {
925 bitmap_clear_bits(block_group, entry, offset, bytes);
926 if (!entry->bytes) {
927 unlink_free_space(block_group, entry);
928 kfree(entry->bitmap);
929 kfree(entry);
930 block_group->total_bitmaps--;
931 recalculate_thresholds(block_group);
932 }
933 } else {
465 unlink_free_space(block_group, entry); 934 unlink_free_space(block_group, entry);
466 ret = entry->offset;
467 entry->offset += bytes; 935 entry->offset += bytes;
468 entry->bytes -= bytes; 936 entry->bytes -= bytes;
469
470 if (!entry->bytes) 937 if (!entry->bytes)
471 kfree(entry); 938 kfree(entry);
472 else 939 else
473 link_free_space(block_group, entry); 940 link_free_space(block_group, entry);
474 } 941 }
942
943out:
475 spin_unlock(&block_group->tree_lock); 944 spin_unlock(&block_group->tree_lock);
476 945
477 return ret; 946 return ret;
@@ -517,6 +986,54 @@ int btrfs_return_cluster_to_free_space(
517 return ret; 986 return ret;
518} 987}
519 988
989static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
990 struct btrfs_free_cluster *cluster,
991 u64 bytes, u64 min_start)
992{
993 struct btrfs_free_space *entry;
994 int err;
995 u64 search_start = cluster->window_start;
996 u64 search_bytes = bytes;
997 u64 ret = 0;
998
999 spin_lock(&block_group->tree_lock);
1000 spin_lock(&cluster->lock);
1001
1002 if (!cluster->points_to_bitmap)
1003 goto out;
1004
1005 if (cluster->block_group != block_group)
1006 goto out;
1007
1008 /*
1009 * search_start is the beginning of the bitmap, but at some point it may
1010 * be a good idea to point to the actual start of the free area in the
1011 * bitmap, so do the offset_to_bitmap trick anyway, and set bitmap_only
1012 * to 1 to make sure we get the bitmap entry
1013 */
1014 entry = tree_search_offset(block_group,
1015 offset_to_bitmap(block_group, search_start),
1016 1, 0);
1017 if (!entry || !entry->bitmap)
1018 goto out;
1019
1020 search_start = min_start;
1021 search_bytes = bytes;
1022
1023 err = search_bitmap(block_group, entry, &search_start,
1024 &search_bytes);
1025 if (err)
1026 goto out;
1027
1028 ret = search_start;
1029 bitmap_clear_bits(block_group, entry, ret, bytes);
1030out:
1031 spin_unlock(&cluster->lock);
1032 spin_unlock(&block_group->tree_lock);
1033
1034 return ret;
1035}
1036
520/* 1037/*
521 * given a cluster, try to allocate 'bytes' from it, returns 0 1038 * given a cluster, try to allocate 'bytes' from it, returns 0
522 * if it couldn't find anything suitably large, or a logical disk offset 1039 * if it couldn't find anything suitably large, or a logical disk offset
@@ -530,6 +1047,10 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
530 struct rb_node *node; 1047 struct rb_node *node;
531 u64 ret = 0; 1048 u64 ret = 0;
532 1049
1050 if (cluster->points_to_bitmap)
1051 return btrfs_alloc_from_bitmap(block_group, cluster, bytes,
1052 min_start);
1053
533 spin_lock(&cluster->lock); 1054 spin_lock(&cluster->lock);
534 if (bytes > cluster->max_size) 1055 if (bytes > cluster->max_size)
535 goto out; 1056 goto out;
@@ -567,9 +1088,73 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
567 } 1088 }
568out: 1089out:
569 spin_unlock(&cluster->lock); 1090 spin_unlock(&cluster->lock);
1091
570 return ret; 1092 return ret;
571} 1093}
572 1094
1095static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
1096 struct btrfs_free_space *entry,
1097 struct btrfs_free_cluster *cluster,
1098 u64 offset, u64 bytes, u64 min_bytes)
1099{
1100 unsigned long next_zero;
1101 unsigned long i;
1102 unsigned long search_bits;
1103 unsigned long total_bits;
1104 unsigned long found_bits;
1105 unsigned long start = 0;
1106 unsigned long total_found = 0;
1107 bool found = false;
1108
1109 i = offset_to_bit(entry->offset, block_group->sectorsize,
1110 max_t(u64, offset, entry->offset));
1111 search_bits = bytes_to_bits(min_bytes, block_group->sectorsize);
1112 total_bits = bytes_to_bits(bytes, block_group->sectorsize);
1113
1114again:
1115 found_bits = 0;
1116 for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i);
1117 i < BITS_PER_BITMAP;
1118 i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) {
1119 next_zero = find_next_zero_bit(entry->bitmap,
1120 BITS_PER_BITMAP, i);
1121 if (next_zero - i >= search_bits) {
1122 found_bits = next_zero - i;
1123 break;
1124 }
1125 i = next_zero;
1126 }
1127
1128 if (!found_bits)
1129 return -1;
1130
1131 if (!found) {
1132 start = i;
1133 found = true;
1134 }
1135
1136 total_found += found_bits;
1137
1138 if (cluster->max_size < found_bits * block_group->sectorsize)
1139 cluster->max_size = found_bits * block_group->sectorsize;
1140
1141 if (total_found < total_bits) {
1142 i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, next_zero);
1143 if (i - start > total_bits * 2) {
1144 total_found = 0;
1145 cluster->max_size = 0;
1146 found = false;
1147 }
1148 goto again;
1149 }
1150
1151 cluster->window_start = start * block_group->sectorsize +
1152 entry->offset;
1153 cluster->points_to_bitmap = true;
1154
1155 return 0;
1156}
1157
573/* 1158/*
574 * here we try to find a cluster of blocks in a block group. The goal 1159 * here we try to find a cluster of blocks in a block group. The goal
575 * is to find at least bytes free and up to empty_size + bytes free. 1160 * is to find at least bytes free and up to empty_size + bytes free.
@@ -587,12 +1172,12 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
587 struct btrfs_free_space *entry = NULL; 1172 struct btrfs_free_space *entry = NULL;
588 struct rb_node *node; 1173 struct rb_node *node;
589 struct btrfs_free_space *next; 1174 struct btrfs_free_space *next;
590 struct btrfs_free_space *last; 1175 struct btrfs_free_space *last = NULL;
591 u64 min_bytes; 1176 u64 min_bytes;
592 u64 window_start; 1177 u64 window_start;
593 u64 window_free; 1178 u64 window_free;
594 u64 max_extent = 0; 1179 u64 max_extent = 0;
595 int total_retries = 0; 1180 bool found_bitmap = false;
596 int ret; 1181 int ret;
597 1182
598 /* for metadata, allow allocates with more holes */ 1183 /* for metadata, allow allocates with more holes */
@@ -620,31 +1205,80 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
620 goto out; 1205 goto out;
621 } 1206 }
622again: 1207again:
623 min_bytes = min(min_bytes, bytes + empty_size); 1208 entry = tree_search_offset(block_group, offset, found_bitmap, 1);
624 entry = tree_search_bytes(&block_group->free_space_bytes,
625 offset, min_bytes);
626 if (!entry) { 1209 if (!entry) {
627 ret = -ENOSPC; 1210 ret = -ENOSPC;
628 goto out; 1211 goto out;
629 } 1212 }
1213
1214 /*
1215 * If found_bitmap is true, we have exhausted our search for extent
1216 * entries and only want to search the bitmaps we can find, ignoring
1217 * any extent entries we come across.
1218 */
1219 while (entry->bitmap || found_bitmap ||
1220 (!entry->bitmap && entry->bytes < min_bytes)) {
1221 struct rb_node *node = rb_next(&entry->offset_index);
1222
1223 if (entry->bitmap && entry->bytes > bytes + empty_size) {
1224 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
1225 offset, bytes + empty_size,
1226 min_bytes);
1227 if (!ret)
1228 goto got_it;
1229 }
1230
1231 if (!node) {
1232 ret = -ENOSPC;
1233 goto out;
1234 }
1235 entry = rb_entry(node, struct btrfs_free_space, offset_index);
1236 }
1237
1238 /*
1239 * We already searched all the extent entries from the passed-in offset
1240 * to the end and didn't find enough space for the cluster, and we
1241 * didn't find any bitmaps that met our criteria either, so just exit.
1242 */
1243 if (found_bitmap) {
1244 ret = -ENOSPC;
1245 goto out;
1246 }
1247
1248 cluster->points_to_bitmap = false;
630 window_start = entry->offset; 1249 window_start = entry->offset;
631 window_free = entry->bytes; 1250 window_free = entry->bytes;
632 last = entry; 1251 last = entry;
633 max_extent = entry->bytes; 1252 max_extent = entry->bytes;
634 1253
635 while(1) { 1254 while (1) {
636 /* our window is just right, let's fill it */ 1255 /* our window is just right, let's fill it */
637 if (window_free >= bytes + empty_size) 1256 if (window_free >= bytes + empty_size)
638 break; 1257 break;
639 1258
640 node = rb_next(&last->offset_index); 1259 node = rb_next(&last->offset_index);
641 if (!node) { 1260 if (!node) {
1261 if (found_bitmap)
1262 goto again;
642 ret = -ENOSPC; 1263 ret = -ENOSPC;
643 goto out; 1264 goto out;
644 } 1265 }
645 next = rb_entry(node, struct btrfs_free_space, offset_index); 1266 next = rb_entry(node, struct btrfs_free_space, offset_index);
646 1267
647 /* 1268 /*
1269 * we found a bitmap, so if this search doesn't result in a
1270 * cluster, we know to go and search again for the bitmaps and
1271 * start looking for space there
1272 */
1273 if (next->bitmap) {
1274 if (!found_bitmap)
1275 offset = next->offset;
1276 found_bitmap = true;
1277 last = next;
1278 continue;
1279 }
1280
1281 /*
648 * we haven't filled the empty size and the window is 1282 * we haven't filled the empty size and the window is
649 * very large. reset and try again 1283 * very large. reset and try again
650 */ 1284 */
@@ -655,19 +1289,6 @@ again:
655 window_free = entry->bytes; 1289 window_free = entry->bytes;
656 last = entry; 1290 last = entry;
657 max_extent = 0; 1291 max_extent = 0;
658 total_retries++;
659 if (total_retries % 64 == 0) {
660 if (min_bytes >= (bytes + empty_size)) {
661 ret = -ENOSPC;
662 goto out;
663 }
664 /*
665 * grow our allocation a bit, we're not having
666 * much luck
667 */
668 min_bytes *= 2;
669 goto again;
670 }
671 } else { 1292 } else {
672 last = next; 1293 last = next;
673 window_free += next->bytes; 1294 window_free += next->bytes;
@@ -685,11 +1306,19 @@ again:
685 * The cluster includes an rbtree, but only uses the offset index 1306 * The cluster includes an rbtree, but only uses the offset index
686 * of each free space cache entry. 1307 * of each free space cache entry.
687 */ 1308 */
688 while(1) { 1309 while (1) {
689 node = rb_next(&entry->offset_index); 1310 node = rb_next(&entry->offset_index);
690 unlink_free_space(block_group, entry); 1311 if (entry->bitmap && node) {
1312 entry = rb_entry(node, struct btrfs_free_space,
1313 offset_index);
1314 continue;
1315 } else if (entry->bitmap && !node) {
1316 break;
1317 }
1318
1319 rb_erase(&entry->offset_index, &block_group->free_space_offset);
691 ret = tree_insert_offset(&cluster->root, entry->offset, 1320 ret = tree_insert_offset(&cluster->root, entry->offset,
692 &entry->offset_index); 1321 &entry->offset_index, 0);
693 BUG_ON(ret); 1322 BUG_ON(ret);
694 1323
695 if (!node || entry == last) 1324 if (!node || entry == last)
@@ -697,8 +1326,10 @@ again:
697 1326
698 entry = rb_entry(node, struct btrfs_free_space, offset_index); 1327 entry = rb_entry(node, struct btrfs_free_space, offset_index);
699 } 1328 }
700 ret = 0; 1329
701 cluster->max_size = max_extent; 1330 cluster->max_size = max_extent;
1331got_it:
1332 ret = 0;
702 atomic_inc(&block_group->count); 1333 atomic_inc(&block_group->count);
703 list_add_tail(&cluster->block_group_list, &block_group->cluster_list); 1334 list_add_tail(&cluster->block_group_list, &block_group->cluster_list);
704 cluster->block_group = block_group; 1335 cluster->block_group = block_group;
@@ -718,6 +1349,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
718 spin_lock_init(&cluster->refill_lock); 1349 spin_lock_init(&cluster->refill_lock);
719 cluster->root.rb_node = NULL; 1350 cluster->root.rb_node = NULL;
720 cluster->max_size = 0; 1351 cluster->max_size = 0;
1352 cluster->points_to_bitmap = false;
721 INIT_LIST_HEAD(&cluster->block_group_list); 1353 INIT_LIST_HEAD(&cluster->block_group_list);
722 cluster->block_group = NULL; 1354 cluster->block_group = NULL;
723} 1355}
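The points_to_bitmap flag initialised above is what the allocation side keys off: a cluster either owns extent entries in its private rb-tree or merely records a window into a bitmap. A trimmed-down, hypothetical sketch of that dispatch (types and names are stand-ins, not the btrfs structures):

#include <stdio.h>
#include <stdbool.h>

struct cluster {
        bool points_to_bitmap;                  /* which allocation path */
        unsigned long long window_start;        /* only valid for bitmaps */
};

static const char *alloc_path(const struct cluster *c)
{
        if (c->points_to_bitmap)
                return "search bits from window_start";
        return "pop entries from the cluster rb-tree";
}

int main(void)
{
        struct cluster c = { .points_to_bitmap = true, .window_start = 4096 };

        printf("%s (window at %llu)\n", alloc_path(&c), c.window_start);
        return 0;
}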
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index 266fb8764054..890a8e79011b 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -19,6 +19,14 @@
19#ifndef __BTRFS_FREE_SPACE_CACHE 19#ifndef __BTRFS_FREE_SPACE_CACHE
20#define __BTRFS_FREE_SPACE_CACHE 20#define __BTRFS_FREE_SPACE_CACHE
21 21
22struct btrfs_free_space {
23 struct rb_node offset_index;
24 u64 offset;
25 u64 bytes;
26 unsigned long *bitmap;
27 struct list_head list;
28};
29
22int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, 30int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
23 u64 bytenr, u64 size); 31 u64 bytenr, u64 size);
24int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, 32int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 791eab19e330..59cba180fe83 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2603,8 +2603,8 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2603 if (root->ref_cows) 2603 if (root->ref_cows)
2604 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0); 2604 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
2605 path = btrfs_alloc_path(); 2605 path = btrfs_alloc_path();
2606 path->reada = -1;
2607 BUG_ON(!path); 2606 BUG_ON(!path);
2607 path->reada = -1;
2608 2608
2609 /* FIXME, add redo link to tree so we don't leak on crash */ 2609 /* FIXME, add redo link to tree so we don't leak on crash */
2610 key.objectid = inode->i_ino; 2610 key.objectid = inode->i_ino;
@@ -3099,8 +3099,12 @@ static void inode_tree_add(struct inode *inode)
3099{ 3099{
3100 struct btrfs_root *root = BTRFS_I(inode)->root; 3100 struct btrfs_root *root = BTRFS_I(inode)->root;
3101 struct btrfs_inode *entry; 3101 struct btrfs_inode *entry;
3102 struct rb_node **p = &root->inode_tree.rb_node; 3102 struct rb_node **p;
3103 struct rb_node *parent = NULL; 3103 struct rb_node *parent;
3104
3105again:
3106 p = &root->inode_tree.rb_node;
3107 parent = NULL;
3104 3108
3105 spin_lock(&root->inode_lock); 3109 spin_lock(&root->inode_lock);
3106 while (*p) { 3110 while (*p) {
@@ -3108,13 +3112,16 @@ static void inode_tree_add(struct inode *inode)
3108 entry = rb_entry(parent, struct btrfs_inode, rb_node); 3112 entry = rb_entry(parent, struct btrfs_inode, rb_node);
3109 3113
3110 if (inode->i_ino < entry->vfs_inode.i_ino) 3114 if (inode->i_ino < entry->vfs_inode.i_ino)
3111 p = &(*p)->rb_left; 3115 p = &parent->rb_left;
3112 else if (inode->i_ino > entry->vfs_inode.i_ino) 3116 else if (inode->i_ino > entry->vfs_inode.i_ino)
3113 p = &(*p)->rb_right; 3117 p = &parent->rb_right;
3114 else { 3118 else {
3115 WARN_ON(!(entry->vfs_inode.i_state & 3119 WARN_ON(!(entry->vfs_inode.i_state &
3116 (I_WILL_FREE | I_FREEING | I_CLEAR))); 3120 (I_WILL_FREE | I_FREEING | I_CLEAR)));
3117 break; 3121 rb_erase(parent, &root->inode_tree);
3122 RB_CLEAR_NODE(parent);
3123 spin_unlock(&root->inode_lock);
3124 goto again;
3118 } 3125 }
3119 } 3126 }
3120 rb_link_node(&BTRFS_I(inode)->rb_node, parent, p); 3127 rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
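The hunk above changes inode_tree_add() from tolerating a duplicate key to evicting it and retrying, since an existing node with the same inode number can only belong to an inode that is being freed. A hedged sketch of that unlink-and-restart pattern, using a sorted singly linked list in place of the rb-tree (all names hypothetical; the kernel version also drops and retakes root->inode_lock around the retry):

#include <stdio.h>
#include <stdlib.h>

struct node {
        long key;
        struct node *next;
};

static void insert(struct node **head, struct node *nn)
{
        struct node **p;

again:
        for (p = head; *p && (*p)->key < nn->key; p = &(*p)->next)
                ;
        if (*p && (*p)->key == nn->key) {
                struct node *old = *p;

                /* a twin can only be a stale entry on its way out:
                 * unlink it and restart the walk from the root */
                *p = old->next;
                free(old);      /* demo only; the kernel leaves freeing
                                 * to the old inode's teardown */
                goto again;
        }
        nn->next = *p;          /* normal insertion point */
        *p = nn;
}

static struct node *mk(long key)
{
        struct node *n = malloc(sizeof(*n));

        n->key = key;
        n->next = NULL;
        return n;
}

int main(void)
{
        struct node *head = NULL;

        insert(&head, mk(5));
        insert(&head, mk(5));   /* evicts the first 5, then relinks */
        insert(&head, mk(2));
        for (struct node *n = head; n; n = n->next)
                printf("%ld ", n->key);
        printf("\n");
        return 0;
}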
@@ -3126,12 +3133,12 @@ static void inode_tree_del(struct inode *inode)
3126{ 3133{
3127 struct btrfs_root *root = BTRFS_I(inode)->root; 3134 struct btrfs_root *root = BTRFS_I(inode)->root;
3128 3135
3136 spin_lock(&root->inode_lock);
3129 if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) { 3137 if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
3130 spin_lock(&root->inode_lock);
3131 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree); 3138 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3132 spin_unlock(&root->inode_lock);
3133 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node); 3139 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3134 } 3140 }
3141 spin_unlock(&root->inode_lock);
3135} 3142}
3136 3143
3137static noinline void init_btrfs_i(struct inode *inode) 3144static noinline void init_btrfs_i(struct inode *inode)
@@ -4785,8 +4792,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
4785 * and the replacement file is large. Start IO on it now so 4792 * and the replacement file is large. Start IO on it now so
4786 * we don't add too much work to the end of the transaction 4793 * we don't add too much work to the end of the transaction
4787 */ 4794 */
4788 if (new_inode && old_inode && S_ISREG(old_inode->i_mode) && 4795 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
4789 new_inode->i_size &&
4790 old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT) 4796 old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
4791 filemap_flush(old_inode->i_mapping); 4797 filemap_flush(old_inode->i_mapping);
4792 4798
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 6d6523da0a30..0d126be22b63 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -309,7 +309,7 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
309 } 309 }
310 printk(KERN_INFO "node %llu level %d total ptrs %d free spc %u\n", 310 printk(KERN_INFO "node %llu level %d total ptrs %d free spc %u\n",
311 (unsigned long long)btrfs_header_bytenr(c), 311 (unsigned long long)btrfs_header_bytenr(c),
312 btrfs_header_level(c), nr, 312 level, nr,
313 (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr); 313 (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr);
314 for (i = 0; i < nr; i++) { 314 for (i = 0; i < nr; i++) {
315 btrfs_node_key_to_cpu(c, &key, i); 315 btrfs_node_key_to_cpu(c, &key, i);
@@ -326,10 +326,10 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
326 btrfs_level_size(root, level - 1), 326 btrfs_level_size(root, level - 1),
327 btrfs_node_ptr_generation(c, i)); 327 btrfs_node_ptr_generation(c, i));
328 if (btrfs_is_leaf(next) && 328 if (btrfs_is_leaf(next) &&
329 btrfs_header_level(c) != 1) 329 level != 1)
330 BUG(); 330 BUG();
331 if (btrfs_header_level(next) != 331 if (btrfs_header_level(next) !=
332 btrfs_header_level(c) - 1) 332 level - 1)
333 BUG(); 333 BUG();
334 btrfs_print_tree(root, next); 334 btrfs_print_tree(root, next);
335 free_extent_buffer(next); 335 free_extent_buffer(next);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 008397934778..c04f7f212602 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -670,6 +670,8 @@ again:
670 err = ret; 670 err = ret;
671 goto out; 671 goto out;
672 } 672 }
673 if (ret > 0 && path2->slots[level] > 0)
674 path2->slots[level]--;
673 675
674 eb = path2->nodes[level]; 676 eb = path2->nodes[level];
675 WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) != 677 WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) !=
@@ -1609,6 +1611,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
1609 BUG_ON(level == 0); 1611 BUG_ON(level == 0);
1610 path->lowest_level = level; 1612 path->lowest_level = level;
1611 ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0); 1613 ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
1614 path->lowest_level = 0;
1612 if (ret < 0) { 1615 if (ret < 0) {
1613 btrfs_free_path(path); 1616 btrfs_free_path(path);
1614 return ret; 1617 return ret;
@@ -2550,8 +2553,13 @@ int relocate_inode_pages(struct inode *inode, u64 start, u64 len)
2550 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT; 2553 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
2551 2554
2552 /* make sure the dirty trick played by the caller works */ 2555 /* make sure the dirty trick played by the caller works */
2553 ret = invalidate_inode_pages2_range(inode->i_mapping, 2556 while (1) {
2554 first_index, last_index); 2557 ret = invalidate_inode_pages2_range(inode->i_mapping,
2558 first_index, last_index);
2559 if (ret != -EBUSY)
2560 break;
2561 schedule_timeout(HZ/10);
2562 }
2555 if (ret) 2563 if (ret)
2556 goto out_unlock; 2564 goto out_unlock;
2557 2565
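The relocation change above turns a single invalidate_inode_pages2_range() call into a retry loop, since -EBUSY only means a page was temporarily pinned. A minimal userspace sketch of the same loop shape, with fake_invalidate() as a stand-in that succeeds on the third attempt (hypothetical names throughout):

#include <stdio.h>
#include <errno.h>
#include <unistd.h>

static int fake_invalidate(void)
{
        static int calls;

        return ++calls < 3 ? -EBUSY : 0;
}

int main(void)
{
        int ret;

        while (1) {
                ret = fake_invalidate();
                if (ret != -EBUSY)      /* retry only the transient case */
                        break;
                usleep(100 * 1000);     /* roughly HZ/10 in the patch */
        }
        printf("ret = %d\n", ret);
        return ret ? 1 : 0;
}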
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 2dbf1c1f56ee..cdbb5022da52 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -40,6 +40,12 @@ static noinline void put_transaction(struct btrfs_transaction *transaction)
40 } 40 }
41} 41}
42 42
43static noinline void switch_commit_root(struct btrfs_root *root)
44{
45 free_extent_buffer(root->commit_root);
46 root->commit_root = btrfs_root_node(root);
47}
48
43/* 49/*
44 * either allocate a new transaction or hop into the existing one 50 * either allocate a new transaction or hop into the existing one
45 */ 51 */
@@ -444,9 +450,6 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
444 450
445 btrfs_write_dirty_block_groups(trans, root); 451 btrfs_write_dirty_block_groups(trans, root);
446 452
447 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
448 BUG_ON(ret);
449
450 while (1) { 453 while (1) {
451 old_root_bytenr = btrfs_root_bytenr(&root->root_item); 454 old_root_bytenr = btrfs_root_bytenr(&root->root_item);
452 if (old_root_bytenr == root->node->start) 455 if (old_root_bytenr == root->node->start)
@@ -457,13 +460,14 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
457 &root->root_key, 460 &root->root_key,
458 &root->root_item); 461 &root->root_item);
459 BUG_ON(ret); 462 BUG_ON(ret);
460 btrfs_write_dirty_block_groups(trans, root);
461 463
462 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); 464 ret = btrfs_write_dirty_block_groups(trans, root);
463 BUG_ON(ret); 465 BUG_ON(ret);
464 } 466 }
465 free_extent_buffer(root->commit_root); 467
466 root->commit_root = btrfs_root_node(root); 468 if (root != root->fs_info->extent_root)
469 switch_commit_root(root);
470
467 return 0; 471 return 0;
468} 472}
469 473
@@ -495,10 +499,12 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
495 root = list_entry(next, struct btrfs_root, dirty_list); 499 root = list_entry(next, struct btrfs_root, dirty_list);
496 500
497 update_cowonly_root(trans, root); 501 update_cowonly_root(trans, root);
498
499 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
500 BUG_ON(ret);
501 } 502 }
503
504 down_write(&fs_info->extent_commit_sem);
505 switch_commit_root(fs_info->extent_root);
506 up_write(&fs_info->extent_commit_sem);
507
502 return 0; 508 return 0;
503} 509}
504 510
@@ -544,8 +550,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
544 btrfs_update_reloc_root(trans, root); 550 btrfs_update_reloc_root(trans, root);
545 551
546 if (root->commit_root != root->node) { 552 if (root->commit_root != root->node) {
547 free_extent_buffer(root->commit_root); 553 switch_commit_root(root);
548 root->commit_root = btrfs_root_node(root);
549 btrfs_set_root_node(&root->root_item, 554 btrfs_set_root_node(&root->root_item,
550 root->node); 555 root->node);
551 } 556 }
@@ -852,6 +857,16 @@ static void update_super_roots(struct btrfs_root *root)
852 super->root_level = root_item->level; 857 super->root_level = root_item->level;
853} 858}
854 859
860int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
861{
862 int ret = 0;
863 spin_lock(&info->new_trans_lock);
864 if (info->running_transaction)
865 ret = info->running_transaction->in_commit;
866 spin_unlock(&info->new_trans_lock);
867 return ret;
868}
869
855int btrfs_commit_transaction(struct btrfs_trans_handle *trans, 870int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
856 struct btrfs_root *root) 871 struct btrfs_root *root)
857{ 872{
@@ -943,9 +958,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
943 958
944 mutex_unlock(&root->fs_info->trans_mutex); 959 mutex_unlock(&root->fs_info->trans_mutex);
945 960
946 if (flush_on_commit || snap_pending) { 961 if (flush_on_commit) {
947 if (flush_on_commit) 962 btrfs_start_delalloc_inodes(root);
948 btrfs_start_delalloc_inodes(root); 963 ret = btrfs_wait_ordered_extents(root, 0);
964 BUG_ON(ret);
965 } else if (snap_pending) {
949 ret = btrfs_wait_ordered_extents(root, 1); 966 ret = btrfs_wait_ordered_extents(root, 1);
950 BUG_ON(ret); 967 BUG_ON(ret);
951 } 968 }
@@ -1009,15 +1026,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1009 1026
1010 btrfs_set_root_node(&root->fs_info->tree_root->root_item, 1027 btrfs_set_root_node(&root->fs_info->tree_root->root_item,
1011 root->fs_info->tree_root->node); 1028 root->fs_info->tree_root->node);
1012 free_extent_buffer(root->fs_info->tree_root->commit_root); 1029 switch_commit_root(root->fs_info->tree_root);
1013 root->fs_info->tree_root->commit_root =
1014 btrfs_root_node(root->fs_info->tree_root);
1015 1030
1016 btrfs_set_root_node(&root->fs_info->chunk_root->root_item, 1031 btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
1017 root->fs_info->chunk_root->node); 1032 root->fs_info->chunk_root->node);
1018 free_extent_buffer(root->fs_info->chunk_root->commit_root); 1033 switch_commit_root(root->fs_info->chunk_root);
1019 root->fs_info->chunk_root->commit_root =
1020 btrfs_root_node(root->fs_info->chunk_root);
1021 1034
1022 update_super_roots(root); 1035 update_super_roots(root);
1023 1036
@@ -1057,6 +1070,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1057 cur_trans->commit_done = 1; 1070 cur_trans->commit_done = 1;
1058 1071
1059 root->fs_info->last_trans_committed = cur_trans->transid; 1072 root->fs_info->last_trans_committed = cur_trans->transid;
1073
1060 wake_up(&cur_trans->commit_wait); 1074 wake_up(&cur_trans->commit_wait);
1061 1075
1062 put_transaction(cur_trans); 1076 put_transaction(cur_trans);
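btrfs_transaction_in_commit(), added above, samples the in_commit flag under new_trans_lock because the running-transaction pointer can be swapped or cleared at any moment. A small pthread-based sketch of that lock-then-sample pattern (struct names are stand-ins, not the btrfs definitions):

#include <stdio.h>
#include <pthread.h>
#include <stdbool.h>

struct transaction {
        bool in_commit;
};

struct fs_info {
        pthread_mutex_t new_trans_lock;
        struct transaction *running;    /* may be NULL or change anytime */
};

static bool transaction_in_commit(struct fs_info *info)
{
        bool ret = false;

        pthread_mutex_lock(&info->new_trans_lock);
        if (info->running)
                ret = info->running->in_commit;
        pthread_mutex_unlock(&info->new_trans_lock);
        return ret;
}

int main(void)
{
        struct transaction t = { .in_commit = true };
        struct fs_info fi = { PTHREAD_MUTEX_INITIALIZER, &t };

        printf("%d\n", transaction_in_commit(&fi));
        return 0;
}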
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 961c3ee5a2e1..663c67404918 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -107,4 +107,5 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
107 struct btrfs_root *root); 107 struct btrfs_root *root);
108int btrfs_write_and_wait_marked_extents(struct btrfs_root *root, 108int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
109 struct extent_io_tree *dirty_pages); 109 struct extent_io_tree *dirty_pages);
110int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
110#endif 111#endif
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index c13922206d1b..d91b0de7c502 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -797,7 +797,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
797 return -ENOENT; 797 return -ENOENT;
798 798
799 inode = read_one_inode(root, key->objectid); 799 inode = read_one_inode(root, key->objectid);
800 BUG_ON(!dir); 800 BUG_ON(!inode);
801 801
802 ref_ptr = btrfs_item_ptr_offset(eb, slot); 802 ref_ptr = btrfs_item_ptr_offset(eb, slot);
803 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot); 803 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 3ab80e9cd767..5dbefd11b4af 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -721,7 +721,8 @@ error:
721 */ 721 */
722static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans, 722static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
723 struct btrfs_device *device, 723 struct btrfs_device *device,
724 u64 num_bytes, u64 *start) 724 u64 num_bytes, u64 *start,
725 u64 *max_avail)
725{ 726{
726 struct btrfs_key key; 727 struct btrfs_key key;
727 struct btrfs_root *root = device->dev_root; 728 struct btrfs_root *root = device->dev_root;
@@ -758,9 +759,13 @@ static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
758 ret = btrfs_search_slot(trans, root, &key, path, 0, 0); 759 ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
759 if (ret < 0) 760 if (ret < 0)
760 goto error; 761 goto error;
761 ret = btrfs_previous_item(root, path, 0, key.type); 762 if (ret > 0) {
762 if (ret < 0) 763 ret = btrfs_previous_item(root, path, key.objectid, key.type);
763 goto error; 764 if (ret < 0)
765 goto error;
766 if (ret > 0)
767 start_found = 1;
768 }
764 l = path->nodes[0]; 769 l = path->nodes[0];
765 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 770 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
766 while (1) { 771 while (1) {
@@ -803,6 +808,10 @@ no_more_items:
803 if (last_byte < search_start) 808 if (last_byte < search_start)
804 last_byte = search_start; 809 last_byte = search_start;
805 hole_size = key.offset - last_byte; 810 hole_size = key.offset - last_byte;
811
812 if (hole_size > *max_avail)
813 *max_avail = hole_size;
814
806 if (key.offset > last_byte && 815 if (key.offset > last_byte &&
807 hole_size >= num_bytes) { 816 hole_size >= num_bytes) {
808 *start = last_byte; 817 *start = last_byte;
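The *max_avail out-parameter added here lets the chunk allocator learn the largest hole on a device even when no hole satisfies the current request, so it can shrink its stripe size rather than retry blindly. A sketch of that bookkeeping over a hypothetical sorted extent list (all values invented for illustration):

#include <stdio.h>

struct extent {
        unsigned long long start, len;
};

int main(void)
{
        struct extent ext[] = { { 0, 10 }, { 16, 4 }, { 30, 2 } };
        unsigned long long want = 12, last_byte = 0, max_avail = 0;
        unsigned long long found = 0;
        int have = 0;

        for (int i = 0; i < 3; i++) {
                unsigned long long hole = ext[i].start - last_byte;

                if (hole > max_avail)           /* remember the biggest hole */
                        max_avail = hole;
                if (!have && hole >= want) {
                        found = last_byte;
                        have = 1;
                }
                last_byte = ext[i].start + ext[i].len;
        }
        if (have)
                printf("hole at %llu\n", found);
        else
                printf("no hole >= %llu; largest was %llu\n", want, max_avail);
        return 0;
}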
@@ -1621,6 +1630,7 @@ static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
1621 device->fs_devices->total_rw_bytes += diff; 1630 device->fs_devices->total_rw_bytes += diff;
1622 1631
1623 device->total_bytes = new_size; 1632 device->total_bytes = new_size;
1633 device->disk_total_bytes = new_size;
1624 btrfs_clear_space_info_full(device->dev_root->fs_info); 1634 btrfs_clear_space_info_full(device->dev_root->fs_info);
1625 1635
1626 return btrfs_update_device(trans, device); 1636 return btrfs_update_device(trans, device);
@@ -2007,7 +2017,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
2007 goto done; 2017 goto done;
2008 if (ret) { 2018 if (ret) {
2009 ret = 0; 2019 ret = 0;
2010 goto done; 2020 break;
2011 } 2021 }
2012 2022
2013 l = path->nodes[0]; 2023 l = path->nodes[0];
@@ -2015,7 +2025,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
2015 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 2025 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
2016 2026
2017 if (key.objectid != device->devid) 2027 if (key.objectid != device->devid)
2018 goto done; 2028 break;
2019 2029
2020 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 2030 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2021 length = btrfs_dev_extent_length(l, dev_extent); 2031 length = btrfs_dev_extent_length(l, dev_extent);
@@ -2171,6 +2181,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
2171 max_chunk_size); 2181 max_chunk_size);
2172 2182
2173again: 2183again:
2184 max_avail = 0;
2174 if (!map || map->num_stripes != num_stripes) { 2185 if (!map || map->num_stripes != num_stripes) {
2175 kfree(map); 2186 kfree(map);
2176 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 2187 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
@@ -2219,7 +2230,8 @@ again:
2219 2230
2220 if (device->in_fs_metadata && avail >= min_free) { 2231 if (device->in_fs_metadata && avail >= min_free) {
2221 ret = find_free_dev_extent(trans, device, 2232 ret = find_free_dev_extent(trans, device,
2222 min_free, &dev_offset); 2233 min_free, &dev_offset,
2234 &max_avail);
2223 if (ret == 0) { 2235 if (ret == 0) {
2224 list_move_tail(&device->dev_alloc_list, 2236 list_move_tail(&device->dev_alloc_list,
2225 &private_devs); 2237 &private_devs);
@@ -2795,26 +2807,6 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
2795 } 2807 }
2796 } 2808 }
2797 2809
2798 for (i = 0; i > nr; i++) {
2799 struct btrfs_multi_bio *multi;
2800 struct btrfs_bio_stripe *stripe;
2801 int ret;
2802
2803 length = 1;
2804 ret = btrfs_map_block(map_tree, WRITE, buf[i],
2805 &length, &multi, 0);
2806 BUG_ON(ret);
2807
2808 stripe = multi->stripes;
2809 for (j = 0; j < multi->num_stripes; j++) {
2810 if (stripe->physical >= physical &&
2811 physical < stripe->physical + length)
2812 break;
2813 }
2814 BUG_ON(j >= multi->num_stripes);
2815 kfree(multi);
2816 }
2817
2818 *logical = buf; 2810 *logical = buf;
2819 *naddrs = nr; 2811 *naddrs = nr;
2820 *stripe_len = map->stripe_len; 2812 *stripe_len = map->stripe_len;
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index ecfbce836d32..3e2b90eaa239 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -208,7 +208,7 @@ int btrfs_zlib_compress_pages(struct address_space *mapping,
208 *total_in = 0; 208 *total_in = 0;
209 209
210 workspace = find_zlib_workspace(); 210 workspace = find_zlib_workspace();
211 if (!workspace) 211 if (IS_ERR(workspace))
212 return -1; 212 return -1;
213 213
214 if (Z_OK != zlib_deflateInit(&workspace->def_strm, 3)) { 214 if (Z_OK != zlib_deflateInit(&workspace->def_strm, 3)) {
@@ -366,7 +366,7 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in,
366 char *kaddr; 366 char *kaddr;
367 367
368 workspace = find_zlib_workspace(); 368 workspace = find_zlib_workspace();
369 if (!workspace) 369 if (IS_ERR(workspace))
370 return -ENOMEM; 370 return -ENOMEM;
371 371
372 data_in = kmap(pages_in[page_in_index]); 372 data_in = kmap(pages_in[page_in_index]);
@@ -547,7 +547,7 @@ int btrfs_zlib_decompress(unsigned char *data_in,
547 return -ENOMEM; 547 return -ENOMEM;
548 548
549 workspace = find_zlib_workspace(); 549 workspace = find_zlib_workspace();
550 if (!workspace) 550 if (IS_ERR(workspace))
551 return -ENOMEM; 551 return -ENOMEM;
552 552
553 workspace->inf_strm.next_in = data_in; 553 workspace->inf_strm.next_in = data_in;
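These three hunks fix the same class of bug: once find_zlib_workspace() started returning ERR_PTR() values, a plain NULL test lets the error pointer through to be dereferenced later. A userspace sketch of the kernel's ERR_PTR()/IS_ERR() encoding, which packs small negative errnos into the top of the pointer range (stand-in definitions; see include/linux/err.h for the real ones):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *find_workspace(void)
{
        return ERR_PTR(-ENOMEM);        /* allocation failed */
}

int main(void)
{
        void *ws = find_workspace();

        if (IS_ERR(ws)) {               /* a !ws check would be false here */
                printf("error %ld\n", PTR_ERR(ws));
                return 1;
        }
        return 0;
}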
diff --git a/fs/buffer.c b/fs/buffer.c
index a3ef091a45bd..28f320fac4d4 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1165,8 +1165,11 @@ void mark_buffer_dirty(struct buffer_head *bh)
1165 1165
1166 if (!test_set_buffer_dirty(bh)) { 1166 if (!test_set_buffer_dirty(bh)) {
1167 struct page *page = bh->b_page; 1167 struct page *page = bh->b_page;
1168 if (!TestSetPageDirty(page)) 1168 if (!TestSetPageDirty(page)) {
1169 __set_page_dirty(page, page_mapping(page), 0); 1169 struct address_space *mapping = page_mapping(page);
1170 if (mapping)
1171 __set_page_dirty(page, mapping, 0);
1172 }
1170 } 1173 }
1171} 1174}
1172 1175
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 92888aa90749..e85b1e4389e0 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,10 @@
1Version 1.60
2-------------
3Fix memory leak in reconnect. Fix oops in DFS mount error path.
4Set s_maxbytes to smaller (the max that vfs can handle) so that
5sendfile will now work over cifs mounts again. Add noforcegid
6and noforceuid mount parameters.
7
1Version 1.59 8Version 1.59
2------------ 9------------
3Client uses server inode numbers (which are persistent) rather than 10Client uses server inode numbers (which are persistent) rather than
diff --git a/fs/cifs/README b/fs/cifs/README
index ad92921dbde4..79c1a93400be 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -262,11 +262,11 @@ A partial list of the supported mount options follows:
262 mount. 262 mount.
263 domain Set the SMB/CIFS workgroup name prepended to the 263 domain Set the SMB/CIFS workgroup name prepended to the
264 username during CIFS session establishment 264 username during CIFS session establishment
265 forceuid Set the default uid for inodes based on the uid 265 forceuid Set the default uid for inodes to the uid
266 passed in. For mounts to servers 266 passed in on mount. For mounts to servers
267 which do support the CIFS Unix extensions, such as a 267 which do support the CIFS Unix extensions, such as a
268 properly configured Samba server, the server provides 268 properly configured Samba server, the server provides
269 the uid, gid and mode so this parameter should not be 269 the uid, gid and mode so this parameter should not be
270 specified unless the server and client uid and gid 270 specified unless the server and client uid and gid
271 numbering differ. If the server and client are in the 271 numbering differ. If the server and client are in the
272 same domain (e.g. running winbind or nss_ldap) and 272 same domain (e.g. running winbind or nss_ldap) and
@@ -278,11 +278,7 @@ A partial list of the supported mount options follows:
278 of existing files will be the uid (gid) of the person 278 of existing files will be the uid (gid) of the person
279 who executed the mount (root, except when mount.cifs 279 who executed the mount (root, except when mount.cifs
280 is configured setuid for user mounts) unless the "uid=" 280 is configured setuid for user mounts) unless the "uid="
281 (gid) mount option is specified. For the uid (gid) of newly 281 (gid) mount option is specified. Also note that permission
282 created files and directories, ie files created since
283 the last mount of the server share, the expected uid
284 (gid) is cached as long as the inode remains in
285 memory on the client. Also note that permission
286 checks (authorization checks) on accesses to a file occur 282 checks (authorization checks) on accesses to a file occur
287 at the server, but there are cases in which an administrator 283 at the server, but there are cases in which an administrator
288 may want to restrict at the client as well. For those 284 may want to restrict at the client as well. For those
@@ -290,12 +286,15 @@ A partial list of the supported mount options follows:
290 (such as Windows), permissions can also be checked at the 286 (such as Windows), permissions can also be checked at the
291 client, and a crude form of client side permission checking 287 client, and a crude form of client side permission checking
292 can be enabled by specifying file_mode and dir_mode on 288 can be enabled by specifying file_mode and dir_mode on
293 the client. Note that the mount.cifs helper must be 289 the client. (default)
294 at version 1.10 or higher to support specifying the uid 290 forcegid (similar to above but for the groupid instead of uid) (default)
295 (or gid) in non-numeric form. 291 noforceuid Fill in file owner information (uid) by requesting it from
296 forcegid (similar to above but for the groupid instead of uid) 292 the server if possible. With this option, the value given in
293 the uid= option (on mount) will only be used if the server
294 cannot support returning uids on inodes.
295 noforcegid (similar to above but for the group owner, gid, instead of uid)
297 uid Set the default uid for inodes, and indicate to the 296 uid Set the default uid for inodes, and indicate to the
298 cifs kernel driver which local user mounted . If the server 297 cifs kernel driver which local user mounted. If the server
299 supports the unix extensions the default uid is 298 supports the unix extensions the default uid is
300 not used to fill in the owner fields of inodes (files) 299 not used to fill in the owner fields of inodes (files)
301 unless the "forceuid" parameter is specified. 300 unless the "forceuid" parameter is specified.
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 3bb11be8b6a8..606912d8f2a8 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -55,7 +55,7 @@ void cifs_dfs_release_automount_timer(void)
55 * i.e. strips from UNC trailing path that is not part of share 55 * i.e. strips from UNC trailing path that is not part of share
56 * name and fixup missing '\' in the beginning of DFS node referral 56 * name and fixup missing '\' in the beginning of DFS node referral
57 * if necessary. 57 * if necessary.
58 * Returns pointer to share name on success or NULL on error. 58 * Returns pointer to share name on success or ERR_PTR on error.
59 * Caller is responsible for freeing returned string. 59 * Caller is responsible for freeing returned string.
60 */ 60 */
61static char *cifs_get_share_name(const char *node_name) 61static char *cifs_get_share_name(const char *node_name)
@@ -68,7 +68,7 @@ static char *cifs_get_share_name(const char *node_name)
68 UNC = kmalloc(len+2 /*for term null and additional \ if it's missed */, 68 UNC = kmalloc(len+2 /*for term null and additional \ if it's missed */,
69 GFP_KERNEL); 69 GFP_KERNEL);
70 if (!UNC) 70 if (!UNC)
71 return NULL; 71 return ERR_PTR(-ENOMEM);
72 72
73 /* get share name and server name */ 73 /* get share name and server name */
74 if (node_name[1] != '\\') { 74 if (node_name[1] != '\\') {
@@ -87,7 +87,7 @@ static char *cifs_get_share_name(const char *node_name)
87 cERROR(1, ("%s: no server name end in node name: %s", 87 cERROR(1, ("%s: no server name end in node name: %s",
88 __func__, node_name)); 88 __func__, node_name));
89 kfree(UNC); 89 kfree(UNC);
90 return NULL; 90 return ERR_PTR(-EINVAL);
91 } 91 }
92 92
93 /* find sharename end */ 93 /* find sharename end */
@@ -133,6 +133,12 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
133 return ERR_PTR(-EINVAL); 133 return ERR_PTR(-EINVAL);
134 134
135 *devname = cifs_get_share_name(ref->node_name); 135 *devname = cifs_get_share_name(ref->node_name);
136 if (IS_ERR(*devname)) {
137 rc = PTR_ERR(*devname);
138 *devname = NULL;
139 goto compose_mount_options_err;
140 }
141
136 rc = dns_resolve_server_name_to_ip(*devname, &srvIP); 142 rc = dns_resolve_server_name_to_ip(*devname, &srvIP);
137 if (rc != 0) { 143 if (rc != 0) {
138 cERROR(1, ("%s: Failed to resolve server part of %s to IP: %d", 144 cERROR(1, ("%s: Failed to resolve server part of %s to IP: %d",
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 60e3c4253de0..714a542cbafc 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -44,7 +44,7 @@ cifs_ucs2_bytes(const __le16 *from, int maxbytes,
44 int maxwords = maxbytes / 2; 44 int maxwords = maxbytes / 2;
45 char tmp[NLS_MAX_CHARSET_SIZE]; 45 char tmp[NLS_MAX_CHARSET_SIZE];
46 46
47 for (i = 0; from[i] && i < maxwords; i++) { 47 for (i = 0; i < maxwords && from[i]; i++) {
48 charlen = codepage->uni2char(le16_to_cpu(from[i]), tmp, 48 charlen = codepage->uni2char(le16_to_cpu(from[i]), tmp,
49 NLS_MAX_CHARSET_SIZE); 49 NLS_MAX_CHARSET_SIZE);
50 if (charlen > 0) 50 if (charlen > 0)
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 44f30504b82d..84b75253b05a 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -376,10 +376,14 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
376 seq_printf(s, ",uid=%d", cifs_sb->mnt_uid); 376 seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
377 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) 377 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
378 seq_printf(s, ",forceuid"); 378 seq_printf(s, ",forceuid");
379 else
380 seq_printf(s, ",noforceuid");
379 381
380 seq_printf(s, ",gid=%d", cifs_sb->mnt_gid); 382 seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
381 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) 383 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
382 seq_printf(s, ",forcegid"); 384 seq_printf(s, ",forcegid");
385 else
386 seq_printf(s, ",noforcegid");
383 387
384 cifs_show_address(s, tcon->ses->server); 388 cifs_show_address(s, tcon->ses->server);
385 389
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index fc44d316d0bb..1f3345d7fa79 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -803,6 +803,10 @@ cifs_parse_mount_options(char *options, const char *devname,
803 char *data; 803 char *data;
804 unsigned int temp_len, i, j; 804 unsigned int temp_len, i, j;
805 char separator[2]; 805 char separator[2];
806 short int override_uid = -1;
807 short int override_gid = -1;
808 bool uid_specified = false;
809 bool gid_specified = false;
806 810
807 separator[0] = ','; 811 separator[0] = ',';
808 separator[1] = 0; 812 separator[1] = 0;
@@ -1093,18 +1097,20 @@ cifs_parse_mount_options(char *options, const char *devname,
1093 "too long.\n"); 1097 "too long.\n");
1094 return 1; 1098 return 1;
1095 } 1099 }
1096 } else if (strnicmp(data, "uid", 3) == 0) { 1100 } else if (!strnicmp(data, "uid", 3) && value && *value) {
1097 if (value && *value) 1101 vol->linux_uid = simple_strtoul(value, &value, 0);
1098 vol->linux_uid = 1102 uid_specified = true;
1099 simple_strtoul(value, &value, 0); 1103 } else if (!strnicmp(data, "forceuid", 8)) {
1100 } else if (strnicmp(data, "forceuid", 8) == 0) { 1104 override_uid = 1;
1101 vol->override_uid = 1; 1105 } else if (!strnicmp(data, "noforceuid", 10)) {
1102 } else if (strnicmp(data, "gid", 3) == 0) { 1106 override_uid = 0;
1103 if (value && *value) 1107 } else if (!strnicmp(data, "gid", 3) && value && *value) {
1104 vol->linux_gid = 1108 vol->linux_gid = simple_strtoul(value, &value, 0);
1105 simple_strtoul(value, &value, 0); 1109 gid_specified = true;
1106 } else if (strnicmp(data, "forcegid", 8) == 0) { 1110 } else if (!strnicmp(data, "forcegid", 8)) {
1107 vol->override_gid = 1; 1111 override_gid = 1;
1112 } else if (!strnicmp(data, "noforcegid", 10)) {
1113 override_gid = 0;
1108 } else if (strnicmp(data, "file_mode", 4) == 0) { 1114 } else if (strnicmp(data, "file_mode", 4) == 0) {
1109 if (value && *value) { 1115 if (value && *value) {
1110 vol->file_mode = 1116 vol->file_mode =
@@ -1355,6 +1361,18 @@ cifs_parse_mount_options(char *options, const char *devname,
1355 if (vol->UNCip == NULL) 1361 if (vol->UNCip == NULL)
1356 vol->UNCip = &vol->UNC[2]; 1362 vol->UNCip = &vol->UNC[2];
1357 1363
1364 if (uid_specified)
1365 vol->override_uid = override_uid;
1366 else if (override_uid == 1)
1367 printk(KERN_NOTICE "CIFS: ignoring forceuid mount option "
1368 "specified with no uid= option.\n");
1369
1370 if (gid_specified)
1371 vol->override_gid = override_gid;
1372 else if (override_gid == 1)
1373 printk(KERN_NOTICE "CIFS: ignoring forcegid mount option "
1374 "specified with no gid= option.\n");
1375
1358 return 0; 1376 return 0;
1359} 1377}
1360 1378
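The parsing rework above defers the forceuid/forcegid decision until all options have been seen, because forceuid is meaningless without an explicit uid=. A compact sketch of that record-then-validate pattern over a hypothetical option list:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

int main(void)
{
        const char *opts[] = { "forceuid", "rw", NULL };
        int override_uid = -1;          /* -1 = option not mentioned */
        bool uid_specified = false;

        for (int i = 0; opts[i]; i++) {
                if (!strncmp(opts[i], "uid=", 4))
                        uid_specified = true;
                else if (!strcmp(opts[i], "forceuid"))
                        override_uid = 1;
                else if (!strcmp(opts[i], "noforceuid"))
                        override_uid = 0;
        }

        /* only now is there enough information to validate */
        if (!uid_specified && override_uid == 1)
                printf("ignoring forceuid: no uid= option given\n");
        return 0;
}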
@@ -2544,11 +2562,20 @@ remote_path_check:
2544 2562
2545 if (mount_data != mount_data_global) 2563 if (mount_data != mount_data_global)
2546 kfree(mount_data); 2564 kfree(mount_data);
2565
2547 mount_data = cifs_compose_mount_options( 2566 mount_data = cifs_compose_mount_options(
2548 cifs_sb->mountdata, full_path + 1, 2567 cifs_sb->mountdata, full_path + 1,
2549 referrals, &fake_devname); 2568 referrals, &fake_devname);
2550 kfree(fake_devname); 2569
2551 free_dfs_info_array(referrals, num_referrals); 2570 free_dfs_info_array(referrals, num_referrals);
2571 kfree(fake_devname);
2572 kfree(full_path);
2573
2574 if (IS_ERR(mount_data)) {
2575 rc = PTR_ERR(mount_data);
2576 mount_data = NULL;
2577 goto mount_fail_check;
2578 }
2552 2579
2553 if (tcon) 2580 if (tcon)
2554 cifs_put_tcon(tcon); 2581 cifs_put_tcon(tcon);
@@ -2556,8 +2583,6 @@ remote_path_check:
2556 cifs_put_smb_ses(pSesInfo); 2583 cifs_put_smb_ses(pSesInfo);
2557 2584
2558 cleanup_volume_info(&volume_info); 2585 cleanup_volume_info(&volume_info);
2559 FreeXid(xid);
2560 kfree(full_path);
2561 referral_walks_count++; 2586 referral_walks_count++;
2562 goto try_mount_again; 2587 goto try_mount_again;
2563 } 2588 }
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index f28f070a60fc..f91fd51b32e3 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -1905,6 +1905,7 @@ COMPATIBLE_IOCTL(FIONCLEX)
1905COMPATIBLE_IOCTL(FIOASYNC) 1905COMPATIBLE_IOCTL(FIOASYNC)
1906COMPATIBLE_IOCTL(FIONBIO) 1906COMPATIBLE_IOCTL(FIONBIO)
1907COMPATIBLE_IOCTL(FIONREAD) /* This is also TIOCINQ */ 1907COMPATIBLE_IOCTL(FIONREAD) /* This is also TIOCINQ */
1908COMPATIBLE_IOCTL(FS_IOC_FIEMAP)
1908/* 0x00 */ 1909/* 0x00 */
1909COMPATIBLE_IOCTL(FIBMAP) 1910COMPATIBLE_IOCTL(FIBMAP)
1910COMPATIBLE_IOCTL(FIGETBSZ) 1911COMPATIBLE_IOCTL(FIGETBSZ)
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index af737bb56cb7..259525c9abb8 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -1303,6 +1303,13 @@ parse_tag_3_packet(struct ecryptfs_crypt_stat *crypt_stat,
1303 } 1303 }
1304 (*new_auth_tok)->session_key.encrypted_key_size = 1304 (*new_auth_tok)->session_key.encrypted_key_size =
1305 (body_size - (ECRYPTFS_SALT_SIZE + 5)); 1305 (body_size - (ECRYPTFS_SALT_SIZE + 5));
1306 if ((*new_auth_tok)->session_key.encrypted_key_size
1307 > ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES) {
1308 printk(KERN_WARNING "Tag 3 packet contains key larger "
1309 "than ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES\n");
1310 rc = -EINVAL;
1311 goto out_free;
1312 }
1306 if (unlikely(data[(*packet_size)++] != 0x04)) { 1313 if (unlikely(data[(*packet_size)++] != 0x04)) {
1307 printk(KERN_WARNING "Unknown version number [%d]\n", 1314 printk(KERN_WARNING "Unknown version number [%d]\n",
1308 data[(*packet_size) - 1]); 1315 data[(*packet_size) - 1]);
@@ -1449,6 +1456,12 @@ parse_tag_11_packet(unsigned char *data, unsigned char *contents,
1449 rc = -EINVAL; 1456 rc = -EINVAL;
1450 goto out; 1457 goto out;
1451 } 1458 }
1459 if (unlikely((*tag_11_contents_size) > max_contents_bytes)) {
1460 printk(KERN_ERR "Literal data section in tag 11 packet exceeds "
1461 "expected size\n");
1462 rc = -EINVAL;
1463 goto out;
1464 }
1452 if (data[(*packet_size)++] != 0x62) { 1465 if (data[(*packet_size)++] != 0x62) {
1453 printk(KERN_WARNING "Unrecognizable packet\n"); 1466 printk(KERN_WARNING "Unrecognizable packet\n");
1454 rc = -EINVAL; 1467 rc = -EINVAL;
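Both eCryptfs hunks add the same defence: a length field parsed out of packet data must be checked against the destination buffer before it is trusted. A sketch of the idea with a hypothetical one-byte length prefix (layout and names invented for illustration):

#include <stdio.h>
#include <string.h>

#define MAX_KEY_BYTES 16

static int parse(const unsigned char *pkt, size_t pktlen,
                 unsigned char *key, size_t keylen)
{
        size_t body;

        if (pktlen < 1)
                return -1;
        body = pkt[0];
        if (body > keylen || body > pktlen - 1)
                return -1;      /* reject oversized claims up front */
        memcpy(key, pkt + 1, body);
        return (int)body;
}

int main(void)
{
        unsigned char evil[] = { 200, 0xaa, 0xbb };     /* claims 200 bytes */
        unsigned char key[MAX_KEY_BYTES];

        printf("parse = %d\n", parse(evil, sizeof(evil), key, sizeof(key)));
        return 0;
}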
diff --git a/fs/exec.c b/fs/exec.c
index 4a8849e45b21..fb4f3cdda78c 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -678,8 +678,8 @@ exit:
678} 678}
679EXPORT_SYMBOL(open_exec); 679EXPORT_SYMBOL(open_exec);
680 680
681int kernel_read(struct file *file, unsigned long offset, 681int kernel_read(struct file *file, loff_t offset,
682 char *addr, unsigned long count) 682 char *addr, unsigned long count)
683{ 683{
684 mm_segment_t old_fs; 684 mm_segment_t old_fs;
685 loff_t pos = offset; 685 loff_t pos = offset;
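Widening kernel_read()'s offset from unsigned long to loff_t matters on 32-bit machines, where unsigned long is 32 bits and any offset past 4 GiB would silently wrap. A quick demonstration of the truncation with explicit fixed-width types:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int64_t offset = 5LL * 1024 * 1024 * 1024;      /* 5 GiB */
        uint32_t as_ulong32 = (uint32_t)offset;         /* old parameter */
        int64_t as_loff_t = offset;                     /* new parameter */

        printf("requested %lld, 32-bit ulong sees %u, loff_t sees %lld\n",
               (long long)offset, as_ulong32, (long long)as_loff_t);
        return 0;
}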
diff --git a/fs/ext3/Kconfig b/fs/ext3/Kconfig
index fb3c1a21b135..522b15498f45 100644
--- a/fs/ext3/Kconfig
+++ b/fs/ext3/Kconfig
@@ -29,23 +29,25 @@ config EXT3_FS
29 module will be called ext3. 29 module will be called ext3.
30 30
31config EXT3_DEFAULTS_TO_ORDERED 31config EXT3_DEFAULTS_TO_ORDERED
32 bool "Default to 'data=ordered' in ext3 (legacy option)" 32 bool "Default to 'data=ordered' in ext3"
33 depends on EXT3_FS 33 depends on EXT3_FS
34 help 34 help
35 If a filesystem does not explicitly specify a data ordering 35 The journal mode options for ext3 have different tradeoffs
36 mode, and the journal capability allowed it, ext3 used to 36 between when data is guaranteed to be on disk and
37 historically default to 'data=ordered'. 37 performance. The use of "data=writeback" can cause
38 38 unwritten data to appear in files after an system crash or
39 That was a rather unfortunate choice, because it leads to all 39 power failure, which can be a security issue. However,
40 kinds of latency problems, and the 'data=writeback' mode is more 40 "data=ordered" mode can also result in major performance
41 appropriate these days. 41 problems, including seconds-long delays before an fsync()
42 42 call returns. For details, see:
43 You should probably always answer 'n' here, and if you really 43
44 want to use 'data=ordered' mode, set it in the filesystem itself 44 http://ext4.wiki.kernel.org/index.php/Ext3_data_mode_tradeoffs
45 with 'tune2fs -o journal_data_ordered'. 45
46 46 If you have been historically happy with ext3's performance,
47 But if you really want to enable the legacy default, you can do 47 data=ordered mode will be a safe choice and you should
48 so by answering 'y' to this question. 48 answer 'y' here. If you understand the reliability and data
49 privacy issues of data=writeback and are willing to make
50 that trade off, answer 'n'.
49 51
50config EXT3_FS_XATTR 52config EXT3_FS_XATTR
51 bool "Ext3 extended attributes" 53 bool "Ext3 extended attributes"
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 524b349c6299..a8d80a7f1105 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -543,6 +543,19 @@ static inline void ext3_show_quota_options(struct seq_file *seq, struct super_bl
543#endif 543#endif
544} 544}
545 545
546static char *data_mode_string(unsigned long mode)
547{
548 switch (mode) {
549 case EXT3_MOUNT_JOURNAL_DATA:
550 return "journal";
551 case EXT3_MOUNT_ORDERED_DATA:
552 return "ordered";
553 case EXT3_MOUNT_WRITEBACK_DATA:
554 return "writeback";
555 }
556 return "unknown";
557}
558
546/* 559/*
547 * Show an option if 560 * Show an option if
548 * - it's set to a non-default value OR 561 * - it's set to a non-default value OR
@@ -616,13 +629,8 @@ static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs)
616 if (test_opt(sb, NOBH)) 629 if (test_opt(sb, NOBH))
617 seq_puts(seq, ",nobh"); 630 seq_puts(seq, ",nobh");
618 631
619 if (test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA) 632 seq_printf(seq, ",data=%s", data_mode_string(sbi->s_mount_opt &
620 seq_puts(seq, ",data=journal"); 633 EXT3_MOUNT_DATA_FLAGS));
621 else if (test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA)
622 seq_puts(seq, ",data=ordered");
623 else if (test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)
624 seq_puts(seq, ",data=writeback");
625
626 if (test_opt(sb, DATA_ERR_ABORT)) 634 if (test_opt(sb, DATA_ERR_ABORT))
627 seq_puts(seq, ",data_err=abort"); 635 seq_puts(seq, ",data_err=abort");
628 636
@@ -1024,12 +1032,18 @@ static int parse_options (char *options, struct super_block *sb,
1024 datacheck: 1032 datacheck:
1025 if (is_remount) { 1033 if (is_remount) {
1026 if ((sbi->s_mount_opt & EXT3_MOUNT_DATA_FLAGS) 1034 if ((sbi->s_mount_opt & EXT3_MOUNT_DATA_FLAGS)
1027 != data_opt) { 1035 == data_opt)
1028 printk(KERN_ERR 1036 break;
1029 "EXT3-fs: cannot change data " 1037 printk(KERN_ERR
1030 "mode on remount\n"); 1038 "EXT3-fs (device %s): Cannot change "
1031 return 0; 1039 "data mode on remount. The filesystem "
1032 } 1040 "is mounted in data=%s mode and you "
1041 "try to remount it in data=%s mode.\n",
1042 sb->s_id,
1043 data_mode_string(sbi->s_mount_opt &
1044 EXT3_MOUNT_DATA_FLAGS),
1045 data_mode_string(data_opt));
1046 return 0;
1033 } else { 1047 } else {
1034 sbi->s_mount_opt &= ~EXT3_MOUNT_DATA_FLAGS; 1048 sbi->s_mount_opt &= ~EXT3_MOUNT_DATA_FLAGS;
1035 sbi->s_mount_opt |= data_opt; 1049 sbi->s_mount_opt |= data_opt;
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 03ebb439ace0..7ebae9a4ecc0 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -624,6 +624,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
624{ 624{
625 struct gfs2_inode *ip = GFS2_I(mapping->host); 625 struct gfs2_inode *ip = GFS2_I(mapping->host);
626 struct gfs2_sbd *sdp = GFS2_SB(mapping->host); 626 struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
627 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
627 unsigned int data_blocks = 0, ind_blocks = 0, rblocks; 628 unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
628 int alloc_required; 629 int alloc_required;
629 int error = 0; 630 int error = 0;
@@ -637,6 +638,14 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
637 error = gfs2_glock_nq(&ip->i_gh); 638 error = gfs2_glock_nq(&ip->i_gh);
638 if (unlikely(error)) 639 if (unlikely(error))
639 goto out_uninit; 640 goto out_uninit;
641 if (&ip->i_inode == sdp->sd_rindex) {
642 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
643 GL_NOCACHE, &m_ip->i_gh);
644 if (unlikely(error)) {
645 gfs2_glock_dq(&ip->i_gh);
646 goto out_uninit;
647 }
648 }
640 649
641 error = gfs2_write_alloc_required(ip, pos, len, &alloc_required); 650 error = gfs2_write_alloc_required(ip, pos, len, &alloc_required);
642 if (error) 651 if (error)
@@ -667,6 +676,8 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
667 rblocks += data_blocks ? data_blocks : 1; 676 rblocks += data_blocks ? data_blocks : 1;
668 if (ind_blocks || data_blocks) 677 if (ind_blocks || data_blocks)
669 rblocks += RES_STATFS + RES_QUOTA; 678 rblocks += RES_STATFS + RES_QUOTA;
679 if (&ip->i_inode == sdp->sd_rindex)
680 rblocks += 2 * RES_STATFS;
670 681
671 error = gfs2_trans_begin(sdp, rblocks, 682 error = gfs2_trans_begin(sdp, rblocks,
672 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); 683 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
@@ -712,6 +723,10 @@ out_alloc_put:
712 gfs2_alloc_put(ip); 723 gfs2_alloc_put(ip);
713 } 724 }
714out_unlock: 725out_unlock:
726 if (&ip->i_inode == sdp->sd_rindex) {
727 gfs2_glock_dq(&m_ip->i_gh);
728 gfs2_holder_uninit(&m_ip->i_gh);
729 }
715 gfs2_glock_dq(&ip->i_gh); 730 gfs2_glock_dq(&ip->i_gh);
716out_uninit: 731out_uninit:
717 gfs2_holder_uninit(&ip->i_gh); 732 gfs2_holder_uninit(&ip->i_gh);
@@ -725,14 +740,21 @@ out_uninit:
725static void adjust_fs_space(struct inode *inode) 740static void adjust_fs_space(struct inode *inode)
726{ 741{
727 struct gfs2_sbd *sdp = inode->i_sb->s_fs_info; 742 struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
743 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
744 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
728 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; 745 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
729 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; 746 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
747 struct buffer_head *m_bh, *l_bh;
730 u64 fs_total, new_free; 748 u64 fs_total, new_free;
731 749
732 /* Total up the file system space, according to the latest rindex. */ 750 /* Total up the file system space, according to the latest rindex. */
733 fs_total = gfs2_ri_total(sdp); 751 fs_total = gfs2_ri_total(sdp);
752 if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
753 return;
734 754
735 spin_lock(&sdp->sd_statfs_spin); 755 spin_lock(&sdp->sd_statfs_spin);
756 gfs2_statfs_change_in(m_sc, m_bh->b_data +
757 sizeof(struct gfs2_dinode));
736 if (fs_total > (m_sc->sc_total + l_sc->sc_total)) 758 if (fs_total > (m_sc->sc_total + l_sc->sc_total))
737 new_free = fs_total - (m_sc->sc_total + l_sc->sc_total); 759 new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
738 else 760 else
@@ -741,6 +763,13 @@ static void adjust_fs_space(struct inode *inode)
741 fs_warn(sdp, "File system extended by %llu blocks.\n", 763 fs_warn(sdp, "File system extended by %llu blocks.\n",
742 (unsigned long long)new_free); 764 (unsigned long long)new_free);
743 gfs2_statfs_change(sdp, new_free, new_free, 0); 765 gfs2_statfs_change(sdp, new_free, new_free, 0);
766
767 if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
768 goto out;
769 update_statfs(sdp, m_bh, l_bh);
770 brelse(l_bh);
771out:
772 brelse(m_bh);
744} 773}
745 774
746/** 775/**
@@ -763,6 +792,7 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
763{ 792{
764 struct gfs2_inode *ip = GFS2_I(inode); 793 struct gfs2_inode *ip = GFS2_I(inode);
765 struct gfs2_sbd *sdp = GFS2_SB(inode); 794 struct gfs2_sbd *sdp = GFS2_SB(inode);
795 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
766 u64 to = pos + copied; 796 u64 to = pos + copied;
767 void *kaddr; 797 void *kaddr;
768 unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode); 798 unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);
@@ -794,6 +824,10 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
794 824
795 brelse(dibh); 825 brelse(dibh);
796 gfs2_trans_end(sdp); 826 gfs2_trans_end(sdp);
827 if (inode == sdp->sd_rindex) {
828 gfs2_glock_dq(&m_ip->i_gh);
829 gfs2_holder_uninit(&m_ip->i_gh);
830 }
797 gfs2_glock_dq(&ip->i_gh); 831 gfs2_glock_dq(&ip->i_gh);
798 gfs2_holder_uninit(&ip->i_gh); 832 gfs2_holder_uninit(&ip->i_gh);
799 return copied; 833 return copied;
@@ -823,6 +857,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
823 struct inode *inode = page->mapping->host; 857 struct inode *inode = page->mapping->host;
824 struct gfs2_inode *ip = GFS2_I(inode); 858 struct gfs2_inode *ip = GFS2_I(inode);
825 struct gfs2_sbd *sdp = GFS2_SB(inode); 859 struct gfs2_sbd *sdp = GFS2_SB(inode);
860 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
826 struct buffer_head *dibh; 861 struct buffer_head *dibh;
827 struct gfs2_alloc *al = ip->i_alloc; 862 struct gfs2_alloc *al = ip->i_alloc;
828 unsigned int from = pos & (PAGE_CACHE_SIZE - 1); 863 unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
@@ -865,6 +900,10 @@ failed:
865 gfs2_quota_unlock(ip); 900 gfs2_quota_unlock(ip);
866 gfs2_alloc_put(ip); 901 gfs2_alloc_put(ip);
867 } 902 }
903 if (inode == sdp->sd_rindex) {
904 gfs2_glock_dq(&m_ip->i_gh);
905 gfs2_holder_uninit(&m_ip->i_gh);
906 }
868 gfs2_glock_dq(&ip->i_gh); 907 gfs2_glock_dq(&ip->i_gh);
869 gfs2_holder_uninit(&ip->i_gh); 908 gfs2_holder_uninit(&ip->i_gh);
870 return ret; 909 return ret;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 297421c0427a..8b674b1f3a55 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -63,6 +63,7 @@ static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int
63static DECLARE_RWSEM(gfs2_umount_flush_sem); 63static DECLARE_RWSEM(gfs2_umount_flush_sem);
64static struct dentry *gfs2_root; 64static struct dentry *gfs2_root;
65static struct workqueue_struct *glock_workqueue; 65static struct workqueue_struct *glock_workqueue;
66struct workqueue_struct *gfs2_delete_workqueue;
66static LIST_HEAD(lru_list); 67static LIST_HEAD(lru_list);
67static atomic_t lru_count = ATOMIC_INIT(0); 68static atomic_t lru_count = ATOMIC_INIT(0);
68static DEFINE_SPINLOCK(lru_lock); 69static DEFINE_SPINLOCK(lru_lock);
@@ -167,13 +168,33 @@ static void glock_free(struct gfs2_glock *gl)
167 * 168 *
168 */ 169 */
169 170
170static void gfs2_glock_hold(struct gfs2_glock *gl) 171void gfs2_glock_hold(struct gfs2_glock *gl)
171{ 172{
172 GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0); 173 GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
173 atomic_inc(&gl->gl_ref); 174 atomic_inc(&gl->gl_ref);
174} 175}
175 176
176/** 177/**
178 * demote_ok - Check to see if it's ok to unlock a glock
179 * @gl: the glock
180 *
181 * Returns: 1 if it's ok
182 */
183
184static int demote_ok(const struct gfs2_glock *gl)
185{
186 const struct gfs2_glock_operations *glops = gl->gl_ops;
187
188 if (gl->gl_state == LM_ST_UNLOCKED)
189 return 0;
190 if (!list_empty(&gl->gl_holders))
191 return 0;
192 if (glops->go_demote_ok)
193 return glops->go_demote_ok(gl);
194 return 1;
195}
196
197/**
177 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list 198 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
178 * @gl: the glock 199 * @gl: the glock
179 * 200 *
@@ -181,8 +202,13 @@ static void gfs2_glock_hold(struct gfs2_glock *gl)
181 202
182static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl) 203static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
183{ 204{
205 int may_reclaim;
206 may_reclaim = (demote_ok(gl) &&
207 (atomic_read(&gl->gl_ref) == 1 ||
208 (gl->gl_name.ln_type == LM_TYPE_INODE &&
209 atomic_read(&gl->gl_ref) <= 2)));
184 spin_lock(&lru_lock); 210 spin_lock(&lru_lock);
185 if (list_empty(&gl->gl_lru) && gl->gl_state != LM_ST_UNLOCKED) { 211 if (list_empty(&gl->gl_lru) && may_reclaim) {
186 list_add_tail(&gl->gl_lru, &lru_list); 212 list_add_tail(&gl->gl_lru, &lru_list);
187 atomic_inc(&lru_count); 213 atomic_inc(&lru_count);
188 } 214 }
@@ -190,6 +216,21 @@ static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
190} 216}
191 217
192/** 218/**
219 * gfs2_glock_put_nolock() - Decrement reference count on glock
220 * @gl: The glock to put
221 *
222 * This function should only be used if the caller has its own reference
223 * to the glock, in addition to the one it is dropping.
224 */
225
226void gfs2_glock_put_nolock(struct gfs2_glock *gl)
227{
228 if (atomic_dec_and_test(&gl->gl_ref))
229 GLOCK_BUG_ON(gl, 1);
230 gfs2_glock_schedule_for_reclaim(gl);
231}
232
233/**
193 * gfs2_glock_put() - Decrement reference count on glock 234 * gfs2_glock_put() - Decrement reference count on glock
194 * @gl: The glock to put 235 * @gl: The glock to put
195 * 236 *
@@ -214,9 +255,9 @@ int gfs2_glock_put(struct gfs2_glock *gl)
 		rv = 1;
 		goto out;
 	}
-	/* 1 for being hashed, 1 for having state != LM_ST_UNLOCKED */
-	if (atomic_read(&gl->gl_ref) == 2)
-		gfs2_glock_schedule_for_reclaim(gl);
+	spin_lock(&gl->gl_spin);
+	gfs2_glock_schedule_for_reclaim(gl);
+	spin_unlock(&gl->gl_spin);
 	write_unlock(gl_lock_addr(gl->gl_hash));
 out:
 	return rv;
@@ -398,7 +439,7 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
 		if (held2)
 			gfs2_glock_hold(gl);
 		else
-			gfs2_glock_put(gl);
+			gfs2_glock_put_nolock(gl);
 	}
 
 	gl->gl_state = new_state;
@@ -633,12 +674,35 @@ out:
 out_sched:
 	gfs2_glock_hold(gl);
 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-		gfs2_glock_put(gl);
+		gfs2_glock_put_nolock(gl);
 out_unlock:
 	clear_bit(GLF_LOCK, &gl->gl_flags);
 	goto out;
 }
 
+static void delete_work_func(struct work_struct *work)
+{
+	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
+	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_inode *ip = NULL;
+	struct inode *inode;
+	u64 no_addr = 0;
+
+	spin_lock(&gl->gl_spin);
+	ip = (struct gfs2_inode *)gl->gl_object;
+	if (ip)
+		no_addr = ip->i_no_addr;
+	spin_unlock(&gl->gl_spin);
+	if (ip) {
+		inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
+		if (inode) {
+			d_prune_aliases(inode);
+			iput(inode);
+		}
+	}
+	gfs2_glock_put(gl);
+}
+
 static void glock_work_func(struct work_struct *work)
 {
 	unsigned long delay = 0;
@@ -717,6 +781,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	gl->gl_sbd = sdp;
 	gl->gl_aspace = NULL;
 	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
+	INIT_WORK(&gl->gl_delete, delete_work_func);
 
 	/* If this glock protects actual on-disk data or metadata blocks,
 	   create a VFS inode to manage the pages/buffers holding them. */
@@ -858,6 +923,8 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state,
 	    gl->gl_demote_state != state) {
 		gl->gl_demote_state = LM_ST_UNLOCKED;
 	}
+	if (gl->gl_ops->go_callback)
+		gl->gl_ops->go_callback(gl);
 	trace_gfs2_demote_rq(gl);
 }
 
@@ -1274,33 +1341,12 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 		gfs2_glock_put(gl);
 }
 
-/**
- * demote_ok - Check to see if it's ok to unlock a glock
- * @gl: the glock
- *
- * Returns: 1 if it's ok
- */
-
-static int demote_ok(const struct gfs2_glock *gl)
-{
-	const struct gfs2_glock_operations *glops = gl->gl_ops;
-
-	if (gl->gl_state == LM_ST_UNLOCKED)
-		return 0;
-	if (!list_empty(&gl->gl_holders))
-		return 0;
-	if (glops->go_demote_ok)
-		return glops->go_demote_ok(gl);
-	return 1;
-}
-
 
 static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
 {
 	struct gfs2_glock *gl;
 	int may_demote;
 	int nr_skipped = 0;
-	int got_ref = 0;
 	LIST_HEAD(skipped);
 
 	if (nr == 0)
@@ -1315,37 +1361,29 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
 		list_del_init(&gl->gl_lru);
 		atomic_dec(&lru_count);
 
+		/* Check if glock is about to be freed */
+		if (atomic_read(&gl->gl_ref) == 0)
+			continue;
+
 		/* Test for being demotable */
 		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
 			gfs2_glock_hold(gl);
-			got_ref = 1;
 			spin_unlock(&lru_lock);
 			spin_lock(&gl->gl_spin);
 			may_demote = demote_ok(gl);
-			spin_unlock(&gl->gl_spin);
-			clear_bit(GLF_LOCK, &gl->gl_flags);
 			if (may_demote) {
 				handle_callback(gl, LM_ST_UNLOCKED, 0);
 				nr--;
-				if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-					gfs2_glock_put(gl);
-				got_ref = 0;
 			}
+			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+				gfs2_glock_put_nolock(gl);
+			spin_unlock(&gl->gl_spin);
+			clear_bit(GLF_LOCK, &gl->gl_flags);
 			spin_lock(&lru_lock);
-			if (may_demote)
-				continue;
-		}
-		if (list_empty(&gl->gl_lru) &&
-		    (atomic_read(&gl->gl_ref) <= (2 + got_ref))) {
-			nr_skipped++;
-			list_add(&gl->gl_lru, &skipped);
-		}
-		if (got_ref) {
-			spin_unlock(&lru_lock);
-			gfs2_glock_put(gl);
-			spin_lock(&lru_lock);
-			got_ref = 0;
+			continue;
 		}
+		nr_skipped++;
+		list_add(&gl->gl_lru, &skipped);
 	}
 	list_splice(&skipped, &lru_list);
 	atomic_add(nr_skipped, &lru_count);
@@ -1727,6 +1765,11 @@ int __init gfs2_glock_init(void)
 	glock_workqueue = create_workqueue("glock_workqueue");
 	if (IS_ERR(glock_workqueue))
 		return PTR_ERR(glock_workqueue);
+	gfs2_delete_workqueue = create_workqueue("delete_workqueue");
+	if (IS_ERR(gfs2_delete_workqueue)) {
+		destroy_workqueue(glock_workqueue);
+		return PTR_ERR(gfs2_delete_workqueue);
+	}
 
 	register_shrinker(&glock_shrinker);
 
@@ -1737,6 +1780,7 @@ void gfs2_glock_exit(void)
 {
 	unregister_shrinker(&glock_shrinker);
 	destroy_workqueue(glock_workqueue);
+	destroy_workqueue(gfs2_delete_workqueue);
 }
 
 static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index a602a28f6f08..c609894ec0d0 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -143,6 +143,7 @@ struct lm_lockops {
 
 #define GLR_TRYFAILED		13
 
+extern struct workqueue_struct *gfs2_delete_workqueue;
 static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
 {
 	struct gfs2_holder *gh;
@@ -191,6 +192,8 @@ static inline int gfs2_glock_is_blocking(struct gfs2_glock *gl)
 int gfs2_glock_get(struct gfs2_sbd *sdp,
 		   u64 number, const struct gfs2_glock_operations *glops,
 		   int create, struct gfs2_glock **glp);
+void gfs2_glock_hold(struct gfs2_glock *gl);
+void gfs2_glock_put_nolock(struct gfs2_glock *gl);
 int gfs2_glock_put(struct gfs2_glock *gl);
 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
 		      struct gfs2_holder *gh);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index d5e4ab155ca0..6985eef06c39 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -323,6 +323,7 @@ static void trans_go_sync(struct gfs2_glock *gl)
 
 	if (gl->gl_state != LM_ST_UNLOCKED &&
 	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
+		flush_workqueue(gfs2_delete_workqueue);
 		gfs2_meta_syncfs(sdp);
 		gfs2_log_shutdown(sdp);
 	}
@@ -372,6 +373,25 @@ static int trans_go_demote_ok(const struct gfs2_glock *gl)
 	return 0;
 }
 
+/**
+ * iopen_go_callback - schedule the dcache entry for the inode to be deleted
+ * @gl: the glock
+ *
+ * gl_spin lock is held while calling this
+ */
+static void iopen_go_callback(struct gfs2_glock *gl)
+{
+	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
+
+	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
+	    gl->gl_state == LM_ST_SHARED &&
+	    ip && test_bit(GIF_USER, &ip->i_flags)) {
+		gfs2_glock_hold(gl);
+		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
+			gfs2_glock_put_nolock(gl);
+	}
+}
+
 const struct gfs2_glock_operations gfs2_meta_glops = {
 	.go_type = LM_TYPE_META,
 };
@@ -406,6 +426,7 @@ const struct gfs2_glock_operations gfs2_trans_glops = {
 
 const struct gfs2_glock_operations gfs2_iopen_glops = {
 	.go_type = LM_TYPE_IOPEN,
+	.go_callback = iopen_go_callback,
 };
 
 const struct gfs2_glock_operations gfs2_flock_glops = {
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 225347fbff3c..61801ada36f0 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -159,6 +159,7 @@ struct gfs2_glock_operations {
 	int (*go_lock) (struct gfs2_holder *gh);
 	void (*go_unlock) (struct gfs2_holder *gh);
 	int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
+	void (*go_callback) (struct gfs2_glock *gl);
 	const int go_type;
 	const unsigned long go_min_hold_time;
 };
@@ -228,6 +229,7 @@ struct gfs2_glock {
 	struct list_head gl_ail_list;
 	atomic_t gl_ail_count;
 	struct delayed_work gl_work;
+	struct work_struct gl_delete;
 };
 
 #define GFS2_MIN_LVB_SIZE 32	/* Min size of LVB that gfs2 supports */
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index daa4ae341a29..fba795798d3a 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -285,27 +285,19 @@ void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
 	}
 
 	tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
-	if (count[1] + count[2] != tmp) {
+	if (count[1] != tmp) {
 		if (gfs2_consist_rgrpd(rgd))
 			fs_err(sdp, "used data mismatch: %u != %u\n",
 			       count[1], tmp);
 		return;
 	}
 
-	if (count[3] != rgd->rd_dinodes) {
+	if (count[2] + count[3] != rgd->rd_dinodes) {
 		if (gfs2_consist_rgrpd(rgd))
 			fs_err(sdp, "used metadata mismatch: %u != %u\n",
-			       count[3], rgd->rd_dinodes);
+			       count[2] + count[3], rgd->rd_dinodes);
 		return;
 	}
-
-	if (count[2] > count[3]) {
-		if (gfs2_consist_rgrpd(rgd))
-			fs_err(sdp, "unlinked inodes > inodes: %u\n",
-			       count[2]);
-		return;
-	}
-
 }
 
 static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
@@ -961,7 +953,8 @@ static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al)
  * Returns: The inode, if one has been found
  */
 
-static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked)
+static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked,
+				     u64 skip)
 {
 	struct inode *inode;
 	u32 goal = 0, block;
@@ -985,6 +978,8 @@ static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked)
 			goal++;
 		if (*last_unlinked != NO_BLOCK && no_addr <= *last_unlinked)
 			continue;
+		if (no_addr == skip)
+			continue;
 		*last_unlinked = no_addr;
 		inode = gfs2_inode_lookup(rgd->rd_sbd->sd_vfs, DT_UNKNOWN,
 					  no_addr, -1, 1);
@@ -1104,7 +1099,7 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
 		if (try_rgrp_fit(rgd, al))
 			goto out;
 		if (rgd->rd_flags & GFS2_RDF_CHECK)
-			inode = try_rgrp_unlink(rgd, last_unlinked);
+			inode = try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr);
 		if (!rg_locked)
 			gfs2_glock_dq_uninit(&al->al_rgd_gh);
 		if (inode)
@@ -1138,7 +1133,7 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
 		if (try_rgrp_fit(rgd, al))
 			goto out;
 		if (rgd->rd_flags & GFS2_RDF_CHECK)
-			inode = try_rgrp_unlink(rgd, last_unlinked);
+			inode = try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr);
 		if (!rg_locked)
 			gfs2_glock_dq_uninit(&al->al_rgd_gh);
 		if (inode)
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 0a6801336470..f522bb017973 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -353,7 +353,7 @@ fail:
 	return error;
 }
 
-static void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
+void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
 {
 	const struct gfs2_statfs_change *str = buf;
 
@@ -441,6 +441,29 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
 	brelse(l_bh);
 }
 
+void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
+		   struct buffer_head *l_bh)
+{
+	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
+	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
+	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
+	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
+
+	gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);
+
+	spin_lock(&sdp->sd_statfs_spin);
+	m_sc->sc_total += l_sc->sc_total;
+	m_sc->sc_free += l_sc->sc_free;
+	m_sc->sc_dinodes += l_sc->sc_dinodes;
+	memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
+	memset(l_bh->b_data + sizeof(struct gfs2_dinode),
+	       0, sizeof(struct gfs2_statfs_change));
+	spin_unlock(&sdp->sd_statfs_spin);
+
+	gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
+	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
+}
+
 int gfs2_statfs_sync(struct gfs2_sbd *sdp)
 {
 	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
@@ -477,19 +500,7 @@ int gfs2_statfs_sync(struct gfs2_sbd *sdp)
 	if (error)
 		goto out_bh2;
 
-	gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);
-
-	spin_lock(&sdp->sd_statfs_spin);
-	m_sc->sc_total += l_sc->sc_total;
-	m_sc->sc_free += l_sc->sc_free;
-	m_sc->sc_dinodes += l_sc->sc_dinodes;
-	memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
-	memset(l_bh->b_data + sizeof(struct gfs2_dinode),
-	       0, sizeof(struct gfs2_statfs_change));
-	spin_unlock(&sdp->sd_statfs_spin);
-
-	gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
-	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
+	update_statfs(sdp, m_bh, l_bh);
 
 	gfs2_trans_end(sdp);
 
@@ -680,6 +691,7 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
 	struct gfs2_holder t_gh;
 	int error;
 
+	flush_workqueue(gfs2_delete_workqueue);
 	gfs2_quota_sync(sdp);
 	gfs2_statfs_sync(sdp);
 
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index b56413e3e40d..22e0417ed996 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -40,6 +40,10 @@ extern int gfs2_make_fs_rw(struct gfs2_sbd *sdp);
 extern int gfs2_statfs_init(struct gfs2_sbd *sdp);
 extern void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
 			       s64 dinodes);
+extern void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc,
+				  const void *buf);
+extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
+			  struct buffer_head *l_bh);
 extern int gfs2_statfs_sync(struct gfs2_sbd *sdp);
 
 extern int gfs2_freeze_fs(struct gfs2_sbd *sdp);
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 23419dc3027b..a7cbfbd340c7 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -386,16 +386,16 @@ static ssize_t jid_show(struct gfs2_sbd *sdp, char *buf)
 #define GDLM_ATTR(_name,_mode,_show,_store) \
 static struct gfs2_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store)
 
 GDLM_ATTR(proto_name, 0444, proto_name_show, NULL);
 GDLM_ATTR(block, 0644, block_show, block_store);
 GDLM_ATTR(withdraw, 0644, withdraw_show, withdraw_store);
 GDLM_ATTR(id, 0444, lkid_show, NULL);
 GDLM_ATTR(jid, 0444, jid_show, NULL);
 GDLM_ATTR(first, 0444, lkfirst_show, NULL);
 GDLM_ATTR(first_done, 0444, first_done_show, NULL);
-GDLM_ATTR(recover, 0200, NULL, recover_store);
+GDLM_ATTR(recover, 0600, NULL, recover_store);
 GDLM_ATTR(recover_done, 0444, recover_done_show, NULL);
 GDLM_ATTR(recover_status, 0444, recover_status_show, NULL);
 
 static struct attribute *lock_module_attrs[] = {
 	&gdlm_attr_proto_name.attr,
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 941c8425c10b..cb88dac8ccaa 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -935,26 +935,28 @@ static int can_do_hugetlb_shm(void)
 	return capable(CAP_IPC_LOCK) || in_group_p(sysctl_hugetlb_shm_group);
 }
 
-struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag)
+struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag,
+				struct user_struct **user)
 {
 	int error = -ENOMEM;
-	int unlock_shm = 0;
 	struct file *file;
 	struct inode *inode;
 	struct dentry *dentry, *root;
 	struct qstr quick_string;
-	struct user_struct *user = current_user();
 
+	*user = NULL;
 	if (!hugetlbfs_vfsmount)
 		return ERR_PTR(-ENOENT);
 
 	if (!can_do_hugetlb_shm()) {
-		if (user_shm_lock(size, user)) {
-			unlock_shm = 1;
+		*user = current_user();
+		if (user_shm_lock(size, *user)) {
 			WARN_ONCE(1,
 				"Using mlock ulimits for SHM_HUGETLB deprecated\n");
-		} else
+		} else {
+			*user = NULL;
 			return ERR_PTR(-EPERM);
+		}
 	}
 
 	root = hugetlbfs_vfsmount->mnt_root;
@@ -996,8 +998,10 @@ out_inode:
 out_dentry:
 	dput(dentry);
 out_shm_unlock:
-	if (unlock_shm)
-		user_shm_unlock(size, user);
+	if (*user) {
+		user_shm_unlock(size, *user);
+		*user = NULL;
+	}
 	return ERR_PTR(error);
 }
 
diff --git a/fs/inode.c b/fs/inode.c
index 901bad1e5f12..ae7b67e48661 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -120,12 +120,11 @@ static void wake_up_inode(struct inode *inode)
  * These are initializations that need to be done on every inode
  * allocation as the fields are not initialised by slab allocation.
  */
-struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
+int inode_init_always(struct super_block *sb, struct inode *inode)
 {
 	static const struct address_space_operations empty_aops;
 	static struct inode_operations empty_iops;
 	static const struct file_operations empty_fops;
-
 	struct address_space *const mapping = &inode->i_data;
 
 	inode->i_sb = sb;
@@ -152,7 +151,7 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
 	inode->dirtied_when = 0;
 
 	if (security_inode_alloc(inode))
-		goto out_free_inode;
+		goto out;
 
 	/* allocate and initialize an i_integrity */
 	if (ima_inode_alloc(inode))
@@ -198,16 +197,12 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
 	inode->i_fsnotify_mask = 0;
 #endif
 
-	return inode;
+	return 0;
 
 out_free_security:
 	security_inode_free(inode);
-out_free_inode:
-	if (inode->i_sb->s_op->destroy_inode)
-		inode->i_sb->s_op->destroy_inode(inode);
-	else
-		kmem_cache_free(inode_cachep, (inode));
-	return NULL;
+out:
+	return -ENOMEM;
 }
 EXPORT_SYMBOL(inode_init_always);
 
@@ -220,12 +215,21 @@ static struct inode *alloc_inode(struct super_block *sb)
 	else
 		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);
 
-	if (inode)
-		return inode_init_always(sb, inode);
-	return NULL;
+	if (!inode)
+		return NULL;
+
+	if (unlikely(inode_init_always(sb, inode))) {
+		if (inode->i_sb->s_op->destroy_inode)
+			inode->i_sb->s_op->destroy_inode(inode);
+		else
+			kmem_cache_free(inode_cachep, inode);
+		return NULL;
+	}
+
+	return inode;
 }
 
-void destroy_inode(struct inode *inode)
+void __destroy_inode(struct inode *inode)
 {
 	BUG_ON(inode_has_buffers(inode));
 	ima_inode_free(inode);
@@ -237,13 +241,17 @@ void destroy_inode(struct inode *inode)
 	if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
 		posix_acl_release(inode->i_default_acl);
 #endif
+}
+EXPORT_SYMBOL(__destroy_inode);
+
+void destroy_inode(struct inode *inode)
+{
+	__destroy_inode(inode);
 	if (inode->i_sb->s_op->destroy_inode)
 		inode->i_sb->s_op->destroy_inode(inode);
 	else
 		kmem_cache_free(inode_cachep, (inode));
 }
-EXPORT_SYMBOL(destroy_inode);
-
 
 /*
  * These are initializations that only need to be done
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 5edc2bf20581..23c947539864 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -99,7 +99,7 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
 	kunmap(pg);
 
 	D2(printk(KERN_DEBUG "readpage finished\n"));
-	return 0;
+	return ret;
 }
 
 int jffs2_do_readpage_unlock(struct inode *inode, struct page *pg)
diff --git a/fs/libfs.c b/fs/libfs.c
index ddfa89948c3f..dcec3d3ea64f 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -217,7 +217,7 @@ int get_sb_pseudo(struct file_system_type *fs_type, char *name,
 		return PTR_ERR(s);
 
 	s->s_flags = MS_NOUSER;
-	s->s_maxbytes = ~0ULL;
+	s->s_maxbytes = MAX_LFS_FILESIZE;
 	s->s_blocksize = PAGE_SIZE;
 	s->s_blocksize_bits = PAGE_SHIFT;
 	s->s_magic = magic;
diff --git a/fs/namespace.c b/fs/namespace.c
index 277c28a63ead..7230787d18b0 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -316,7 +316,8 @@ EXPORT_SYMBOL_GPL(mnt_clone_write);
  */
 int mnt_want_write_file(struct file *file)
 {
-	if (!(file->f_mode & FMODE_WRITE))
+	struct inode *inode = file->f_dentry->d_inode;
+	if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode))
 		return mnt_want_write(file->f_path.mnt);
 	else
 		return mnt_clone_write(file->f_path.mnt);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 489fc01a3204..e4e089a8f294 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -255,7 +255,7 @@ static void nfs_direct_read_release(void *calldata)
 
 	if (put_dreq(dreq))
 		nfs_direct_complete(dreq);
-	nfs_readdata_release(calldata);
+	nfs_readdata_free(data);
 }
 
 static const struct rpc_call_ops nfs_read_direct_ops = {
@@ -314,14 +314,14 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
 					data->npages, 1, 0, data->pagevec, NULL);
 		up_read(&current->mm->mmap_sem);
 		if (result < 0) {
-			nfs_readdata_release(data);
+			nfs_readdata_free(data);
 			break;
 		}
 		if ((unsigned)result < data->npages) {
 			bytes = result * PAGE_SIZE;
 			if (bytes <= pgbase) {
 				nfs_direct_release_pages(data->pagevec, result);
-				nfs_readdata_release(data);
+				nfs_readdata_free(data);
 				break;
 			}
 			bytes -= pgbase;
@@ -334,7 +334,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
 		data->inode = inode;
 		data->cred = msg.rpc_cred;
 		data->args.fh = NFS_FH(inode);
-		data->args.context = get_nfs_open_context(ctx);
+		data->args.context = ctx;
 		data->args.offset = pos;
 		data->args.pgbase = pgbase;
 		data->args.pages = data->pagevec;
@@ -441,7 +441,7 @@ static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
 		struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages);
 		list_del(&data->pages);
 		nfs_direct_release_pages(data->pagevec, data->npages);
-		nfs_writedata_release(data);
+		nfs_writedata_free(data);
 	}
 }
 
@@ -534,7 +534,7 @@ static void nfs_direct_commit_release(void *calldata)
 
 	dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
 	nfs_direct_write_complete(dreq, data->inode);
-	nfs_commitdata_release(calldata);
+	nfs_commit_free(data);
 }
 
 static const struct rpc_call_ops nfs_commit_direct_ops = {
@@ -570,7 +570,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
 	data->args.fh = NFS_FH(data->inode);
 	data->args.offset = 0;
 	data->args.count = 0;
-	data->args.context = get_nfs_open_context(dreq->ctx);
+	data->args.context = dreq->ctx;
 	data->res.count = 0;
 	data->res.fattr = &data->fattr;
 	data->res.verf = &data->verf;
@@ -734,14 +734,14 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
 					data->npages, 0, 0, data->pagevec, NULL);
 		up_read(&current->mm->mmap_sem);
 		if (result < 0) {
-			nfs_writedata_release(data);
+			nfs_writedata_free(data);
 			break;
 		}
 		if ((unsigned)result < data->npages) {
 			bytes = result * PAGE_SIZE;
 			if (bytes <= pgbase) {
 				nfs_direct_release_pages(data->pagevec, result);
-				nfs_writedata_release(data);
+				nfs_writedata_free(data);
 				break;
 			}
 			bytes -= pgbase;
@@ -756,7 +756,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
 		data->inode = inode;
 		data->cred = msg.rpc_cred;
 		data->args.fh = NFS_FH(inode);
-		data->args.context = get_nfs_open_context(ctx);
+		data->args.context = ctx;
 		data->args.offset = pos;
 		data->args.pgbase = pgbase;
 		data->args.pages = data->pagevec;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 65ca8c18476f..1434080aefeb 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1250,8 +1250,8 @@ static void nfs4_state_manager(struct nfs_client *clp)
 			continue;
 		}
 		/* Initialize or reset the session */
-		if (nfs4_has_session(clp) &&
-		   test_and_clear_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state)) {
+		if (test_and_clear_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state)
+		   && nfs4_has_session(clp)) {
 			if (clp->cl_cons_state == NFS_CS_SESSION_INITING)
 				status = nfs4_initialize_session(clp);
 			else
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 73ea5e8d66ce..12c9e66d3f1d 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -60,17 +60,15 @@ struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
 	return p;
 }
 
-static void nfs_readdata_free(struct nfs_read_data *p)
+void nfs_readdata_free(struct nfs_read_data *p)
 {
 	if (p && (p->pagevec != &p->page_array[0]))
 		kfree(p->pagevec);
 	mempool_free(p, nfs_rdata_mempool);
 }
 
-void nfs_readdata_release(void *data)
+static void nfs_readdata_release(struct nfs_read_data *rdata)
 {
-	struct nfs_read_data *rdata = data;
-
 	put_nfs_open_context(rdata->args.context);
 	nfs_readdata_free(rdata);
 }
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 0a0a2ff767c3..a34fae21fe10 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -87,17 +87,15 @@ struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
 	return p;
 }
 
-static void nfs_writedata_free(struct nfs_write_data *p)
+void nfs_writedata_free(struct nfs_write_data *p)
 {
 	if (p && (p->pagevec != &p->page_array[0]))
 		kfree(p->pagevec);
 	mempool_free(p, nfs_wdata_mempool);
 }
 
-void nfs_writedata_release(void *data)
+static void nfs_writedata_release(struct nfs_write_data *wdata)
 {
-	struct nfs_write_data *wdata = data;
-
 	put_nfs_open_context(wdata->args.context);
 	nfs_writedata_free(wdata);
 }
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 3d3ddb3f5177..2dfd47714ae5 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -412,8 +412,10 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
 		return 0; /* Do not request flush for shadow page cache */
 	if (!sb) {
 		writer = nilfs_get_writer(NILFS_MDT(inode)->mi_nilfs);
-		if (!writer)
+		if (!writer) {
+			nilfs_put_writer(NILFS_MDT(inode)->mi_nilfs);
 			return -EROFS;
+		}
 		sb = writer->s_super;
 	}
 
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 8b5e4778cf28..51ff3d0a4ee2 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1859,12 +1859,26 @@ static void nilfs_end_page_io(struct page *page, int err)
 	if (!page)
 		return;
 
-	if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page))
+	if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
 		/*
 		 * For b-tree node pages, this function may be called twice
 		 * or more because they might be split in a segment.
 		 */
+		if (PageDirty(page)) {
+			/*
+			 * For pages holding split b-tree node buffers, dirty
+			 * flag on the buffers may be cleared discretely.
+			 * In that case, the page is once redirtied for
+			 * remaining buffers, and it must be cancelled if
+			 * all the buffers get cleaned later.
+			 */
+			lock_page(page);
+			if (nilfs_page_buffers_clean(page))
+				__nilfs_clear_page_dirty(page);
+			unlock_page(page);
+		}
 		return;
+	}
 
 	__nilfs_end_page_io(page, err);
 }
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 8e2ec43b18f4..151964f0de4c 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -416,8 +416,10 @@ int nilfs_attach_checkpoint(struct nilfs_sb_info *sbi, __u64 cno)
 	if (unlikely(err))
 		goto failed;
 
+	down_read(&nilfs->ns_segctor_sem);
 	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, cno, 0, &raw_cp,
 					  &bh_cp);
+	up_read(&nilfs->ns_segctor_sem);
 	if (unlikely(err)) {
 		if (err == -ENOENT || err == -EINVAL) {
 			printk(KERN_ERR
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index e8adbffc626f..1b9caafb8662 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -253,7 +253,7 @@ nilfs_detach_writer(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
 
 static inline void nilfs_put_sbinfo(struct nilfs_sb_info *sbi)
 {
-	if (!atomic_dec_and_test(&sbi->s_count))
+	if (atomic_dec_and_test(&sbi->s_count))
 		kfree(sbi);
 }
 
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index 47cd258fd24d..c9ee67b442e1 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -62,13 +62,14 @@ static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_ev
 	event_priv->wd = wd;
 
 	ret = fsnotify_add_notify_event(group, event, fsn_event_priv);
-	/* EEXIST is not an error */
-	if (ret == -EEXIST)
-		ret = 0;
-
-	/* did event_priv get attached? */
-	if (list_empty(&fsn_event_priv->event_list))
+	if (ret) {
 		inotify_free_event_priv(fsn_event_priv);
+		/* EEXIST says we tail matched, EOVERFLOW isn't something
+		 * to report up the stack. */
+		if ((ret == -EEXIST) ||
+		    (ret == -EOVERFLOW))
+			ret = 0;
+	}
 
 	/*
 	 * If we hold the entry until after the event is on the queue
@@ -104,16 +105,45 @@ static bool inotify_should_send_event(struct fsnotify_group *group, struct inode
 	return send;
 }
 
+/*
+ * This is NEVER supposed to be called. Inotify marks should either have been
+ * removed from the idr when the watch was removed or in the
+ * fsnotify_destroy_mark_by_group() call when the inotify instance was being
+ * torn down. This is only called if the idr is about to be freed but there
+ * are still marks in it.
+ */
 static int idr_callback(int id, void *p, void *data)
 {
-	BUG();
+	struct fsnotify_mark_entry *entry;
+	struct inotify_inode_mark_entry *ientry;
+	static bool warned = false;
+
+	if (warned)
+		return 0;
+
+	warned = true;
+	entry = p;
+	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
+
+	WARN(1, "inotify closing but id=%d for entry=%p in group=%p still in "
+		"idr. Probably leaking memory\n", id, p, data);
+
+	/*
+	 * I'm taking the liberty of assuming that the mark in question is a
+	 * valid address and I'm dereferencing it. This might help to figure
+	 * out why we got here and the panic is no worse than the original
+	 * BUG() that was here.
+	 */
+	if (entry)
+		printk(KERN_WARNING "entry->group=%p inode=%p wd=%d\n",
+		       entry->group, entry->inode, ientry->wd);
 	return 0;
 }
 
 static void inotify_free_group_priv(struct fsnotify_group *group)
 {
 	/* ideally the idr is empty and we won't hit the BUG in the callback */
-	idr_for_each(&group->inotify_data.idr, idr_callback, NULL);
+	idr_for_each(&group->inotify_data.idr, idr_callback, group);
 	idr_remove_all(&group->inotify_data.idr);
 	idr_destroy(&group->inotify_data.idr);
 }
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index f30d9bbc2e1b..dcd2040d330c 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -47,9 +47,6 @@
 
 static struct vfsmount *inotify_mnt __read_mostly;
 
-/* this just sits here and wastes global memory. used to just pad userspace messages with zeros */
-static struct inotify_event nul_inotify_event;
-
 /* these are configurable via /proc/sys/fs/inotify/ */
 static int inotify_max_user_instances __read_mostly;
 static int inotify_max_queued_events __read_mostly;
@@ -157,7 +154,8 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
 
 	event = fsnotify_peek_notify_event(group);
 
-	event_size += roundup(event->name_len, event_size);
+	if (event->name_len)
+		event_size += roundup(event->name_len + 1, event_size);
 
 	if (event_size > count)
 		return ERR_PTR(-EINVAL);
@@ -183,7 +181,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
 	struct fsnotify_event_private_data *fsn_priv;
 	struct inotify_event_private_data *priv;
 	size_t event_size = sizeof(struct inotify_event);
-	size_t name_len;
+	size_t name_len = 0;
 
 	/* we get the inotify watch descriptor from the event private data */
 	spin_lock(&event->lock);
@@ -199,8 +197,12 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
 		inotify_free_event_priv(fsn_priv);
 	}
 
-	/* round up event->name_len so it is a multiple of event_size */
-	name_len = roundup(event->name_len, event_size);
+	/*
+	 * round up event->name_len so it is a multiple of event_size
+	 * plus an extra byte for the terminating '\0'.
+	 */
+	if (event->name_len)
+		name_len = roundup(event->name_len + 1, event_size);
 	inotify_event.len = name_len;
 
 	inotify_event.mask = inotify_mask_to_arg(event->mask);
@@ -224,8 +226,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
 			return -EFAULT;
 		buf += event->name_len;
 
-		/* fill userspace with 0's from nul_inotify_event */
-		if (copy_to_user(buf, &nul_inotify_event, len_to_zero))
+		/* fill userspace with 0's */
+		if (clear_user(buf, len_to_zero))
 			return -EFAULT;
 		buf += len_to_zero;
 		event_size += name_len;
@@ -326,8 +328,9 @@ static long inotify_ioctl(struct file *file, unsigned int cmd,
 		list_for_each_entry(holder, &group->notification_list, event_list) {
 			event = holder->event;
 			send_len += sizeof(struct inotify_event);
-			send_len += roundup(event->name_len,
-					    sizeof(struct inotify_event));
+			if (event->name_len)
+				send_len += roundup(event->name_len + 1,
+						sizeof(struct inotify_event));
 		}
 		mutex_unlock(&group->notification_mutex);
 		ret = put_user(send_len, (int __user *) p);
@@ -364,20 +367,53 @@ static int inotify_find_inode(const char __user *dirname, struct path *path, uns
 	return error;
 }
 
+/*
+ * Remove the mark from the idr (if present) and drop the reference
+ * on the mark because it was in the idr.
+ */
 static void inotify_remove_from_idr(struct fsnotify_group *group,
 				    struct inotify_inode_mark_entry *ientry)
 {
 	struct idr *idr;
+	struct fsnotify_mark_entry *entry;
+	struct inotify_inode_mark_entry *found_ientry;
+	int wd;
 
 	spin_lock(&group->inotify_data.idr_lock);
 	idr = &group->inotify_data.idr;
-	idr_remove(idr, ientry->wd);
-	spin_unlock(&group->inotify_data.idr_lock);
+	wd = ientry->wd;
+
+	if (wd == -1)
+		goto out;
+
+	entry = idr_find(&group->inotify_data.idr, wd);
+	if (unlikely(!entry))
+		goto out;
+
+	found_ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
+	if (unlikely(found_ientry != ientry)) {
+		/* We found an entry in the idr with the right wd, but it's
+		 * not the entry we were told to remove. eparis seriously
+		 * fucked up somewhere. */
+		WARN_ON(1);
+		ientry->wd = -1;
+		goto out;
+	}
+
+	/* One ref for being in the idr, one ref held by the caller */
+	BUG_ON(atomic_read(&entry->refcnt) < 2);
+
+	idr_remove(idr, wd);
 	ientry->wd = -1;
+
+	/* removed from the idr, drop that ref */
+	fsnotify_put_mark(entry);
+out:
+	spin_unlock(&group->inotify_data.idr_lock);
 }
+
 /*
- * Send IN_IGNORED for this wd, remove this wd from the idr, and drop the
- * internal reference help on the mark because it is in the idr.
+ * Send IN_IGNORED for this wd, remove this wd from the idr.
  */
 void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
 				    struct fsnotify_group *group)
@@ -386,6 +422,7 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
 	struct fsnotify_event *ignored_event;
 	struct inotify_event_private_data *event_priv;
 	struct fsnotify_event_private_data *fsn_event_priv;
+	int ret;
 
 	ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
 					      FSNOTIFY_EVENT_NONE, NULL, 0,
@@ -404,10 +441,8 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
 	fsn_event_priv->group = group;
 	event_priv->wd = ientry->wd;
 
-	fsnotify_add_notify_event(group, ignored_event, fsn_event_priv);
-
-	/* did the private data get added? */
-	if (list_empty(&fsn_event_priv->event_list))
+	ret = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv);
+	if (ret)
 		inotify_free_event_priv(fsn_event_priv);
 
 skip_send_ignore:
@@ -418,9 +453,6 @@ skip_send_ignore:
 	/* remove this entry from the idr */
 	inotify_remove_from_idr(group, ientry);
 
-	/* removed from idr, drop that reference */
-	fsnotify_put_mark(entry);
-
 	atomic_dec(&group->inotify_data.user->inotify_watches);
 }
 
@@ -432,80 +464,29 @@ static void inotify_free_mark(struct fsnotify_mark_entry *entry)
 	kmem_cache_free(inotify_inode_mark_cachep, ientry);
 }
 
-static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
+static int inotify_update_existing_watch(struct fsnotify_group *group,
+					 struct inode *inode,
+					 u32 arg)
 {
-	struct fsnotify_mark_entry *entry = NULL;
+	struct fsnotify_mark_entry *entry;
 	struct inotify_inode_mark_entry *ientry;
-	struct inotify_inode_mark_entry *tmp_ientry;
-	int ret = 0;
-	int add = (arg & IN_MASK_ADD);
-	__u32 mask;
 	__u32 old_mask, new_mask;
+	__u32 mask;
+	int add = (arg & IN_MASK_ADD);
+	int ret;
 
 	/* don't allow invalid bits: we don't want flags set */
 	mask = inotify_arg_to_mask(arg);
 	if (unlikely(!mask))
 		return -EINVAL;
 
-	tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
-	if (unlikely(!tmp_ientry))
-		return -ENOMEM;
-	/* we set the mask at the end after attaching it */
-	fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
-	tmp_ientry->wd = -1;
-
-find_entry:
 	spin_lock(&inode->i_lock);
 	entry = fsnotify_find_mark_entry(group, inode);
 	spin_unlock(&inode->i_lock);
-	if (entry) {
-		ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
-	} else {
-		ret = -ENOSPC;
-		if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
-			goto out_err;
-retry:
-		ret = -ENOMEM;
-		if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
-			goto out_err;
-
-		spin_lock(&group->inotify_data.idr_lock);
-		ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
-					group->inotify_data.last_wd,
-					&tmp_ientry->wd);
-		spin_unlock(&group->inotify_data.idr_lock);
-		if (ret) {
-			if (ret == -EAGAIN)
-				goto retry;
-			goto out_err;
-		}
+	if (!entry)
+		return -ENOENT;
 
-		ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
-		if (ret) {
-			inotify_remove_from_idr(group, tmp_ientry);
-			if (ret == -EEXIST)
-				goto find_entry;
-			goto out_err;
-		}
-
-		/* tmp_ientry has been added to the inode, so we are all set up.
-		 * now we just need to make sure tmp_ientry doesn't get freed and
-		 * we need to set up entry and ientry so the generic code can
-		 * do its thing. */
-		ientry = tmp_ientry;
-		entry = &ientry->fsn_entry;
-		tmp_ientry = NULL;
-
-		atomic_inc(&group->inotify_data.user->inotify_watches);
-
-		/* update the idr hint */
-		group->inotify_data.last_wd = ientry->wd;
-
-		/* we put the mark on the idr, take a reference */
-		fsnotify_get_mark(entry);
-	}
-
-	ret = ientry->wd;
+	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
 
 	spin_lock(&entry->lock);
 
@@ -537,18 +518,107 @@ retry:
 		fsnotify_recalc_group_mask(group);
 	}
 
-	/* this either matches fsnotify_find_mark_entry, or init_mark_entry
-	 * depending on which path we took... */
+	/* return the wd */
+	ret = ientry->wd;
+
+	/* match the get from fsnotify_find_mark_entry() */
 	fsnotify_put_mark(entry);
 
+	return ret;
+}
+
+static int inotify_new_watch(struct fsnotify_group *group,
+			     struct inode *inode,
+			     u32 arg)
+{
+	struct inotify_inode_mark_entry *tmp_ientry;
+	__u32 mask;
+	int ret;
+
+	/* don't allow invalid bits: we don't want flags set */
+	mask = inotify_arg_to_mask(arg);
+	if (unlikely(!mask))
+		return -EINVAL;
+
+	tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
+	if (unlikely(!tmp_ientry))
+		return -ENOMEM;
+
+	fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
+	tmp_ientry->fsn_entry.mask = mask;
+	tmp_ientry->wd = -1;
+
+	ret = -ENOSPC;
+	if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
+		goto out_err;
+retry:
+	ret = -ENOMEM;
+	if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
+		goto out_err;
+
+	spin_lock(&group->inotify_data.idr_lock);
+	ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
+				group->inotify_data.last_wd,
+				&tmp_ientry->wd);
+	spin_unlock(&group->inotify_data.idr_lock);
+	if (ret) {
+		/* idr was out of memory, allocate and try again */
+		if (ret == -EAGAIN)
+			goto retry;
+		goto out_err;
+	}
+
+	/* we put the mark on the idr, take a reference */
+	fsnotify_get_mark(&tmp_ientry->fsn_entry);
+
+	/* we are on the idr, now get on the inode */
+	ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
+	if (ret) {
+		/* we failed to get on the inode, get off the idr */
+		inotify_remove_from_idr(group, tmp_ientry);
+		goto out_err;
+	}
+
+	/* update the idr hint, who cares about races, it's just a hint */
+	group->inotify_data.last_wd = tmp_ientry->wd;
+
+	/* increment the number of watches the user has */
+	atomic_inc(&group->inotify_data.user->inotify_watches);
+
+	/* return the watch descriptor for this new entry */
+	ret = tmp_ientry->wd;
+
+	/* match the ref from fsnotify_init_markentry() */
+	fsnotify_put_mark(&tmp_ientry->fsn_entry);
+
+	/* if this mark added a new event update the group mask */
+	if (mask & ~group->mask)
+		fsnotify_recalc_group_mask(group);
+
 out_err:
-	/* could be an error, could be that we found an existing mark */
-	if (tmp_ientry) {
-		/* on the idr but didn't make it on the inode */
-		if (tmp_ientry->wd != -1)
-			inotify_remove_from_idr(group, tmp_ientry);
+	if (ret < 0)
 		kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry);
-	}
+
+	return ret;
+}
+
+static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
+{
+	int ret = 0;
+
+retry:
+	/* try to update an existing watch with the new arg */
+	ret = inotify_update_existing_watch(group, inode, arg);
+	/* no mark present, try to add a new one */
+	if (ret == -ENOENT)
+		ret = inotify_new_watch(group, inode, arg);
+	/*
+	 * inotify_new_watch could race with another thread which did an
+	 * inotify_new_watch between the update_existing and the add watch
+	 * here, go back and try to update an existing mark again.
+	 */
+	if (ret == -EEXIST)
+		goto retry;
 
 	return ret;
 }
@@ -568,7 +638,7 @@ static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsign
568 638
569 spin_lock_init(&group->inotify_data.idr_lock); 639 spin_lock_init(&group->inotify_data.idr_lock);
570 idr_init(&group->inotify_data.idr); 640 idr_init(&group->inotify_data.idr);
571 group->inotify_data.last_wd = 0; 641 group->inotify_data.last_wd = 1;
572 group->inotify_data.user = user; 642 group->inotify_data.user = user;
573 group->inotify_data.fa = NULL; 643 group->inotify_data.fa = NULL;
574 644
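
The rewritten inotify_new_watch() above is the kernel side of inotify_add_watch(2); note also that last_wd now starts at 1, so the first watch descriptor a fresh instance hands out is 1 rather than 0, matching what the pre-fsnotify inotify returned. A minimal user-space exercise of that path (standard inotify API; watching /tmp is an arbitrary choice):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/inotify.h>

    int main(void)
    {
        char buf[4096]
            __attribute__((aligned(__alignof__(struct inotify_event))));
        ssize_t len;
        int fd, wd;

        fd = inotify_init();
        if (fd < 0) {
            perror("inotify_init");
            return 1;
        }

        /* with last_wd starting at 1, the first descriptor is 1, not 0 */
        wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
        if (wd < 0) {
            perror("inotify_add_watch");
            return 1;
        }
        printf("first watch descriptor: %d\n", wd);

        len = read(fd, buf, sizeof(buf));   /* blocks until an event */
        if (len > 0) {
            struct inotify_event *ev = (struct inotify_event *)buf;
            printf("wd=%d mask=0x%x name=%s\n",
                   ev->wd, ev->mask, ev->len ? ev->name : "");
        }

        inotify_rm_watch(fd, wd);
        close(fd);
        return 0;
    }
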
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index 521368574e97..3816d5750dd5 100644
--- a/fs/notify/notification.c
+++ b/fs/notify/notification.c
@@ -153,6 +153,10 @@ static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new
153 return true; 153 return true;
154 break; 154 break;
155 case (FSNOTIFY_EVENT_NONE): 155 case (FSNOTIFY_EVENT_NONE):
156 if (old->mask & FS_Q_OVERFLOW)
157 return true;
158 else if (old->mask & FS_IN_IGNORED)
159 return false;
156 return false; 160 return false;
157 }; 161 };
158 } 162 }
@@ -171,9 +175,7 @@ int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_even
171 struct list_head *list = &group->notification_list; 175 struct list_head *list = &group->notification_list;
172 struct fsnotify_event_holder *last_holder; 176 struct fsnotify_event_holder *last_holder;
173 struct fsnotify_event *last_event; 177 struct fsnotify_event *last_event;
174 178 int ret = 0;
175 /* easy to tell if priv was attached to the event */
176 INIT_LIST_HEAD(&priv->event_list);
177 179
178 /* 180 /*
179 * There is one fsnotify_event_holder embedded inside each fsnotify_event. 181 * There is one fsnotify_event_holder embedded inside each fsnotify_event.
@@ -194,6 +196,7 @@ alloc_holder:
194 196
195 if (group->q_len >= group->max_events) { 197 if (group->q_len >= group->max_events) {
196 event = &q_overflow_event; 198 event = &q_overflow_event;
199 ret = -EOVERFLOW;
197 /* sorry, no private data on the overflow event */ 200 /* sorry, no private data on the overflow event */
198 priv = NULL; 201 priv = NULL;
199 } 202 }
@@ -235,7 +238,7 @@ alloc_holder:
235 mutex_unlock(&group->notification_mutex); 238 mutex_unlock(&group->notification_mutex);
236 239
237 wake_up(&group->notification_waitq); 240 wake_up(&group->notification_waitq);
238 return 0; 241 return ret;
239} 242}
240 243
241/* 244/*
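
The two new FSNOTIFY_EVENT_NONE cases decide which dataless events may be coalesced: a second queue-overflow event merges into the one already queued, while IN_IGNORED events must each be delivered. A standalone model of just that decision (the mask values here are placeholders, not the kernel's definitions):

    #include <stdbool.h>
    #include <stdio.h>

    /* placeholder mask bits -- not the kernel's values */
    #define FS_Q_OVERFLOW  0x1u
    #define FS_IN_IGNORED  0x2u

    /* model of the FSNOTIFY_EVENT_NONE arm of event_compare() above */
    static bool dataless_merge(unsigned int old_mask)
    {
        if (old_mask & FS_Q_OVERFLOW)
            return true;    /* one overflow marker in the queue suffices */
        if (old_mask & FS_IN_IGNORED)
            return false;   /* every IN_IGNORED must reach user space */
        return false;
    }

    int main(void)
    {
        printf("overflow coalesces: %d\n", dataless_merge(FS_Q_OVERFLOW));
        printf("ignored coalesces:  %d\n", dataless_merge(FS_IN_IGNORED));
        return 0;
    }
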
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 9edcde4974aa..ab513ddaeff2 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1914,7 +1914,8 @@ static void ocfs2_adjust_adjacent_records(struct ocfs2_extent_rec *left_rec,
1914 * immediately to their right. 1914 * immediately to their right.
1915 */ 1915 */
1916 left_clusters = le32_to_cpu(right_child_el->l_recs[0].e_cpos); 1916 left_clusters = le32_to_cpu(right_child_el->l_recs[0].e_cpos);
1917 if (ocfs2_is_empty_extent(&right_child_el->l_recs[0])) { 1917 if (!ocfs2_rec_clusters(right_child_el, &right_child_el->l_recs[0])) {
1918 BUG_ON(right_child_el->l_tree_depth);
1918 BUG_ON(le16_to_cpu(right_child_el->l_next_free_rec) <= 1); 1919 BUG_ON(le16_to_cpu(right_child_el->l_next_free_rec) <= 1);
1919 left_clusters = le32_to_cpu(right_child_el->l_recs[1].e_cpos); 1920 left_clusters = le32_to_cpu(right_child_el->l_recs[1].e_cpos);
1920 } 1921 }
@@ -2476,15 +2477,37 @@ out_ret_path:
2476 return ret; 2477 return ret;
2477} 2478}
2478 2479
2479static void ocfs2_update_edge_lengths(struct inode *inode, handle_t *handle, 2480static int ocfs2_update_edge_lengths(struct inode *inode, handle_t *handle,
2480 struct ocfs2_path *path) 2481 int subtree_index, struct ocfs2_path *path)
2481{ 2482{
2482 int i, idx; 2483 int i, idx, ret;
2483 struct ocfs2_extent_rec *rec; 2484 struct ocfs2_extent_rec *rec;
2484 struct ocfs2_extent_list *el; 2485 struct ocfs2_extent_list *el;
2485 struct ocfs2_extent_block *eb; 2486 struct ocfs2_extent_block *eb;
2486 u32 range; 2487 u32 range;
2487 2488
2489 /*
2490 * In the normal tree rotation process, we never touch the tree
2491 * branch above subtree_index, and ocfs2_extend_rotate_transaction
2492 * doesn't reserve credits for those blocks either.
2493 *
2494 * But we have a special case here which updates the rightmost
2495 * records for every bh in the path, so we have to allocate the
2496 * extra credits and get journal access to those buffers.
2497 */
2498 ret = ocfs2_extend_trans(handle,
2499 handle->h_buffer_credits + subtree_index);
2500 if (ret) {
2501 mlog_errno(ret);
2502 goto out;
2503 }
2504
2505 ret = ocfs2_journal_access_path(inode, handle, path);
2506 if (ret) {
2507 mlog_errno(ret);
2508 goto out;
2509 }
2510
2488 /* Path should always be rightmost. */ 2511 /* Path should always be rightmost. */
2489 eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data; 2512 eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data;
2490 BUG_ON(eb->h_next_leaf_blk != 0ULL); 2513 BUG_ON(eb->h_next_leaf_blk != 0ULL);
@@ -2505,6 +2528,8 @@ static void ocfs2_update_edge_lengths(struct inode *inode, handle_t *handle,
2505 2528
2506 ocfs2_journal_dirty(handle, path->p_node[i].bh); 2529 ocfs2_journal_dirty(handle, path->p_node[i].bh);
2507 } 2530 }
2531out:
2532 return ret;
2508} 2533}
2509 2534
2510static void ocfs2_unlink_path(struct inode *inode, handle_t *handle, 2535static void ocfs2_unlink_path(struct inode *inode, handle_t *handle,
@@ -2717,7 +2742,12 @@ static int ocfs2_rotate_subtree_left(struct inode *inode, handle_t *handle,
2717 if (del_right_subtree) { 2742 if (del_right_subtree) {
2718 ocfs2_unlink_subtree(inode, handle, left_path, right_path, 2743 ocfs2_unlink_subtree(inode, handle, left_path, right_path,
2719 subtree_index, dealloc); 2744 subtree_index, dealloc);
2720 ocfs2_update_edge_lengths(inode, handle, left_path); 2745 ret = ocfs2_update_edge_lengths(inode, handle, subtree_index,
2746 left_path);
2747 if (ret) {
2748 mlog_errno(ret);
2749 goto out;
2750 }
2721 2751
2722 eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data; 2752 eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data;
2723 ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno)); 2753 ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno));
@@ -3034,7 +3064,12 @@ static int ocfs2_remove_rightmost_path(struct inode *inode, handle_t *handle,
3034 3064
3035 ocfs2_unlink_subtree(inode, handle, left_path, path, 3065 ocfs2_unlink_subtree(inode, handle, left_path, path,
3036 subtree_index, dealloc); 3066 subtree_index, dealloc);
3037 ocfs2_update_edge_lengths(inode, handle, left_path); 3067 ret = ocfs2_update_edge_lengths(inode, handle, subtree_index,
3068 left_path);
3069 if (ret) {
3070 mlog_errno(ret);
3071 goto out;
3072 }
3038 3073
3039 eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data; 3074 eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data;
3040 ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno)); 3075 ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno));
@@ -6816,7 +6851,7 @@ static int ocfs2_do_truncate(struct ocfs2_super *osb,
6816 } 6851 }
6817 status = 0; 6852 status = 0;
6818bail: 6853bail:
6819 6854 brelse(last_eb_bh);
6820 mlog_exit(status); 6855 mlog_exit(status);
6821 return status; 6856 return status;
6822} 6857}
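
ocfs2_update_edge_lengths() now grows its transaction before dirtying buffers that the original reservation never covered. A simplified sketch of that pattern, with a hypothetical helper name, error handling trimmed, and the credit call mirroring the form used in the hunk (not a drop-in kernel function):

    /* sketch only: touch_extra_buffers() is a hypothetical helper */
    static int touch_extra_buffers(handle_t *handle, struct inode *inode,
                                   struct buffer_head **bhs, int n)
    {
        int i, ret;

        /* grow the running transaction, mirroring the call form above */
        ret = ocfs2_extend_trans(handle, handle->h_buffer_credits + n);
        if (ret)
            return ret;

        for (i = 0; i < n; i++) {
            /* declare intent to modify bhs[i] under this handle */
            ret = ocfs2_journal_access(handle, inode, bhs[i],
                                       OCFS2_JOURNAL_ACCESS_WRITE);
            if (ret)
                return ret;
            /* ... modify bhs[i]->b_data ... */
            ret = ocfs2_journal_dirty(handle, bhs[i]);
            if (ret)
                return ret;
        }
        return 0;
    }
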
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index b2c52b3a1484..b401654011a2 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -193,6 +193,7 @@ static int ocfs2_get_block(struct inode *inode, sector_t iblock,
193 (unsigned long long)OCFS2_I(inode)->ip_blkno); 193 (unsigned long long)OCFS2_I(inode)->ip_blkno);
194 mlog(ML_ERROR, "Size %llu, clusters %u\n", (unsigned long long)i_size_read(inode), OCFS2_I(inode)->ip_clusters); 194 mlog(ML_ERROR, "Size %llu, clusters %u\n", (unsigned long long)i_size_read(inode), OCFS2_I(inode)->ip_clusters);
195 dump_stack(); 195 dump_stack();
196 goto bail;
196 } 197 }
197 198
198 past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode)); 199 past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
@@ -894,18 +895,17 @@ struct ocfs2_write_cluster_desc {
894 */ 895 */
895 unsigned c_new; 896 unsigned c_new;
896 unsigned c_unwritten; 897 unsigned c_unwritten;
898 unsigned c_needs_zero;
897}; 899};
898 900
899static inline int ocfs2_should_zero_cluster(struct ocfs2_write_cluster_desc *d)
900{
901 return d->c_new || d->c_unwritten;
902}
903
904struct ocfs2_write_ctxt { 901struct ocfs2_write_ctxt {
905 /* Logical cluster position / len of write */ 902 /* Logical cluster position / len of write */
906 u32 w_cpos; 903 u32 w_cpos;
907 u32 w_clen; 904 u32 w_clen;
908 905
906 /* First cluster allocated in a nonsparse extend */
907 u32 w_first_new_cpos;
908
909 struct ocfs2_write_cluster_desc w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE]; 909 struct ocfs2_write_cluster_desc w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE];
910 910
911 /* 911 /*
@@ -983,6 +983,7 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
983 return -ENOMEM; 983 return -ENOMEM;
984 984
985 wc->w_cpos = pos >> osb->s_clustersize_bits; 985 wc->w_cpos = pos >> osb->s_clustersize_bits;
986 wc->w_first_new_cpos = UINT_MAX;
986 cend = (pos + len - 1) >> osb->s_clustersize_bits; 987 cend = (pos + len - 1) >> osb->s_clustersize_bits;
987 wc->w_clen = cend - wc->w_cpos + 1; 988 wc->w_clen = cend - wc->w_cpos + 1;
988 get_bh(di_bh); 989 get_bh(di_bh);
@@ -1217,20 +1218,18 @@ out:
1217 */ 1218 */
1218static int ocfs2_write_cluster(struct address_space *mapping, 1219static int ocfs2_write_cluster(struct address_space *mapping,
1219 u32 phys, unsigned int unwritten, 1220 u32 phys, unsigned int unwritten,
1221 unsigned int should_zero,
1220 struct ocfs2_alloc_context *data_ac, 1222 struct ocfs2_alloc_context *data_ac,
1221 struct ocfs2_alloc_context *meta_ac, 1223 struct ocfs2_alloc_context *meta_ac,
1222 struct ocfs2_write_ctxt *wc, u32 cpos, 1224 struct ocfs2_write_ctxt *wc, u32 cpos,
1223 loff_t user_pos, unsigned user_len) 1225 loff_t user_pos, unsigned user_len)
1224{ 1226{
1225 int ret, i, new, should_zero = 0; 1227 int ret, i, new;
1226 u64 v_blkno, p_blkno; 1228 u64 v_blkno, p_blkno;
1227 struct inode *inode = mapping->host; 1229 struct inode *inode = mapping->host;
1228 struct ocfs2_extent_tree et; 1230 struct ocfs2_extent_tree et;
1229 1231
1230 new = phys == 0 ? 1 : 0; 1232 new = phys == 0 ? 1 : 0;
1231 if (new || unwritten)
1232 should_zero = 1;
1233
1234 if (new) { 1233 if (new) {
1235 u32 tmp_pos; 1234 u32 tmp_pos;
1236 1235
@@ -1301,7 +1300,7 @@ static int ocfs2_write_cluster(struct address_space *mapping,
1301 if (tmpret) { 1300 if (tmpret) {
1302 mlog_errno(tmpret); 1301 mlog_errno(tmpret);
1303 if (ret == 0) 1302 if (ret == 0)
1304 tmpret = ret; 1303 ret = tmpret;
1305 } 1304 }
1306 } 1305 }
1307 1306
@@ -1341,7 +1340,9 @@ static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
1341 local_len = osb->s_clustersize - cluster_off; 1340 local_len = osb->s_clustersize - cluster_off;
1342 1341
1343 ret = ocfs2_write_cluster(mapping, desc->c_phys, 1342 ret = ocfs2_write_cluster(mapping, desc->c_phys,
1344 desc->c_unwritten, data_ac, meta_ac, 1343 desc->c_unwritten,
1344 desc->c_needs_zero,
1345 data_ac, meta_ac,
1345 wc, desc->c_cpos, pos, local_len); 1346 wc, desc->c_cpos, pos, local_len);
1346 if (ret) { 1347 if (ret) {
1347 mlog_errno(ret); 1348 mlog_errno(ret);
@@ -1391,14 +1392,14 @@ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
1391 * newly allocated cluster. 1392 * newly allocated cluster.
1392 */ 1393 */
1393 desc = &wc->w_desc[0]; 1394 desc = &wc->w_desc[0];
1394 if (ocfs2_should_zero_cluster(desc)) 1395 if (desc->c_needs_zero)
1395 ocfs2_figure_cluster_boundaries(osb, 1396 ocfs2_figure_cluster_boundaries(osb,
1396 desc->c_cpos, 1397 desc->c_cpos,
1397 &wc->w_target_from, 1398 &wc->w_target_from,
1398 NULL); 1399 NULL);
1399 1400
1400 desc = &wc->w_desc[wc->w_clen - 1]; 1401 desc = &wc->w_desc[wc->w_clen - 1];
1401 if (ocfs2_should_zero_cluster(desc)) 1402 if (desc->c_needs_zero)
1402 ocfs2_figure_cluster_boundaries(osb, 1403 ocfs2_figure_cluster_boundaries(osb,
1403 desc->c_cpos, 1404 desc->c_cpos,
1404 NULL, 1405 NULL,
@@ -1466,13 +1467,28 @@ static int ocfs2_populate_write_desc(struct inode *inode,
1466 phys++; 1467 phys++;
1467 } 1468 }
1468 1469
1470 /*
1471 * If w_first_new_cpos is < UINT_MAX, we have a non-sparse
1472 * file that got extended. w_first_new_cpos tells us
1473 * where the newly allocated clusters are so we can
1474 * zero them.
1475 */
1476 if (desc->c_cpos >= wc->w_first_new_cpos) {
1477 BUG_ON(phys == 0);
1478 desc->c_needs_zero = 1;
1479 }
1480
1469 desc->c_phys = phys; 1481 desc->c_phys = phys;
1470 if (phys == 0) { 1482 if (phys == 0) {
1471 desc->c_new = 1; 1483 desc->c_new = 1;
1484 desc->c_needs_zero = 1;
1472 *clusters_to_alloc = *clusters_to_alloc + 1; 1485 *clusters_to_alloc = *clusters_to_alloc + 1;
1473 } 1486 }
1474 if (ext_flags & OCFS2_EXT_UNWRITTEN) 1487
1488 if (ext_flags & OCFS2_EXT_UNWRITTEN) {
1475 desc->c_unwritten = 1; 1489 desc->c_unwritten = 1;
1490 desc->c_needs_zero = 1;
1491 }
1476 1492
1477 num_clusters--; 1493 num_clusters--;
1478 } 1494 }
@@ -1632,10 +1648,13 @@ static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos,
1632 if (newsize <= i_size_read(inode)) 1648 if (newsize <= i_size_read(inode))
1633 return 0; 1649 return 0;
1634 1650
1635 ret = ocfs2_extend_no_holes(inode, newsize, newsize - len); 1651 ret = ocfs2_extend_no_holes(inode, newsize, pos);
1636 if (ret) 1652 if (ret)
1637 mlog_errno(ret); 1653 mlog_errno(ret);
1638 1654
1655 wc->w_first_new_cpos =
1656 ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));
1657
1639 return ret; 1658 return ret;
1640} 1659}
1641 1660
@@ -1644,7 +1663,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
1644 struct page **pagep, void **fsdata, 1663 struct page **pagep, void **fsdata,
1645 struct buffer_head *di_bh, struct page *mmap_page) 1664 struct buffer_head *di_bh, struct page *mmap_page)
1646{ 1665{
1647 int ret, credits = OCFS2_INODE_UPDATE_CREDITS; 1666 int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
1648 unsigned int clusters_to_alloc, extents_to_split; 1667 unsigned int clusters_to_alloc, extents_to_split;
1649 struct ocfs2_write_ctxt *wc; 1668 struct ocfs2_write_ctxt *wc;
1650 struct inode *inode = mapping->host; 1669 struct inode *inode = mapping->host;
@@ -1722,8 +1741,19 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
1722 1741
1723 } 1742 }
1724 1743
1725 ocfs2_set_target_boundaries(osb, wc, pos, len, 1744 /*
1726 clusters_to_alloc + extents_to_split); 1745 * We have to zero sparse allocated clusters, unwritten extent clusters,
1746 * and non-sparse clusters we just extended. For non-sparse writes,
1747 * we know zeros will only be needed in the first and/or last cluster.
1748 */
1749 if (clusters_to_alloc || extents_to_split ||
1750 wc->w_desc[0].c_needs_zero ||
1751 wc->w_desc[wc->w_clen - 1].c_needs_zero)
1752 cluster_of_pages = 1;
1753 else
1754 cluster_of_pages = 0;
1755
1756 ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages);
1727 1757
1728 handle = ocfs2_start_trans(osb, credits); 1758 handle = ocfs2_start_trans(osb, credits);
1729 if (IS_ERR(handle)) { 1759 if (IS_ERR(handle)) {
@@ -1756,8 +1786,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
1756 * extent. 1786 * extent.
1757 */ 1787 */
1758 ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, 1788 ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos,
1759 clusters_to_alloc + extents_to_split, 1789 cluster_of_pages, mmap_page);
1760 mmap_page);
1761 if (ret) { 1790 if (ret) {
1762 mlog_errno(ret); 1791 mlog_errno(ret);
1763 goto out_quota; 1792 goto out_quota;
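
The aops.c changes replace the derived ocfs2_should_zero_cluster() with an explicit c_needs_zero bit, so that clusters past the old i_size of a just-extended non-sparse file are zeroed as well. A standalone model of the resulting decision (field names abbreviated):

    #include <stdint.h>
    #include <stdio.h>

    struct desc {
        uint32_t cpos;          /* logical cluster position */
        int      is_new;        /* freshly allocated (c_new) */
        int      is_unwritten;  /* unwritten extent (c_unwritten) */
    };

    /* model of the three c_needs_zero triggers in the hunks above */
    static int needs_zero(const struct desc *d, uint32_t first_new_cpos)
    {
        return d->is_new || d->is_unwritten || d->cpos >= first_new_cpos;
    }

    int main(void)
    {
        struct desc d = { .cpos = 10, .is_new = 0, .is_unwritten = 0 };

        /* no extension in play: w_first_new_cpos stays at UINT_MAX */
        printf("sparse write:   %d\n", needs_zero(&d, UINT32_MAX));
        /* non-sparse file just extended from cluster 8 onward */
        printf("extended write: %d\n", needs_zero(&d, 8));
        return 0;
    }
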
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index b574431a031d..2f28b7de2c8d 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -310,22 +310,19 @@ out_attach:
310 return ret; 310 return ret;
311} 311}
312 312
313static DEFINE_SPINLOCK(dentry_list_lock); 313DEFINE_SPINLOCK(dentry_list_lock);
314 314
315/* We limit the number of dentry locks to drop in one go. We have 315/* We limit the number of dentry locks to drop in one go. We have
316 * this limit so that we don't starve other users of ocfs2_wq. */ 316 * this limit so that we don't starve other users of ocfs2_wq. */
317#define DL_INODE_DROP_COUNT 64 317#define DL_INODE_DROP_COUNT 64
318 318
319/* Drop inode references from dentry locks */ 319/* Drop inode references from dentry locks */
320void ocfs2_drop_dl_inodes(struct work_struct *work) 320static void __ocfs2_drop_dl_inodes(struct ocfs2_super *osb, int drop_count)
321{ 321{
322 struct ocfs2_super *osb = container_of(work, struct ocfs2_super,
323 dentry_lock_work);
324 struct ocfs2_dentry_lock *dl; 322 struct ocfs2_dentry_lock *dl;
325 int drop_count = DL_INODE_DROP_COUNT;
326 323
327 spin_lock(&dentry_list_lock); 324 spin_lock(&dentry_list_lock);
328 while (osb->dentry_lock_list && drop_count--) { 325 while (osb->dentry_lock_list && (drop_count < 0 || drop_count--)) {
329 dl = osb->dentry_lock_list; 326 dl = osb->dentry_lock_list;
330 osb->dentry_lock_list = dl->dl_next; 327 osb->dentry_lock_list = dl->dl_next;
331 spin_unlock(&dentry_list_lock); 328 spin_unlock(&dentry_list_lock);
@@ -333,11 +330,32 @@ void ocfs2_drop_dl_inodes(struct work_struct *work)
333 kfree(dl); 330 kfree(dl);
334 spin_lock(&dentry_list_lock); 331 spin_lock(&dentry_list_lock);
335 } 332 }
336 if (osb->dentry_lock_list) 333 spin_unlock(&dentry_list_lock);
334}
335
336void ocfs2_drop_dl_inodes(struct work_struct *work)
337{
338 struct ocfs2_super *osb = container_of(work, struct ocfs2_super,
339 dentry_lock_work);
340
341 __ocfs2_drop_dl_inodes(osb, DL_INODE_DROP_COUNT);
342 /*
343 * Don't queue dropping if umount is in progress. We flush the
344 * list in ocfs2_dismount_volume().
345 */
346 spin_lock(&dentry_list_lock);
347 if (osb->dentry_lock_list &&
348 !ocfs2_test_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED))
337 queue_work(ocfs2_wq, &osb->dentry_lock_work); 349 queue_work(ocfs2_wq, &osb->dentry_lock_work);
338 spin_unlock(&dentry_list_lock); 350 spin_unlock(&dentry_list_lock);
339} 351}
340 352
353/* Flush the whole work queue */
354void ocfs2_drop_all_dl_inodes(struct ocfs2_super *osb)
355{
356 __ocfs2_drop_dl_inodes(osb, -1);
357}
358
341/* 359/*
342 * ocfs2_dentry_iput() and friends. 360 * ocfs2_dentry_iput() and friends.
343 * 361 *
@@ -368,7 +386,8 @@ static void ocfs2_drop_dentry_lock(struct ocfs2_super *osb,
368 /* We leave dropping of inode reference to ocfs2_wq as that can 386 /* We leave dropping of inode reference to ocfs2_wq as that can
369 * possibly lead to inode deletion which gets tricky */ 387 * possibly lead to inode deletion which gets tricky */
370 spin_lock(&dentry_list_lock); 388 spin_lock(&dentry_list_lock);
371 if (!osb->dentry_lock_list) 389 if (!osb->dentry_lock_list &&
390 !ocfs2_test_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED))
372 queue_work(ocfs2_wq, &osb->dentry_lock_work); 391 queue_work(ocfs2_wq, &osb->dentry_lock_work);
373 dl->dl_next = osb->dentry_lock_list; 392 dl->dl_next = osb->dentry_lock_list;
374 osb->dentry_lock_list = dl; 393 osb->dentry_lock_list = dl;
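
The dcache.c rework turns the drop routine into a guarded self-requeueing work item so that an unmount can shut it off cleanly. The general shape, with stand-in names for the osb fields and flag (a sketch only, not compilable against the tree):

    /* all names below (my_sb, drain_batch, DROP_IMMED, ...) are stand-ins */
    static void drop_work_fn(struct work_struct *work)
    {
        struct my_sb *sb = container_of(work, struct my_sb, drop_work);

        drain_batch(sb, BATCH);            /* drop a bounded batch */

        spin_lock(&sb->list_lock);
        if (sb->pending && !test_flag(sb, DROP_IMMED))
            /* more left and no unmount pending: run again later */
            queue_work(my_wq, &sb->drop_work);
        spin_unlock(&sb->list_lock);
    }

The unmount side raises the flag under the same lock, cancels the work synchronously, and then drains whatever remains itself, which is exactly what ocfs2_kill_sb() and ocfs2_drop_all_dl_inodes() do further down.
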
diff --git a/fs/ocfs2/dcache.h b/fs/ocfs2/dcache.h
index faa12e75f98d..f5dd1789acf1 100644
--- a/fs/ocfs2/dcache.h
+++ b/fs/ocfs2/dcache.h
@@ -49,10 +49,13 @@ struct ocfs2_dentry_lock {
49int ocfs2_dentry_attach_lock(struct dentry *dentry, struct inode *inode, 49int ocfs2_dentry_attach_lock(struct dentry *dentry, struct inode *inode,
50 u64 parent_blkno); 50 u64 parent_blkno);
51 51
52extern spinlock_t dentry_list_lock;
53
52void ocfs2_dentry_lock_put(struct ocfs2_super *osb, 54void ocfs2_dentry_lock_put(struct ocfs2_super *osb,
53 struct ocfs2_dentry_lock *dl); 55 struct ocfs2_dentry_lock *dl);
54 56
55void ocfs2_drop_dl_inodes(struct work_struct *work); 57void ocfs2_drop_dl_inodes(struct work_struct *work);
58void ocfs2_drop_all_dl_inodes(struct ocfs2_super *osb);
56 59
57struct dentry *ocfs2_find_local_alias(struct inode *inode, u64 parent_blkno, 60struct dentry *ocfs2_find_local_alias(struct inode *inode, u64 parent_blkno,
58 int skip_unhashed); 61 int skip_unhashed);
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index d07ddbe4b283..81eff8e58322 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -103,7 +103,6 @@ static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
103 lock->ast_pending, lock->ml.type); 103 lock->ast_pending, lock->ml.type);
104 BUG(); 104 BUG();
105 } 105 }
106 BUG_ON(!list_empty(&lock->ast_list));
107 if (lock->ast_pending) 106 if (lock->ast_pending)
108 mlog(0, "lock has an ast getting flushed right now\n"); 107 mlog(0, "lock has an ast getting flushed right now\n");
109 108
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index bcb9260c3735..43e6e3280569 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1118,7 +1118,7 @@ static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
1118 1118
1119 mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n", 1119 mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
1120 dlm->name, res->lockname.len, res->lockname.name, 1120 dlm->name, res->lockname.len, res->lockname.name,
1121 orig_flags & DLM_MRES_MIGRATION ? "migrate" : "recovery", 1121 orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery",
1122 send_to); 1122 send_to);
1123 1123
1124 /* send it */ 1124 /* send it */
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index fcf879ed6930..756f5b0998e0 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -122,7 +122,7 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
122 * that still has ASTs pending... */ 122 * that still has ASTs pending... */
123 in_use = !list_empty(&lock->ast_list); 123 in_use = !list_empty(&lock->ast_list);
124 spin_unlock(&dlm->ast_lock); 124 spin_unlock(&dlm->ast_lock);
125 if (in_use) { 125 if (in_use && !(flags & LKM_CANCEL)) {
126 mlog(ML_ERROR, "lockres %.*s: Someone is calling dlmunlock " 126 mlog(ML_ERROR, "lockres %.*s: Someone is calling dlmunlock "
127 "while waiting for an ast!", res->lockname.len, 127 "while waiting for an ast!", res->lockname.len,
128 res->lockname.name); 128 res->lockname.name);
@@ -131,7 +131,7 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
131 131
132 spin_lock(&res->spinlock); 132 spin_lock(&res->spinlock);
133 if (res->state & DLM_LOCK_RES_IN_PROGRESS) { 133 if (res->state & DLM_LOCK_RES_IN_PROGRESS) {
134 if (master_node) { 134 if (master_node && !(flags & LKM_CANCEL)) {
135 mlog(ML_ERROR, "lockres in progress!\n"); 135 mlog(ML_ERROR, "lockres in progress!\n");
136 spin_unlock(&res->spinlock); 136 spin_unlock(&res->spinlock);
137 return DLM_FORWARD; 137 return DLM_FORWARD;
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 62442e413a00..aa501d3f93f1 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1851,6 +1851,7 @@ relock:
1851 if (ret) 1851 if (ret)
1852 goto out_dio; 1852 goto out_dio;
1853 1853
1854 count = ocount;
1854 ret = generic_write_checks(file, ppos, &count, 1855 ret = generic_write_checks(file, ppos, &count,
1855 S_ISBLK(inode->i_mode)); 1856 S_ISBLK(inode->i_mode));
1856 if (ret) 1857 if (ret)
@@ -1918,8 +1919,10 @@ out_sems:
1918 1919
1919 mutex_unlock(&inode->i_mutex); 1920 mutex_unlock(&inode->i_mutex);
1920 1921
1922 if (written)
1923 ret = written;
1921 mlog_exit(ret); 1924 mlog_exit(ret);
1922 return written ? written : ret; 1925 return ret;
1923} 1926}
1924 1927
1925static int ocfs2_splice_to_file(struct pipe_inode_info *pipe, 1928static int ocfs2_splice_to_file(struct pipe_inode_info *pipe,
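
The second file.c hunk folds written into ret before mlog_exit() so the traced value and the return value agree. A toy model of that return path:

    #include <stdio.h>

    /* model: fold "written" into ret before tracing and returning */
    static long write_exit(long written, long ret)
    {
        if (written)
            ret = written;      /* a partial write beats the errno */
        printf("mlog_exit(%ld)\n", ret);    /* trace matches the return */
        return ret;
    }

    int main(void)
    {
        write_exit(0, -5);      /* pure failure: logs and returns -5 */
        write_exit(512, -5);    /* partial write: logs and returns 512 */
        return 0;
    }
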
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index f033760ecbea..c48b93ac6b65 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -1954,10 +1954,16 @@ void ocfs2_orphan_scan_init(struct ocfs2_super *osb)
1954 os->os_osb = osb; 1954 os->os_osb = osb;
1955 os->os_count = 0; 1955 os->os_count = 0;
1956 os->os_seqno = 0; 1956 os->os_seqno = 0;
1957 os->os_scantime = CURRENT_TIME;
1958 mutex_init(&os->os_lock); 1957 mutex_init(&os->os_lock);
1959 INIT_DELAYED_WORK(&os->os_orphan_scan_work, ocfs2_orphan_scan_work); 1958 INIT_DELAYED_WORK(&os->os_orphan_scan_work, ocfs2_orphan_scan_work);
1959}
1960 1960
1961void ocfs2_orphan_scan_start(struct ocfs2_super *osb)
1962{
1963 struct ocfs2_orphan_scan *os;
1964
1965 os = &osb->osb_orphan_scan;
1966 os->os_scantime = CURRENT_TIME;
1961 if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb)) 1967 if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
1962 atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE); 1968 atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE);
1963 else { 1969 else {
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 5432c7f79cc6..2c3222aec622 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -145,6 +145,7 @@ static inline void ocfs2_inode_set_new(struct ocfs2_super *osb,
145 145
146/* Exported only for the journal struct init code in super.c. Do not call. */ 146/* Exported only for the journal struct init code in super.c. Do not call. */
147void ocfs2_orphan_scan_init(struct ocfs2_super *osb); 147void ocfs2_orphan_scan_init(struct ocfs2_super *osb);
148void ocfs2_orphan_scan_start(struct ocfs2_super *osb);
148void ocfs2_orphan_scan_stop(struct ocfs2_super *osb); 149void ocfs2_orphan_scan_stop(struct ocfs2_super *osb);
149void ocfs2_orphan_scan_exit(struct ocfs2_super *osb); 150void ocfs2_orphan_scan_exit(struct ocfs2_super *osb);
150 151
@@ -329,20 +330,27 @@ int ocfs2_journal_dirty(handle_t *handle,
329/* extended attribute block update */ 330/* extended attribute block update */
330#define OCFS2_XATTR_BLOCK_UPDATE_CREDITS 1 331#define OCFS2_XATTR_BLOCK_UPDATE_CREDITS 1
331 332
333/* Update of a single quota block */
334#define OCFS2_QUOTA_BLOCK_UPDATE_CREDITS 1
335
332/* global quotafile inode update, data block */ 336/* global quotafile inode update, data block */
333#define OCFS2_QINFO_WRITE_CREDITS (OCFS2_INODE_UPDATE_CREDITS + 1) 337#define OCFS2_QINFO_WRITE_CREDITS (OCFS2_INODE_UPDATE_CREDITS + \
338 OCFS2_QUOTA_BLOCK_UPDATE_CREDITS)
334 339
340#define OCFS2_LOCAL_QINFO_WRITE_CREDITS OCFS2_QUOTA_BLOCK_UPDATE_CREDITS
335/* 341/*
336 * The two writes below can accidentally see global info dirty due 342 * The two writes below can accidentally see global info dirty due
337 * to set_info() quotactl so make them prepared for the writes. 343 * to set_info() quotactl so make them prepared for the writes.
338 */ 344 */
339/* quota data block, global info */ 345/* quota data block, global info */
340/* Write to local quota file */ 346/* Write to local quota file */
341#define OCFS2_QWRITE_CREDITS (OCFS2_QINFO_WRITE_CREDITS + 1) 347#define OCFS2_QWRITE_CREDITS (OCFS2_QINFO_WRITE_CREDITS + \
348 OCFS2_QUOTA_BLOCK_UPDATE_CREDITS)
342 349
343/* global quota data block, local quota data block, global quota inode, 350/* global quota data block, local quota data block, global quota inode,
344 * global quota info */ 351 * global quota info */
345#define OCFS2_QSYNC_CREDITS (OCFS2_INODE_UPDATE_CREDITS + 3) 352#define OCFS2_QSYNC_CREDITS (OCFS2_QINFO_WRITE_CREDITS + \
353 2 * OCFS2_QUOTA_BLOCK_UPDATE_CREDITS)
346 354
347static inline int ocfs2_quota_trans_credits(struct super_block *sb) 355static inline int ocfs2_quota_trans_credits(struct super_block *sb)
348{ 356{
@@ -355,11 +363,6 @@ static inline int ocfs2_quota_trans_credits(struct super_block *sb)
355 return credits; 363 return credits;
356} 364}
357 365
358/* Number of credits needed for removing quota structure from file */
359int ocfs2_calc_qdel_credits(struct super_block *sb, int type);
360/* Number of credits needed for initialization of new quota structure */
361int ocfs2_calc_qinit_credits(struct super_block *sb, int type);
362
363/* group extend. inode update and last group update. */ 366/* group extend. inode update and last group update. */
364#define OCFS2_GROUP_EXTEND_CREDITS (OCFS2_INODE_UPDATE_CREDITS + 1) 367#define OCFS2_GROUP_EXTEND_CREDITS (OCFS2_INODE_UPDATE_CREDITS + 1)
365 368
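
With the new OCFS2_QUOTA_BLOCK_UPDATE_CREDITS building block, the quota credit macros expand as below. OCFS2_INODE_UPDATE_CREDITS is assumed to be 1 here (its usual definition elsewhere in journal.h; treat the concrete numbers as an assumption):

    #include <stdio.h>

    #define OCFS2_INODE_UPDATE_CREDITS        1   /* assumed value */
    #define OCFS2_QUOTA_BLOCK_UPDATE_CREDITS  1

    #define OCFS2_QINFO_WRITE_CREDITS (OCFS2_INODE_UPDATE_CREDITS + \
                                       OCFS2_QUOTA_BLOCK_UPDATE_CREDITS)
    #define OCFS2_LOCAL_QINFO_WRITE_CREDITS OCFS2_QUOTA_BLOCK_UPDATE_CREDITS
    #define OCFS2_QWRITE_CREDITS (OCFS2_QINFO_WRITE_CREDITS + \
                                  OCFS2_QUOTA_BLOCK_UPDATE_CREDITS)
    #define OCFS2_QSYNC_CREDITS (OCFS2_QINFO_WRITE_CREDITS + \
                                 2 * OCFS2_QUOTA_BLOCK_UPDATE_CREDITS)

    int main(void)
    {
        printf("QINFO_WRITE       = %d\n", OCFS2_QINFO_WRITE_CREDITS);   /* 2 */
        printf("LOCAL_QINFO_WRITE = %d\n", OCFS2_LOCAL_QINFO_WRITE_CREDITS);
        printf("QWRITE            = %d\n", OCFS2_QWRITE_CREDITS);        /* 3 */
        printf("QSYNC             = %d\n", OCFS2_QSYNC_CREDITS);         /* 4 */
        return 0;
    }
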
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index c9345ebb8493..39e1d5a39505 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -224,10 +224,12 @@ enum ocfs2_mount_options
224 OCFS2_MOUNT_GRPQUOTA = 1 << 10, /* We support group quotas */ 224 OCFS2_MOUNT_GRPQUOTA = 1 << 10, /* We support group quotas */
225}; 225};
226 226
227#define OCFS2_OSB_SOFT_RO 0x0001 227#define OCFS2_OSB_SOFT_RO 0x0001
228#define OCFS2_OSB_HARD_RO 0x0002 228#define OCFS2_OSB_HARD_RO 0x0002
229#define OCFS2_OSB_ERROR_FS 0x0004 229#define OCFS2_OSB_ERROR_FS 0x0004
230#define OCFS2_DEFAULT_ATIME_QUANTUM 60 230#define OCFS2_OSB_DROP_DENTRY_LOCK_IMMED 0x0008
231
232#define OCFS2_DEFAULT_ATIME_QUANTUM 60
231 233
232struct ocfs2_journal; 234struct ocfs2_journal;
233struct ocfs2_slot_info; 235struct ocfs2_slot_info;
@@ -490,6 +492,18 @@ static inline void ocfs2_set_osb_flag(struct ocfs2_super *osb,
490 spin_unlock(&osb->osb_lock); 492 spin_unlock(&osb->osb_lock);
491} 493}
492 494
495
496static inline unsigned long ocfs2_test_osb_flag(struct ocfs2_super *osb,
497 unsigned long flag)
498{
499 unsigned long ret;
500
501 spin_lock(&osb->osb_lock);
502 ret = osb->osb_flags & flag;
503 spin_unlock(&osb->osb_lock);
504 return ret;
505}
506
493static inline void ocfs2_set_ro_flag(struct ocfs2_super *osb, 507static inline void ocfs2_set_ro_flag(struct ocfs2_super *osb,
494 int hard) 508 int hard)
495{ 509{
diff --git a/fs/ocfs2/ocfs2_lockid.h b/fs/ocfs2/ocfs2_lockid.h
index fcdba091af3d..c212cf5a2bdf 100644
--- a/fs/ocfs2/ocfs2_lockid.h
+++ b/fs/ocfs2/ocfs2_lockid.h
@@ -108,6 +108,7 @@ static char *ocfs2_lock_type_strings[] = {
108 [OCFS2_LOCK_TYPE_OPEN] = "Open", 108 [OCFS2_LOCK_TYPE_OPEN] = "Open",
109 [OCFS2_LOCK_TYPE_FLOCK] = "Flock", 109 [OCFS2_LOCK_TYPE_FLOCK] = "Flock",
110 [OCFS2_LOCK_TYPE_QINFO] = "Quota", 110 [OCFS2_LOCK_TYPE_QINFO] = "Quota",
111 [OCFS2_LOCK_TYPE_NFS_SYNC] = "NFSSync",
111 [OCFS2_LOCK_TYPE_ORPHAN_SCAN] = "OrphanScan", 112 [OCFS2_LOCK_TYPE_ORPHAN_SCAN] = "OrphanScan",
112}; 113};
113 114
diff --git a/fs/ocfs2/quota.h b/fs/ocfs2/quota.h
index 7365e2e08706..3fb96fcd4c81 100644
--- a/fs/ocfs2/quota.h
+++ b/fs/ocfs2/quota.h
@@ -50,7 +50,6 @@ struct ocfs2_mem_dqinfo {
50 unsigned int dqi_chunks; /* Number of chunks in local quota file */ 50 unsigned int dqi_chunks; /* Number of chunks in local quota file */
51 unsigned int dqi_blocks; /* Number of blocks allocated for local quota file */ 51 unsigned int dqi_blocks; /* Number of blocks allocated for local quota file */
52 unsigned int dqi_syncms; /* How often should we sync with other nodes */ 52 unsigned int dqi_syncms; /* How often should we sync with other nodes */
53 unsigned int dqi_syncjiff; /* Precomputed dqi_syncms in jiffies */
54 struct list_head dqi_chunk; /* List of chunks */ 53 struct list_head dqi_chunk; /* List of chunks */
55 struct inode *dqi_gqinode; /* Global quota file inode */ 54 struct inode *dqi_gqinode; /* Global quota file inode */
56 struct ocfs2_lock_res dqi_gqlock; /* Lock protecting quota information structure */ 55 struct ocfs2_lock_res dqi_gqlock; /* Lock protecting quota information structure */
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index edfa60cd155c..44f2a5e1d042 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -23,6 +23,7 @@
23#include "sysfile.h" 23#include "sysfile.h"
24#include "dlmglue.h" 24#include "dlmglue.h"
25#include "uptodate.h" 25#include "uptodate.h"
26#include "super.h"
26#include "quota.h" 27#include "quota.h"
27 28
28static struct workqueue_struct *ocfs2_quota_wq = NULL; 29static struct workqueue_struct *ocfs2_quota_wq = NULL;
@@ -69,6 +70,7 @@ static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot)
69 d->dqb_curspace = cpu_to_le64(m->dqb_curspace); 70 d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
70 d->dqb_btime = cpu_to_le64(m->dqb_btime); 71 d->dqb_btime = cpu_to_le64(m->dqb_btime);
71 d->dqb_itime = cpu_to_le64(m->dqb_itime); 72 d->dqb_itime = cpu_to_le64(m->dqb_itime);
73 d->dqb_pad1 = d->dqb_pad2 = 0;
72} 74}
73 75
74static int ocfs2_global_is_id(void *dp, struct dquot *dquot) 76static int ocfs2_global_is_id(void *dp, struct dquot *dquot)
@@ -113,6 +115,15 @@ int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
113 int rc = 0; 115 int rc = 0;
114 struct buffer_head *tmp = *bh; 116 struct buffer_head *tmp = *bh;
115 117
118 if (i_size_read(inode) >> inode->i_sb->s_blocksize_bits <= v_block) {
119 ocfs2_error(inode->i_sb,
120 "Quota file %llu is probably corrupted! Requested "
121 "to read block %Lu but file has size only %Lu\n",
122 (unsigned long long)OCFS2_I(inode)->ip_blkno,
123 (unsigned long long)v_block,
124 (unsigned long long)i_size_read(inode));
125 return -EIO;
126 }
116 rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0, 127 rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0,
117 ocfs2_validate_quota_block); 128 ocfs2_validate_quota_block);
118 if (rc) 129 if (rc)
@@ -211,14 +222,13 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type,
211 222
212 mutex_lock_nested(&gqinode->i_mutex, I_MUTEX_QUOTA); 223 mutex_lock_nested(&gqinode->i_mutex, I_MUTEX_QUOTA);
213 if (gqinode->i_size < off + len) { 224 if (gqinode->i_size < off + len) {
214 down_write(&OCFS2_I(gqinode)->ip_alloc_sem); 225 loff_t rounded_end =
215 err = ocfs2_extend_no_holes(gqinode, off + len, off); 226 ocfs2_align_bytes_to_blocks(sb, off + len);
216 up_write(&OCFS2_I(gqinode)->ip_alloc_sem); 227
217 if (err < 0) 228 /* Space is already allocated in ocfs2_global_read_dquot() */
218 goto out;
219 err = ocfs2_simple_size_update(gqinode, 229 err = ocfs2_simple_size_update(gqinode,
220 oinfo->dqi_gqi_bh, 230 oinfo->dqi_gqi_bh,
221 off + len); 231 rounded_end);
222 if (err < 0) 232 if (err < 0)
223 goto out; 233 goto out;
224 new = 1; 234 new = 1;
@@ -234,7 +244,7 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type,
234 } 244 }
235 if (err) { 245 if (err) {
236 mlog_errno(err); 246 mlog_errno(err);
237 return err; 247 goto out;
238 } 248 }
239 lock_buffer(bh); 249 lock_buffer(bh);
240 if (new) 250 if (new)
@@ -342,7 +352,6 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
342 info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace); 352 info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
343 info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace); 353 info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
344 oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms); 354 oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms);
345 oinfo->dqi_syncjiff = msecs_to_jiffies(oinfo->dqi_syncms);
346 oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks); 355 oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
347 oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk); 356 oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
348 oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry); 357 oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
@@ -352,7 +361,7 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
352 oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi); 361 oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
353 INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn); 362 INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
354 queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work, 363 queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
355 oinfo->dqi_syncjiff); 364 msecs_to_jiffies(oinfo->dqi_syncms));
356 365
357out_err: 366out_err:
358 mlog_exit(status); 367 mlog_exit(status);
@@ -402,13 +411,36 @@ int ocfs2_global_write_info(struct super_block *sb, int type)
402 return err; 411 return err;
403} 412}
404 413
414static int ocfs2_global_qinit_alloc(struct super_block *sb, int type)
415{
416 struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
417
418 /*
419 * We may need to allocate tree blocks and a leaf block but not the
420 * root block
421 */
422 return oinfo->dqi_gi.dqi_qtree_depth;
423}
424
425static int ocfs2_calc_global_qinit_credits(struct super_block *sb, int type)
426{
427 /* We modify all the allocated blocks, tree root, and info block */
428 return (ocfs2_global_qinit_alloc(sb, type) + 2) *
429 OCFS2_QUOTA_BLOCK_UPDATE_CREDITS;
430}
431
405/* Read in information from global quota file and acquire a reference to it. 432/* Read in information from global quota file and acquire a reference to it.
406 * dquot_acquire() has already started the transaction and locked quota file */ 433 * dquot_acquire() has already started the transaction and locked quota file */
407int ocfs2_global_read_dquot(struct dquot *dquot) 434int ocfs2_global_read_dquot(struct dquot *dquot)
408{ 435{
409 int err, err2, ex = 0; 436 int err, err2, ex = 0;
410 struct ocfs2_mem_dqinfo *info = 437 struct super_block *sb = dquot->dq_sb;
411 sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; 438 int type = dquot->dq_type;
439 struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
440 struct ocfs2_super *osb = OCFS2_SB(sb);
441 struct inode *gqinode = info->dqi_gqinode;
442 int need_alloc = ocfs2_global_qinit_alloc(sb, type);
443 handle_t *handle = NULL;
412 444
413 err = ocfs2_qinfo_lock(info, 0); 445 err = ocfs2_qinfo_lock(info, 0);
414 if (err < 0) 446 if (err < 0)
@@ -419,14 +451,33 @@ int ocfs2_global_read_dquot(struct dquot *dquot)
419 OCFS2_DQUOT(dquot)->dq_use_count++; 451 OCFS2_DQUOT(dquot)->dq_use_count++;
420 OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace; 452 OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
421 OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes; 453 OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
454 ocfs2_qinfo_unlock(info, 0);
455
422 if (!dquot->dq_off) { /* No real quota entry? */ 456 if (!dquot->dq_off) { /* No real quota entry? */
423 /* Upgrade to exclusive lock for allocation */
424 ocfs2_qinfo_unlock(info, 0);
425 err = ocfs2_qinfo_lock(info, 1);
426 if (err < 0)
427 goto out_qlock;
428 ex = 1; 457 ex = 1;
458 /*
 459 * Add blocks to the quota file before we start a transaction,
 460 * since locking allocators ranks above a transaction start
461 */
462 WARN_ON(journal_current_handle());
463 down_write(&OCFS2_I(gqinode)->ip_alloc_sem);
464 err = ocfs2_extend_no_holes(gqinode,
465 gqinode->i_size + (need_alloc << sb->s_blocksize_bits),
466 gqinode->i_size);
467 up_write(&OCFS2_I(gqinode)->ip_alloc_sem);
468 if (err < 0)
469 goto out;
429 } 470 }
471
472 handle = ocfs2_start_trans(osb,
473 ocfs2_calc_global_qinit_credits(sb, type));
474 if (IS_ERR(handle)) {
475 err = PTR_ERR(handle);
476 goto out;
477 }
478 err = ocfs2_qinfo_lock(info, ex);
479 if (err < 0)
480 goto out_trans;
430 err = qtree_write_dquot(&info->dqi_gi, dquot); 481 err = qtree_write_dquot(&info->dqi_gi, dquot);
431 if (ex && info_dirty(sb_dqinfo(dquot->dq_sb, dquot->dq_type))) { 482 if (ex && info_dirty(sb_dqinfo(dquot->dq_sb, dquot->dq_type))) {
432 err2 = __ocfs2_global_write_info(dquot->dq_sb, dquot->dq_type); 483 err2 = __ocfs2_global_write_info(dquot->dq_sb, dquot->dq_type);
@@ -438,6 +489,9 @@ out_qlock:
438 ocfs2_qinfo_unlock(info, 1); 489 ocfs2_qinfo_unlock(info, 1);
439 else 490 else
440 ocfs2_qinfo_unlock(info, 0); 491 ocfs2_qinfo_unlock(info, 0);
492out_trans:
493 if (handle)
494 ocfs2_commit_trans(osb, handle);
441out: 495out:
442 if (err < 0) 496 if (err < 0)
443 mlog_errno(err); 497 mlog_errno(err);
@@ -607,7 +661,7 @@ static void qsync_work_fn(struct work_struct *work)
607 661
608 dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type); 662 dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
609 queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work, 663 queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
610 oinfo->dqi_syncjiff); 664 msecs_to_jiffies(oinfo->dqi_syncms));
611} 665}
612 666
613/* 667/*
@@ -635,20 +689,18 @@ out:
635 return status; 689 return status;
636} 690}
637 691
638int ocfs2_calc_qdel_credits(struct super_block *sb, int type) 692static int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
639{ 693{
640 struct ocfs2_mem_dqinfo *oinfo; 694 struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
641 int features[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA, 695 /*
642 OCFS2_FEATURE_RO_COMPAT_GRPQUOTA }; 696 * We modify tree, leaf block, global info, local chunk header,
643 697 * global and local inode; OCFS2_QINFO_WRITE_CREDITS already
644 if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, features[type])) 698 * accounts for inode update
645 return 0; 699 */
646 700 return (oinfo->dqi_gi.dqi_qtree_depth + 2) *
647 oinfo = sb_dqinfo(sb, type)->dqi_priv; 701 OCFS2_QUOTA_BLOCK_UPDATE_CREDITS +
648 /* We modify tree, leaf block, global info, local chunk header, 702 OCFS2_QINFO_WRITE_CREDITS +
649 * global and local inode */ 703 OCFS2_INODE_UPDATE_CREDITS;
650 return oinfo->dqi_gi.dqi_qtree_depth + 2 + 1 +
651 2 * OCFS2_INODE_UPDATE_CREDITS;
652} 704}
653 705
654static int ocfs2_release_dquot(struct dquot *dquot) 706static int ocfs2_release_dquot(struct dquot *dquot)
@@ -680,33 +732,10 @@ out:
680 return status; 732 return status;
681} 733}
682 734
683int ocfs2_calc_qinit_credits(struct super_block *sb, int type)
684{
685 struct ocfs2_mem_dqinfo *oinfo;
686 int features[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
687 OCFS2_FEATURE_RO_COMPAT_GRPQUOTA };
688 struct ocfs2_dinode *lfe, *gfe;
689
690 if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, features[type]))
691 return 0;
692
693 oinfo = sb_dqinfo(sb, type)->dqi_priv;
694 gfe = (struct ocfs2_dinode *)oinfo->dqi_gqi_bh->b_data;
695 lfe = (struct ocfs2_dinode *)oinfo->dqi_lqi_bh->b_data;
696 /* We can extend local file + global file. In local file we
697 * can modify info, chunk header block and dquot block. In
698 * global file we can modify info, tree and leaf block */
699 return ocfs2_calc_extend_credits(sb, &lfe->id2.i_list, 0) +
700 ocfs2_calc_extend_credits(sb, &gfe->id2.i_list, 0) +
701 3 + oinfo->dqi_gi.dqi_qtree_depth + 2;
702}
703
704static int ocfs2_acquire_dquot(struct dquot *dquot) 735static int ocfs2_acquire_dquot(struct dquot *dquot)
705{ 736{
706 handle_t *handle;
707 struct ocfs2_mem_dqinfo *oinfo = 737 struct ocfs2_mem_dqinfo *oinfo =
708 sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; 738 sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
709 struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
710 int status = 0; 739 int status = 0;
711 740
712 mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type); 741 mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
@@ -715,16 +744,7 @@ static int ocfs2_acquire_dquot(struct dquot *dquot)
715 status = ocfs2_lock_global_qf(oinfo, 1); 744 status = ocfs2_lock_global_qf(oinfo, 1);
716 if (status < 0) 745 if (status < 0)
717 goto out; 746 goto out;
718 handle = ocfs2_start_trans(osb,
719 ocfs2_calc_qinit_credits(dquot->dq_sb, dquot->dq_type));
720 if (IS_ERR(handle)) {
721 status = PTR_ERR(handle);
722 mlog_errno(status);
723 goto out_ilock;
724 }
725 status = dquot_acquire(dquot); 747 status = dquot_acquire(dquot);
726 ocfs2_commit_trans(osb, handle);
727out_ilock:
728 ocfs2_unlock_global_qf(oinfo, 1); 748 ocfs2_unlock_global_qf(oinfo, 1);
729out: 749out:
730 mlog_exit(status); 750 mlog_exit(status);
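
The ocfs2_global_read_dquot() rework enforces the lock-ordering rule its new comment states: allocator locks rank above a transaction start, so the quota file must be extended before ocfs2_start_trans(). Distilled into a sketch (hypothetical helper; error paths and the qinfo locking trimmed):

    /* sketch only: alloc_then_journal() is a hypothetical helper */
    static int alloc_then_journal(struct ocfs2_super *osb,
                                  struct inode *gqinode,
                                  int need_alloc, int credits)
    {
        handle_t *handle;
        int err;

        /* 1. extend the file first: allocator locks rank above handles */
        down_write(&OCFS2_I(gqinode)->ip_alloc_sem);
        err = ocfs2_extend_no_holes(gqinode,
                gqinode->i_size +
                    (need_alloc << gqinode->i_sb->s_blocksize_bits),
                gqinode->i_size);
        up_write(&OCFS2_I(gqinode)->ip_alloc_sem);
        if (err < 0)
            return err;

        /* 2. only now open the transaction for the journalled writes */
        handle = ocfs2_start_trans(osb, credits);
        if (IS_ERR(handle))
            return PTR_ERR(handle);
        /* ... qtree_write_dquot() etc. under the handle ... */
        return ocfs2_commit_trans(osb, handle);
    }
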
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 5a460fa82553..bdb09cb6e1fe 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -20,6 +20,7 @@
20#include "sysfile.h" 20#include "sysfile.h"
21#include "dlmglue.h" 21#include "dlmglue.h"
22#include "quota.h" 22#include "quota.h"
23#include "uptodate.h"
23 24
24/* Number of local quota structures per block */ 25/* Number of local quota structures per block */
25static inline unsigned int ol_quota_entries_per_block(struct super_block *sb) 26static inline unsigned int ol_quota_entries_per_block(struct super_block *sb)
@@ -100,7 +101,8 @@ static int ocfs2_modify_bh(struct inode *inode, struct buffer_head *bh,
100 handle_t *handle; 101 handle_t *handle;
101 int status; 102 int status;
102 103
103 handle = ocfs2_start_trans(OCFS2_SB(sb), 1); 104 handle = ocfs2_start_trans(OCFS2_SB(sb),
105 OCFS2_QUOTA_BLOCK_UPDATE_CREDITS);
104 if (IS_ERR(handle)) { 106 if (IS_ERR(handle)) {
105 status = PTR_ERR(handle); 107 status = PTR_ERR(handle);
106 mlog_errno(status); 108 mlog_errno(status);
@@ -610,7 +612,8 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
610 goto out_bh; 612 goto out_bh;
611 /* Mark quota file as clean if we are recovering quota file of 613 /* Mark quota file as clean if we are recovering quota file of
612 * some other node. */ 614 * some other node. */
613 handle = ocfs2_start_trans(osb, 1); 615 handle = ocfs2_start_trans(osb,
616 OCFS2_LOCAL_QINFO_WRITE_CREDITS);
614 if (IS_ERR(handle)) { 617 if (IS_ERR(handle)) {
615 status = PTR_ERR(handle); 618 status = PTR_ERR(handle);
616 mlog_errno(status); 619 mlog_errno(status);
@@ -940,7 +943,7 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk(
940 struct ocfs2_local_disk_chunk *dchunk; 943 struct ocfs2_local_disk_chunk *dchunk;
941 int status; 944 int status;
942 handle_t *handle; 945 handle_t *handle;
943 struct buffer_head *bh = NULL; 946 struct buffer_head *bh = NULL, *dbh = NULL;
944 u64 p_blkno; 947 u64 p_blkno;
945 948
946 /* We are protected by dqio_sem so no locking needed */ 949 /* We are protected by dqio_sem so no locking needed */
@@ -964,32 +967,35 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk(
964 mlog_errno(status); 967 mlog_errno(status);
965 goto out; 968 goto out;
966 } 969 }
970 /* Local quota info and two new blocks we initialize */
971 handle = ocfs2_start_trans(OCFS2_SB(sb),
972 OCFS2_LOCAL_QINFO_WRITE_CREDITS +
973 2 * OCFS2_QUOTA_BLOCK_UPDATE_CREDITS);
974 if (IS_ERR(handle)) {
975 status = PTR_ERR(handle);
976 mlog_errno(status);
977 goto out;
978 }
967 979
980 /* Initialize chunk header */
968 down_read(&OCFS2_I(lqinode)->ip_alloc_sem); 981 down_read(&OCFS2_I(lqinode)->ip_alloc_sem);
969 status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks, 982 status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks,
970 &p_blkno, NULL, NULL); 983 &p_blkno, NULL, NULL);
971 up_read(&OCFS2_I(lqinode)->ip_alloc_sem); 984 up_read(&OCFS2_I(lqinode)->ip_alloc_sem);
972 if (status < 0) { 985 if (status < 0) {
973 mlog_errno(status); 986 mlog_errno(status);
974 goto out; 987 goto out_trans;
975 } 988 }
976 bh = sb_getblk(sb, p_blkno); 989 bh = sb_getblk(sb, p_blkno);
977 if (!bh) { 990 if (!bh) {
978 status = -ENOMEM; 991 status = -ENOMEM;
979 mlog_errno(status); 992 mlog_errno(status);
980 goto out; 993 goto out_trans;
981 } 994 }
982 dchunk = (struct ocfs2_local_disk_chunk *)bh->b_data; 995 dchunk = (struct ocfs2_local_disk_chunk *)bh->b_data;
983 996 ocfs2_set_new_buffer_uptodate(lqinode, bh);
984 handle = ocfs2_start_trans(OCFS2_SB(sb), 2);
985 if (IS_ERR(handle)) {
986 status = PTR_ERR(handle);
987 mlog_errno(status);
988 goto out;
989 }
990
991 status = ocfs2_journal_access_dq(handle, lqinode, bh, 997 status = ocfs2_journal_access_dq(handle, lqinode, bh,
992 OCFS2_JOURNAL_ACCESS_WRITE); 998 OCFS2_JOURNAL_ACCESS_CREATE);
993 if (status < 0) { 999 if (status < 0) {
994 mlog_errno(status); 1000 mlog_errno(status);
995 goto out_trans; 1001 goto out_trans;
@@ -999,7 +1005,6 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk(
999 memset(dchunk->dqc_bitmap, 0, 1005 memset(dchunk->dqc_bitmap, 0,
1000 sb->s_blocksize - sizeof(struct ocfs2_local_disk_chunk) - 1006 sb->s_blocksize - sizeof(struct ocfs2_local_disk_chunk) -
1001 OCFS2_QBLK_RESERVED_SPACE); 1007 OCFS2_QBLK_RESERVED_SPACE);
1002 set_buffer_uptodate(bh);
1003 unlock_buffer(bh); 1008 unlock_buffer(bh);
1004 status = ocfs2_journal_dirty(handle, bh); 1009 status = ocfs2_journal_dirty(handle, bh);
1005 if (status < 0) { 1010 if (status < 0) {
@@ -1007,6 +1012,38 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk(
1007 goto out_trans; 1012 goto out_trans;
1008 } 1013 }
1009 1014
1015 /* Initialize new block with structures */
1016 down_read(&OCFS2_I(lqinode)->ip_alloc_sem);
1017 status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks + 1,
1018 &p_blkno, NULL, NULL);
1019 up_read(&OCFS2_I(lqinode)->ip_alloc_sem);
1020 if (status < 0) {
1021 mlog_errno(status);
1022 goto out_trans;
1023 }
1024 dbh = sb_getblk(sb, p_blkno);
1025 if (!dbh) {
1026 status = -ENOMEM;
1027 mlog_errno(status);
1028 goto out_trans;
1029 }
1030 ocfs2_set_new_buffer_uptodate(lqinode, dbh);
1031 status = ocfs2_journal_access_dq(handle, lqinode, dbh,
1032 OCFS2_JOURNAL_ACCESS_CREATE);
1033 if (status < 0) {
1034 mlog_errno(status);
1035 goto out_trans;
1036 }
1037 lock_buffer(dbh);
1038 memset(dbh->b_data, 0, sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE);
1039 unlock_buffer(dbh);
1040 status = ocfs2_journal_dirty(handle, dbh);
1041 if (status < 0) {
1042 mlog_errno(status);
1043 goto out_trans;
1044 }
1045
1046 /* Update local quotafile info */
1010 oinfo->dqi_blocks += 2; 1047 oinfo->dqi_blocks += 2;
1011 oinfo->dqi_chunks++; 1048 oinfo->dqi_chunks++;
1012 status = ocfs2_local_write_info(sb, type); 1049 status = ocfs2_local_write_info(sb, type);
@@ -1031,6 +1068,7 @@ out_trans:
1031 ocfs2_commit_trans(OCFS2_SB(sb), handle); 1068 ocfs2_commit_trans(OCFS2_SB(sb), handle);
1032out: 1069out:
1033 brelse(bh); 1070 brelse(bh);
1071 brelse(dbh);
1034 kmem_cache_free(ocfs2_qf_chunk_cachep, chunk); 1072 kmem_cache_free(ocfs2_qf_chunk_cachep, chunk);
1035 return ERR_PTR(status); 1073 return ERR_PTR(status);
1036} 1074}
@@ -1048,6 +1086,8 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file(
1048 struct ocfs2_local_disk_chunk *dchunk; 1086 struct ocfs2_local_disk_chunk *dchunk;
1049 int epb = ol_quota_entries_per_block(sb); 1087 int epb = ol_quota_entries_per_block(sb);
1050 unsigned int chunk_blocks; 1088 unsigned int chunk_blocks;
1089 struct buffer_head *bh;
1090 u64 p_blkno;
1051 int status; 1091 int status;
1052 handle_t *handle; 1092 handle_t *handle;
1053 1093
@@ -1075,12 +1115,49 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file(
1075 mlog_errno(status); 1115 mlog_errno(status);
1076 goto out; 1116 goto out;
1077 } 1117 }
1078 handle = ocfs2_start_trans(OCFS2_SB(sb), 2); 1118
1119 /* Get buffer from the just added block */
1120 down_read(&OCFS2_I(lqinode)->ip_alloc_sem);
1121 status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks,
1122 &p_blkno, NULL, NULL);
1123 up_read(&OCFS2_I(lqinode)->ip_alloc_sem);
1124 if (status < 0) {
1125 mlog_errno(status);
1126 goto out;
1127 }
1128 bh = sb_getblk(sb, p_blkno);
1129 if (!bh) {
1130 status = -ENOMEM;
1131 mlog_errno(status);
1132 goto out;
1133 }
1134 ocfs2_set_new_buffer_uptodate(lqinode, bh);
1135
1136 /* Local quota info, chunk header and the new block we initialize */
1137 handle = ocfs2_start_trans(OCFS2_SB(sb),
1138 OCFS2_LOCAL_QINFO_WRITE_CREDITS +
1139 2 * OCFS2_QUOTA_BLOCK_UPDATE_CREDITS);
1079 if (IS_ERR(handle)) { 1140 if (IS_ERR(handle)) {
1080 status = PTR_ERR(handle); 1141 status = PTR_ERR(handle);
1081 mlog_errno(status); 1142 mlog_errno(status);
1082 goto out; 1143 goto out;
1083 } 1144 }
1145 /* Zero created block */
1146 status = ocfs2_journal_access_dq(handle, lqinode, bh,
1147 OCFS2_JOURNAL_ACCESS_CREATE);
1148 if (status < 0) {
1149 mlog_errno(status);
1150 goto out_trans;
1151 }
1152 lock_buffer(bh);
1153 memset(bh->b_data, 0, sb->s_blocksize);
1154 unlock_buffer(bh);
1155 status = ocfs2_journal_dirty(handle, bh);
1156 if (status < 0) {
1157 mlog_errno(status);
1158 goto out_trans;
1159 }
1160 /* Update chunk header */
1084 status = ocfs2_journal_access_dq(handle, lqinode, chunk->qc_headerbh, 1161 status = ocfs2_journal_access_dq(handle, lqinode, chunk->qc_headerbh,
1085 OCFS2_JOURNAL_ACCESS_WRITE); 1162 OCFS2_JOURNAL_ACCESS_WRITE);
1086 if (status < 0) { 1163 if (status < 0) {
@@ -1097,6 +1174,7 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file(
1097 mlog_errno(status); 1174 mlog_errno(status);
1098 goto out_trans; 1175 goto out_trans;
1099 } 1176 }
1177 /* Update file header */
1100 oinfo->dqi_blocks++; 1178 oinfo->dqi_blocks++;
1101 status = ocfs2_local_write_info(sb, type); 1179 status = ocfs2_local_write_info(sb, type);
1102 if (status < 0) { 1180 if (status < 0) {
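
Both quota_local.c hunks initialize freshly allocated blocks the same way: mark the buffer uptodate by hand (there is nothing valid on disk to read), journal it with OCFS2_JOURNAL_ACCESS_CREATE rather than ACCESS_WRITE, zero it under the buffer lock, then dirty it. That pattern distilled (hypothetical helper, simplified error handling):

    /* sketch only: init_fresh_quota_block() is a hypothetical helper */
    static int init_fresh_quota_block(handle_t *handle, struct inode *inode,
                                      struct super_block *sb, u64 p_blkno)
    {
        struct buffer_head *bh;
        int status;

        bh = sb_getblk(sb, p_blkno);
        if (!bh)
            return -ENOMEM;
        ocfs2_set_new_buffer_uptodate(inode, bh);  /* never read from disk */

        status = ocfs2_journal_access_dq(handle, inode, bh,
                                         OCFS2_JOURNAL_ACCESS_CREATE);
        if (status < 0)
            goto out;

        lock_buffer(bh);
        memset(bh->b_data, 0, sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE);
        unlock_buffer(bh);
        status = ocfs2_journal_dirty(handle, bh);
    out:
        brelse(bh);
        return status;
    }
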
diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c
index 3f661376a2de..e49c41050264 100644
--- a/fs/ocfs2/stack_o2cb.c
+++ b/fs/ocfs2/stack_o2cb.c
@@ -17,6 +17,7 @@
17 * General Public License for more details. 17 * General Public License for more details.
18 */ 18 */
19 19
20#include <linux/kernel.h>
20#include <linux/crc32.h> 21#include <linux/crc32.h>
21#include <linux/module.h> 22#include <linux/module.h>
22 23
@@ -153,7 +154,7 @@ static int status_map[] = {
153 154
154static int dlm_status_to_errno(enum dlm_status status) 155static int dlm_status_to_errno(enum dlm_status status)
155{ 156{
156 BUG_ON(status > (sizeof(status_map) / sizeof(status_map[0]))); 157 BUG_ON(status < 0 || status >= ARRAY_SIZE(status_map));
157 158
158 return status_map[status]; 159 return status_map[status];
159} 160}
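
The stack_o2cb.c fix closes an off-by-one: the old BUG_ON used > where valid indexes run 0..ARRAY_SIZE-1, so status == ARRAY_SIZE slipped past, and the negative case is now rejected explicitly as well. A standalone demonstration (the table is a stand-in; only the index arithmetic matters):

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /* stand-in table; only the index arithmetic matters */
    static int status_map[] = { 0, -11, -22, -5 };

    int main(void)
    {
        int status = ARRAY_SIZE(status_map);    /* 4: one past the end */

        /* old guard: 4 > 4 is false, the out-of-range index survives */
        printf("old guard trips: %d\n",
               status > (int)ARRAY_SIZE(status_map));
        /* new guard: 4 >= 4 is true, and negatives are rejected too */
        printf("new guard trips: %d\n",
               status < 0 || status >= (int)ARRAY_SIZE(status_map));
        return 0;
    }
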
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 7efb349fb9bd..a3f8871d21fd 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -777,6 +777,7 @@ static int ocfs2_sb_probe(struct super_block *sb,
777 } 777 }
778 di = (struct ocfs2_dinode *) (*bh)->b_data; 778 di = (struct ocfs2_dinode *) (*bh)->b_data;
779 memset(stats, 0, sizeof(struct ocfs2_blockcheck_stats)); 779 memset(stats, 0, sizeof(struct ocfs2_blockcheck_stats));
780 spin_lock_init(&stats->b_lock);
780 status = ocfs2_verify_volume(di, *bh, blksize, stats); 781 status = ocfs2_verify_volume(di, *bh, blksize, stats);
781 if (status >= 0) 782 if (status >= 0)
782 goto bail; 783 goto bail;
@@ -1182,7 +1183,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
1182 wake_up(&osb->osb_mount_event); 1183 wake_up(&osb->osb_mount_event);
1183 1184
1184 /* Start this when the mount is almost sure of being successful */ 1185 /* Start this when the mount is almost sure of being successful */
1185 ocfs2_orphan_scan_init(osb); 1186 ocfs2_orphan_scan_start(osb);
1186 1187
1187 mlog_exit(status); 1188 mlog_exit(status);
1188 return status; 1189 return status;
@@ -1213,14 +1214,31 @@ static int ocfs2_get_sb(struct file_system_type *fs_type,
1213 mnt); 1214 mnt);
1214} 1215}
1215 1216
1217static void ocfs2_kill_sb(struct super_block *sb)
1218{
1219 struct ocfs2_super *osb = OCFS2_SB(sb);
1220
1221 /* Failed mount? */
1222 if (!osb || atomic_read(&osb->vol_state) == VOLUME_DISABLED)
1223 goto out;
1224
1225 /* Prevent further queueing of inode drop events */
1226 spin_lock(&dentry_list_lock);
1227 ocfs2_set_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED);
1228 spin_unlock(&dentry_list_lock);
1229 /* Wait for work to finish and/or remove it */
1230 cancel_work_sync(&osb->dentry_lock_work);
1231out:
1232 kill_block_super(sb);
1233}
1234
1216static struct file_system_type ocfs2_fs_type = { 1235static struct file_system_type ocfs2_fs_type = {
1217 .owner = THIS_MODULE, 1236 .owner = THIS_MODULE,
1218 .name = "ocfs2", 1237 .name = "ocfs2",
1219 .get_sb = ocfs2_get_sb, /* is this called when we mount 1238 .get_sb = ocfs2_get_sb, /* is this called when we mount
1220 * the fs? */ 1239 * the fs? */
1221 .kill_sb = kill_block_super, /* set to the generic one 1240 .kill_sb = ocfs2_kill_sb,
1222 * right now, but do we 1241
1223 * need to change that? */
1224 .fs_flags = FS_REQUIRES_DEV|FS_RENAME_DOES_D_MOVE, 1242 .fs_flags = FS_REQUIRES_DEV|FS_RENAME_DOES_D_MOVE,
1225 .next = NULL 1243 .next = NULL
1226}; 1244};
@@ -1819,6 +1837,12 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
1819 1837
1820 debugfs_remove(osb->osb_ctxt); 1838 debugfs_remove(osb->osb_ctxt);
1821 1839
1840 /*
 1841 * Flush the inode dropping work queue so that deletes are
1842 * performed while the filesystem is still working
1843 */
1844 ocfs2_drop_all_dl_inodes(osb);
1845
1822 /* Orphan scan should be stopped as early as possible */ 1846 /* Orphan scan should be stopped as early as possible */
1823 ocfs2_orphan_scan_stop(osb); 1847 ocfs2_orphan_scan_stop(osb);
1824 1848
@@ -1981,6 +2005,8 @@ static int ocfs2_initialize_super(struct super_block *sb,
1981 snprintf(osb->dev_str, sizeof(osb->dev_str), "%u,%u", 2005 snprintf(osb->dev_str, sizeof(osb->dev_str), "%u,%u",
1982 MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev)); 2006 MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));
1983 2007
2008 ocfs2_orphan_scan_init(osb);
2009
1984 status = ocfs2_recovery_init(osb); 2010 status = ocfs2_recovery_init(osb);
1985 if (status) { 2011 if (status) {
1986 mlog(ML_ERROR, "Unable to initialize recovery state\n"); 2012 mlog(ML_ERROR, "Unable to initialize recovery state\n");
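
The super.c changes split orphan-scan setup from arming it: state is initialized early in ocfs2_initialize_super(), but the periodic scan only starts once the mount is nearly certain to succeed, so a failed mount never has to stop a scanner that never ran. A minimal model of that init/start split:

    #include <stdio.h>

    /* toy model of the init/start split; fields are illustrative */
    struct scanner { int initialized; int armed; };

    static void scan_init(struct scanner *s)  { s->initialized = 1; }
    static void scan_start(struct scanner *s) { if (s->initialized) s->armed = 1; }
    static void scan_stop(struct scanner *s)  { s->armed = 0; }

    static int mount_fs(struct scanner *s, int will_fail)
    {
        scan_init(s);           /* early: from ocfs2_initialize_super() */
        if (will_fail)
            return -1;          /* error path: nothing to cancel */
        scan_start(s);          /* late: from the fill_super tail */
        return 0;
    }

    int main(void)
    {
        struct scanner s = { 0, 0 };

        printf("failed mount -> armed=%d\n", (mount_fs(&s, 1), s.armed));
        printf("good mount   -> armed=%d\n", (mount_fs(&s, 0), s.armed));
        scan_stop(&s);
        return 0;
    }
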
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index ba320e250747..d1a27cda984f 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -1052,7 +1052,8 @@ static int ocfs2_xattr_block_get(struct inode *inode,
1052 struct ocfs2_xattr_block *xb; 1052 struct ocfs2_xattr_block *xb;
1053 struct ocfs2_xattr_value_root *xv; 1053 struct ocfs2_xattr_value_root *xv;
1054 size_t size; 1054 size_t size;
1055 int ret = -ENODATA, name_offset, name_len, block_off, i; 1055 int ret = -ENODATA, name_offset, name_len, i;
1056 int uninitialized_var(block_off);
1056 1057
1057 xs->bucket = ocfs2_xattr_bucket_new(inode); 1058 xs->bucket = ocfs2_xattr_bucket_new(inode);
1058 if (!xs->bucket) { 1059 if (!xs->bucket) {
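
block_off is only ever read after one of the assignment paths has run, but gcc cannot prove that, so the declaration is wrapped in uninitialized_var() to silence the false positive without generating extra code. In kernels of this era the macro was essentially a self-assignment; approximately:

/* Approximate definition from the compiler headers of the time: */
#define uninitialized_var(x) x = x

/* As used in the hunk above, this expands to a harmless
 * self-initialisation that quiets the warning: */
int uninitialized_var(block_off);
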
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 3ce5ae9e3d2d..6f742f6658a9 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -234,23 +234,20 @@ static int check_mem_permission(struct task_struct *task)
234 234
235struct mm_struct *mm_for_maps(struct task_struct *task) 235struct mm_struct *mm_for_maps(struct task_struct *task)
236{ 236{
237 struct mm_struct *mm = get_task_mm(task); 237 struct mm_struct *mm;
238 if (!mm) 238
239 if (mutex_lock_killable(&task->cred_guard_mutex))
239 return NULL; 240 return NULL;
240 down_read(&mm->mmap_sem); 241
241 task_lock(task); 242 mm = get_task_mm(task);
242 if (task->mm != mm) 243 if (mm && mm != current->mm &&
243 goto out; 244 !ptrace_may_access(task, PTRACE_MODE_READ)) {
244 if (task->mm != current->mm && 245 mmput(mm);
245 __ptrace_may_access(task, PTRACE_MODE_READ) < 0) 246 mm = NULL;
246 goto out; 247 }
247 task_unlock(task); 248 mutex_unlock(&task->cred_guard_mutex);
249
248 return mm; 250 return mm;
249out:
250 task_unlock(task);
251 up_read(&mm->mmap_sem);
252 mmput(mm);
253 return NULL;
254} 251}
255 252
256static int proc_pid_cmdline(struct task_struct *task, char * buffer) 253static int proc_pid_cmdline(struct task_struct *task, char * buffer)
@@ -1006,12 +1003,7 @@ static ssize_t oom_adjust_read(struct file *file, char __user *buf,
1006 1003
1007 if (!task) 1004 if (!task)
1008 return -ESRCH; 1005 return -ESRCH;
1009 task_lock(task); 1006 oom_adjust = task->oomkilladj;
1010 if (task->mm)
1011 oom_adjust = task->mm->oom_adj;
1012 else
1013 oom_adjust = OOM_DISABLE;
1014 task_unlock(task);
1015 put_task_struct(task); 1007 put_task_struct(task);
1016 1008
1017 len = snprintf(buffer, sizeof(buffer), "%i\n", oom_adjust); 1009 len = snprintf(buffer, sizeof(buffer), "%i\n", oom_adjust);
@@ -1040,19 +1032,11 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
1040 task = get_proc_task(file->f_path.dentry->d_inode); 1032 task = get_proc_task(file->f_path.dentry->d_inode);
1041 if (!task) 1033 if (!task)
1042 return -ESRCH; 1034 return -ESRCH;
1043 task_lock(task); 1035 if (oom_adjust < task->oomkilladj && !capable(CAP_SYS_RESOURCE)) {
1044 if (!task->mm) {
1045 task_unlock(task);
1046 put_task_struct(task);
1047 return -EINVAL;
1048 }
1049 if (oom_adjust < task->mm->oom_adj && !capable(CAP_SYS_RESOURCE)) {
1050 task_unlock(task);
1051 put_task_struct(task); 1036 put_task_struct(task);
1052 return -EACCES; 1037 return -EACCES;
1053 } 1038 }
1054 task->mm->oom_adj = oom_adjust; 1039 task->oomkilladj = oom_adjust;
1055 task_unlock(task);
1056 put_task_struct(task); 1040 put_task_struct(task);
1057 if (end - buffer == 0) 1041 if (end - buffer == 0)
1058 return -EIO; 1042 return -EIO;
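
The reworked mm_for_maps() serialises against exec via cred_guard_mutex and performs the ptrace access check itself, but it no longer returns with mmap_sem held; that responsibility moves to the callers (see the task_mmu.c and task_nommu.c hunks below). The resulting calling convention, sketched:

/*
 * Sketch of the new contract: mm_for_maps() returns a referenced
 * mm_struct or NULL; the caller takes mmap_sem and drops the
 * reference itself.
 */
struct mm_struct *mm = mm_for_maps(task);
if (!mm)
	return NULL;
down_read(&mm->mmap_sem);
/* ... walk mm->mmap ... */
up_read(&mm->mmap_sem);
mmput(mm);
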
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 6f61b7cc32e0..9bd8be1d235c 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -119,6 +119,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
119 mm = mm_for_maps(priv->task); 119 mm = mm_for_maps(priv->task);
120 if (!mm) 120 if (!mm)
121 return NULL; 121 return NULL;
122 down_read(&mm->mmap_sem);
122 123
123 tail_vma = get_gate_vma(priv->task); 124 tail_vma = get_gate_vma(priv->task);
124 priv->tail_vma = tail_vma; 125 priv->tail_vma = tail_vma;
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 64a72e2e7650..8f5c05d3dbd3 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -189,6 +189,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
189 priv->task = NULL; 189 priv->task = NULL;
190 return NULL; 190 return NULL;
191 } 191 }
192 down_read(&mm->mmap_sem);
192 193
193 /* start from the Nth VMA */ 194 /* start from the Nth VMA */
194 for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) 195 for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 70f36c043d62..38f7bd559f35 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2043,7 +2043,6 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
2043 invalidate_bdev(sb->s_bdev); 2043 invalidate_bdev(sb->s_bdev);
2044 } 2044 }
2045 mutex_lock(&dqopt->dqonoff_mutex); 2045 mutex_lock(&dqopt->dqonoff_mutex);
2046 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
2047 if (sb_has_quota_loaded(sb, type)) { 2046 if (sb_has_quota_loaded(sb, type)) {
2048 error = -EBUSY; 2047 error = -EBUSY;
2049 goto out_lock; 2048 goto out_lock;
@@ -2054,9 +2053,11 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
2054 * possible) Also nobody should write to the file - we use 2053 * possible) Also nobody should write to the file - we use
2055 * special IO operations which ignore the immutable bit. */ 2054 * special IO operations which ignore the immutable bit. */
2056 down_write(&dqopt->dqptr_sem); 2055 down_write(&dqopt->dqptr_sem);
2056 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
2057 oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | 2057 oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
2058 S_NOQUOTA); 2058 S_NOQUOTA);
2059 inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE; 2059 inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
2060 mutex_unlock(&inode->i_mutex);
2060 up_write(&dqopt->dqptr_sem); 2061 up_write(&dqopt->dqptr_sem);
2061 sb->dq_op->drop(inode); 2062 sb->dq_op->drop(inode);
2062 } 2063 }
@@ -2080,7 +2081,6 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
2080 goto out_file_init; 2081 goto out_file_init;
2081 } 2082 }
2082 mutex_unlock(&dqopt->dqio_mutex); 2083 mutex_unlock(&dqopt->dqio_mutex);
2083 mutex_unlock(&inode->i_mutex);
2084 spin_lock(&dq_state_lock); 2084 spin_lock(&dq_state_lock);
2085 dqopt->flags |= dquot_state_flag(flags, type); 2085 dqopt->flags |= dquot_state_flag(flags, type);
2086 spin_unlock(&dq_state_lock); 2086 spin_unlock(&dq_state_lock);
@@ -2096,13 +2096,14 @@ out_file_init:
2096out_lock: 2096out_lock:
2097 if (oldflags != -1) { 2097 if (oldflags != -1) {
2098 down_write(&dqopt->dqptr_sem); 2098 down_write(&dqopt->dqptr_sem);
2099 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
2099 /* Set the flags back (in the case of accidental quotaon() 2100 /* Set the flags back (in the case of accidental quotaon()
2100 * on a wrong file we don't want to mess up the flags) */ 2101 * on a wrong file we don't want to mess up the flags) */
2101 inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE); 2102 inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
2102 inode->i_flags |= oldflags; 2103 inode->i_flags |= oldflags;
2104 mutex_unlock(&inode->i_mutex);
2103 up_write(&dqopt->dqptr_sem); 2105 up_write(&dqopt->dqptr_sem);
2104 } 2106 }
2105 mutex_unlock(&inode->i_mutex);
2106 mutex_unlock(&dqopt->dqonoff_mutex); 2107 mutex_unlock(&dqopt->dqonoff_mutex);
2107out_fmt: 2108out_fmt:
2108 put_quota_format(fmt); 2109 put_quota_format(fmt);
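
Taken together, the dquot.c hunks narrow the scope of i_mutex: instead of being held across the whole of vfs_load_quota_inode(), it is now taken inside dqptr_sem just around the inode flag updates and released before the semaphore. The resulting lock nesting, sketched with indentation showing the order:

/* New nesting in vfs_load_quota_inode() (sketch): */
mutex_lock(&dqopt->dqonoff_mutex);
	down_write(&dqopt->dqptr_sem);
		mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
		/* set or restore S_NOQUOTA | S_NOATIME | S_IMMUTABLE */
		mutex_unlock(&inode->i_mutex);
	up_write(&dqopt->dqptr_sem);
mutex_unlock(&dqopt->dqonoff_mutex);
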
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index ebb2c417912c..11f0c06316de 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -20,6 +20,7 @@
20#include <linux/ramfs.h> 20#include <linux/ramfs.h>
21#include <linux/pagevec.h> 21#include <linux/pagevec.h>
22#include <linux/mman.h> 22#include <linux/mman.h>
23#include <linux/sched.h>
23 24
24#include <asm/uaccess.h> 25#include <asm/uaccess.h>
25#include "internal.h" 26#include "internal.h"
diff --git a/fs/select.c b/fs/select.c
index d870237e42c7..8084834e123e 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -110,6 +110,7 @@ void poll_initwait(struct poll_wqueues *pwq)
110{ 110{
111 init_poll_funcptr(&pwq->pt, __pollwait); 111 init_poll_funcptr(&pwq->pt, __pollwait);
112 pwq->polling_task = current; 112 pwq->polling_task = current;
113 pwq->triggered = 0;
113 pwq->error = 0; 114 pwq->error = 0;
114 pwq->table = NULL; 115 pwq->table = NULL;
115 pwq->inline_index = 0; 116 pwq->inline_index = 0;
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index d88d0fac9fa5..14f2d71ea3ce 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -939,8 +939,10 @@ again:
939 /* Remove from old parent's list and insert into new parent's list. */ 939 /* Remove from old parent's list and insert into new parent's list. */
940 sysfs_unlink_sibling(sd); 940 sysfs_unlink_sibling(sd);
941 sysfs_get(new_parent_sd); 941 sysfs_get(new_parent_sd);
942 drop_nlink(old_parent->d_inode);
942 sysfs_put(sd->s_parent); 943 sysfs_put(sd->s_parent);
943 sd->s_parent = new_parent_sd; 944 sd->s_parent = new_parent_sd;
945 inc_nlink(new_parent->d_inode);
944 sysfs_link_sibling(sd); 946 sysfs_link_sibling(sd);
945 947
946 out_unlock: 948 out_unlock:
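
When a directory moves between parents, each parent's link count must follow the child's ".." entry: the old parent loses a link and the new one gains it. The fix in isolation:

/* Cross-directory move of a directory (sketch): */
drop_nlink(old_parent->d_inode);	/* child's ".." no longer points here */
inc_nlink(new_parent->d_inode);		/* ...it points at the new parent now */
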
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 6832135159b6..9d1b8c2e6c45 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1087,11 +1087,23 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
1087 struct udf_inode_info *vati; 1087 struct udf_inode_info *vati;
1088 uint32_t pos; 1088 uint32_t pos;
1089 struct virtualAllocationTable20 *vat20; 1089 struct virtualAllocationTable20 *vat20;
1090 sector_t blocks = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
1090 1091
1091 /* VAT file entry is in the last recorded block */ 1092 /* VAT file entry is in the last recorded block */
1092 ino.partitionReferenceNum = type1_index; 1093 ino.partitionReferenceNum = type1_index;
1093 ino.logicalBlockNum = sbi->s_last_block - map->s_partition_root; 1094 ino.logicalBlockNum = sbi->s_last_block - map->s_partition_root;
1094 sbi->s_vat_inode = udf_iget(sb, &ino); 1095 sbi->s_vat_inode = udf_iget(sb, &ino);
1096 if (!sbi->s_vat_inode &&
1097 sbi->s_last_block != blocks - 1) {
1098 printk(KERN_NOTICE "UDF-fs: Failed to read VAT inode from the"
1099 " last recorded block (%lu), retrying with the last "
1100 "block of the device (%lu).\n",
1101 (unsigned long)sbi->s_last_block,
1102 (unsigned long)blocks - 1);
1103 ino.partitionReferenceNum = type1_index;
1104 ino.logicalBlockNum = blocks - 1 - map->s_partition_root;
1105 sbi->s_vat_inode = udf_iget(sb, &ino);
1106 }
1095 if (!sbi->s_vat_inode) 1107 if (!sbi->s_vat_inode)
1096 return 1; 1108 return 1;
1097 1109
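
Some media report an s_last_block that does not match where the VAT actually lives, so when the first udf_iget() fails the code now retries at the true last block of the device, derived from the block device size. The fallback logic in isolation:

/* Sketch: compute the device's final block and retry the VAT there. */
sector_t blocks = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;

if (!sbi->s_vat_inode && sbi->s_last_block != blocks - 1) {
	ino.logicalBlockNum = blocks - 1 - map->s_partition_root;
	sbi->s_vat_inode = udf_iget(sb, &ino);
}
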
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 7ec89fc05b2b..aecf2519db76 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1268,6 +1268,14 @@ xfs_vm_writepage(
1268 if (!page_has_buffers(page)) 1268 if (!page_has_buffers(page))
1269 create_empty_buffers(page, 1 << inode->i_blkbits, 0); 1269 create_empty_buffers(page, 1 << inode->i_blkbits, 0);
1270 1270
1271
1272 /*
1273 * VM calculation for nr_to_write seems off. Bump it way
1274 * up, this gets simple streaming writes zippy again.
1275 * To be reviewed again after Jens' writeback changes.
1276 */
1277 wbc->nr_to_write *= 4;
1278
1271 /* 1279 /*
1272 * Convert delayed allocate, unwritten or unmapped space 1280 * Convert delayed allocate, unwritten or unmapped space
1273 * to real space and flush out to disk. 1281 * to real space and flush out to disk.
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 0c93c7ef3d18..965df1227d64 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -770,7 +770,7 @@ xfs_buf_associate_memory(
770 bp->b_pages = NULL; 770 bp->b_pages = NULL;
771 bp->b_addr = mem; 771 bp->b_addr = mem;
772 772
773 rval = _xfs_buf_get_pages(bp, page_count, 0); 773 rval = _xfs_buf_get_pages(bp, page_count, XBF_DONT_BLOCK);
774 if (rval) 774 if (rval)
775 return rval; 775 return rval;
776 776
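
XBF_DONT_BLOCK plays the same role for buffer allocation that KM_NOFS plays for kmem_alloc() in the hunks further below: page allocations for the buffer are made with GFP_NOFS rather than GFP_KERNEL, so memory reclaim cannot recurse back into the filesystem. Roughly how xfs_buf.c converts the flag (a sketch, not the verbatim macro):

/* Rough sketch of the xb_to_gfp() mapping: */
gfp_t gfp = (flags & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL;
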
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 58973bb46038..8070b34cc287 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -680,8 +680,8 @@ xfs_vn_fiemap(
680 else 680 else
681 bm.bmv_length = BTOBB(length); 681 bm.bmv_length = BTOBB(length);
682 682
683 /* our formatter will tell xfs_getbmap when to stop. */ 683 /* We add one because in getbmap world count includes the header */
684 bm.bmv_count = MAXEXTNUM; 684 bm.bmv_count = fieinfo->fi_extents_max + 1;
685 bm.bmv_iflags = BMV_IF_PREALLOC; 685 bm.bmv_iflags = BMV_IF_PREALLOC;
686 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) 686 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR)
687 bm.bmv_iflags |= BMV_IF_ATTRFORK; 687 bm.bmv_iflags |= BMV_IF_ATTRFORK;
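
Because the getbmap interface counts its header record in bmv_count, returning at most fi_extents_max extents to userspace requires asking for one more. A worked example:

/* Userspace sized its fiemap buffer for 32 extents:       */
/*   fieinfo->fi_extents_max == 32                         */
/*   bm.bmv_count           == 33  (header + 32 extents)   */
bm.bmv_count = fieinfo->fi_extents_max + 1;
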
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index b619d6b8ca43..98ef624d9baf 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -708,6 +708,16 @@ xfs_reclaim_inode(
708 return 0; 708 return 0;
709} 709}
710 710
711void
712__xfs_inode_set_reclaim_tag(
713 struct xfs_perag *pag,
714 struct xfs_inode *ip)
715{
716 radix_tree_tag_set(&pag->pag_ici_root,
717 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
718 XFS_ICI_RECLAIM_TAG);
719}
720
711/* 721/*
712 * We set the inode flag atomically with the radix tree tag. 722 * We set the inode flag atomically with the radix tree tag.
713 * Once we get tag lookups on the radix tree, this inode flag 723 * Once we get tag lookups on the radix tree, this inode flag
@@ -722,8 +732,7 @@ xfs_inode_set_reclaim_tag(
722 732
723 read_lock(&pag->pag_ici_lock); 733 read_lock(&pag->pag_ici_lock);
724 spin_lock(&ip->i_flags_lock); 734 spin_lock(&ip->i_flags_lock);
725 radix_tree_tag_set(&pag->pag_ici_root, 735 __xfs_inode_set_reclaim_tag(pag, ip);
726 XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
727 __xfs_iflags_set(ip, XFS_IRECLAIMABLE); 736 __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
728 spin_unlock(&ip->i_flags_lock); 737 spin_unlock(&ip->i_flags_lock);
729 read_unlock(&pag->pag_ici_lock); 738 read_unlock(&pag->pag_ici_lock);
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
index 2a10301c99c7..59120602588a 100644
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/linux-2.6/xfs_sync.h
@@ -48,6 +48,7 @@ int xfs_reclaim_inode(struct xfs_inode *ip, int locked, int sync_mode);
48int xfs_reclaim_inodes(struct xfs_mount *mp, int mode); 48int xfs_reclaim_inodes(struct xfs_mount *mp, int mode);
49 49
50void xfs_inode_set_reclaim_tag(struct xfs_inode *ip); 50void xfs_inode_set_reclaim_tag(struct xfs_inode *ip);
51void __xfs_inode_set_reclaim_tag(struct xfs_perag *pag, struct xfs_inode *ip);
51void xfs_inode_clear_reclaim_tag(struct xfs_inode *ip); 52void xfs_inode_clear_reclaim_tag(struct xfs_inode *ip);
52void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag, 53void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag,
53 struct xfs_inode *ip); 54 struct xfs_inode *ip);
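
The new double-underscore helper follows the usual kernel convention: __xfs_inode_set_reclaim_tag() only sets the radix tree tag and assumes the caller already holds pag_ici_lock, while xfs_inode_set_reclaim_tag() takes the locks itself. That makes the tag settable from contexts already inside the locking, such as the error path added to xfs_iget_cache_hit() further below. The locked form, sketched:

/* Sketch: callers that already hold the locks use the __ variant. */
read_lock(&pag->pag_ici_lock);
spin_lock(&ip->i_flags_lock);
__xfs_inode_set_reclaim_tag(pag, ip);	/* radix tree tag only */
__xfs_iflags_set(ip, XFS_IRECLAIMABLE);	/* matching inode flag */
spin_unlock(&ip->i_flags_lock);
read_unlock(&pag->pag_ici_lock);
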
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index db15feb906ff..4ece1906bd41 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -2010,7 +2010,9 @@ xfs_attr_rmtval_get(xfs_da_args_t *args)
2010 dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock); 2010 dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
2011 blkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount); 2011 blkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
2012 error = xfs_read_buf(mp, mp->m_ddev_targp, dblkno, 2012 error = xfs_read_buf(mp, mp->m_ddev_targp, dblkno,
2013 blkcnt, XFS_BUF_LOCK, &bp); 2013 blkcnt,
2014 XFS_BUF_LOCK | XBF_DONT_BLOCK,
2015 &bp);
2014 if (error) 2016 if (error)
2015 return(error); 2017 return(error);
2016 2018
@@ -2141,8 +2143,8 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
2141 dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock), 2143 dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock),
2142 blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount); 2144 blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
2143 2145
2144 bp = xfs_buf_get_flags(mp->m_ddev_targp, dblkno, 2146 bp = xfs_buf_get_flags(mp->m_ddev_targp, dblkno, blkcnt,
2145 blkcnt, XFS_BUF_LOCK); 2147 XFS_BUF_LOCK | XBF_DONT_BLOCK);
2146 ASSERT(bp); 2148 ASSERT(bp);
2147 ASSERT(!XFS_BUF_GETERROR(bp)); 2149 ASSERT(!XFS_BUF_GETERROR(bp));
2148 2150
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 7928b9983c1d..8ee5b5a76a2a 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -6009,7 +6009,7 @@ xfs_getbmap(
6009 */ 6009 */
6010 error = ENOMEM; 6010 error = ENOMEM;
6011 subnex = 16; 6011 subnex = 16;
6012 map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL); 6012 map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
6013 if (!map) 6013 if (!map)
6014 goto out_unlock_ilock; 6014 goto out_unlock_ilock;
6015 6015
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index e9df99574829..26717388acf5 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -120,8 +120,8 @@ xfs_btree_check_sblock(
120 XFS_RANDOM_BTREE_CHECK_SBLOCK))) { 120 XFS_RANDOM_BTREE_CHECK_SBLOCK))) {
121 if (bp) 121 if (bp)
122 xfs_buftrace("SBTREE ERROR", bp); 122 xfs_buftrace("SBTREE ERROR", bp);
123 XFS_ERROR_REPORT("xfs_btree_check_sblock", XFS_ERRLEVEL_LOW, 123 XFS_CORRUPTION_ERROR("xfs_btree_check_sblock",
124 cur->bc_mp); 124 XFS_ERRLEVEL_LOW, cur->bc_mp, block);
125 return XFS_ERROR(EFSCORRUPTED); 125 return XFS_ERROR(EFSCORRUPTED);
126 } 126 }
127 return 0; 127 return 0;
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 9ff6e57a5075..2847bbc1c534 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -2201,7 +2201,7 @@ kmem_zone_t *xfs_dabuf_zone; /* dabuf zone */
2201xfs_da_state_t * 2201xfs_da_state_t *
2202xfs_da_state_alloc(void) 2202xfs_da_state_alloc(void)
2203{ 2203{
2204 return kmem_zone_zalloc(xfs_da_state_zone, KM_SLEEP); 2204 return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
2205} 2205}
2206 2206
2207/* 2207/*
@@ -2261,9 +2261,9 @@ xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra)
2261 int off; 2261 int off;
2262 2262
2263 if (nbuf == 1) 2263 if (nbuf == 1)
2264 dabuf = kmem_zone_alloc(xfs_dabuf_zone, KM_SLEEP); 2264 dabuf = kmem_zone_alloc(xfs_dabuf_zone, KM_NOFS);
2265 else 2265 else
2266 dabuf = kmem_alloc(XFS_DA_BUF_SIZE(nbuf), KM_SLEEP); 2266 dabuf = kmem_alloc(XFS_DA_BUF_SIZE(nbuf), KM_NOFS);
2267 dabuf->dirty = 0; 2267 dabuf->dirty = 0;
2268#ifdef XFS_DABUF_DEBUG 2268#ifdef XFS_DABUF_DEBUG
2269 dabuf->ra = ra; 2269 dabuf->ra = ra;
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c
index c657bec6d951..bb1d58eb3982 100644
--- a/fs/xfs/xfs_dir2.c
+++ b/fs/xfs/xfs_dir2.c
@@ -256,7 +256,7 @@ xfs_dir_cilookup_result(
256 !(args->op_flags & XFS_DA_OP_CILOOKUP)) 256 !(args->op_flags & XFS_DA_OP_CILOOKUP))
257 return EEXIST; 257 return EEXIST;
258 258
259 args->value = kmem_alloc(len, KM_MAYFAIL); 259 args->value = kmem_alloc(len, KM_NOFS | KM_MAYFAIL);
260 if (!args->value) 260 if (!args->value)
261 return ENOMEM; 261 return ENOMEM;
262 262
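
The xfs_bmap.c, xfs_da_btree.c and xfs_dir2.c hunks all move allocations to KM_NOFS so that direct reclaim cannot re-enter the filesystem while transaction or inode locks are held. In the XFS kmem wrapper of this era the flag translated approximately as follows (a sketch of kmem_flags_convert(), not the verbatim header):

/* Rough sketch of the KM_* to GFP_* mapping: */
gfp_t lflags = (flags & KM_NOSLEEP) ? GFP_ATOMIC : GFP_KERNEL;
if (flags & KM_NOFS)
	lflags &= ~__GFP_FS;	/* forbid fs re-entry from reclaim */
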
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index cbd451bb4848..2d0b3e1da9e6 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -167,17 +167,25 @@ xfs_growfs_data_private(
167 new = nb - mp->m_sb.sb_dblocks; 167 new = nb - mp->m_sb.sb_dblocks;
168 oagcount = mp->m_sb.sb_agcount; 168 oagcount = mp->m_sb.sb_agcount;
169 if (nagcount > oagcount) { 169 if (nagcount > oagcount) {
170 void *new_perag, *old_perag;
171
170 xfs_filestream_flush(mp); 172 xfs_filestream_flush(mp);
173
174 new_perag = kmem_zalloc(sizeof(xfs_perag_t) * nagcount,
175 KM_MAYFAIL);
176 if (!new_perag)
177 return XFS_ERROR(ENOMEM);
178
171 down_write(&mp->m_peraglock); 179 down_write(&mp->m_peraglock);
172 mp->m_perag = kmem_realloc(mp->m_perag, 180 memcpy(new_perag, mp->m_perag, sizeof(xfs_perag_t) * oagcount);
173 sizeof(xfs_perag_t) * nagcount, 181 old_perag = mp->m_perag;
174 sizeof(xfs_perag_t) * oagcount, 182 mp->m_perag = new_perag;
175 KM_SLEEP); 183
176 memset(&mp->m_perag[oagcount], 0,
177 (nagcount - oagcount) * sizeof(xfs_perag_t));
178 mp->m_flags |= XFS_MOUNT_32BITINODES; 184 mp->m_flags |= XFS_MOUNT_32BITINODES;
179 nagimax = xfs_initialize_perag(mp, nagcount); 185 nagimax = xfs_initialize_perag(mp, nagcount);
180 up_write(&mp->m_peraglock); 186 up_write(&mp->m_peraglock);
187
188 kmem_free(old_perag);
181 } 189 }
182 tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS); 190 tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS);
183 tp->t_flags |= XFS_TRANS_RESERVE; 191 tp->t_flags |= XFS_TRANS_RESERVE;
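
Replacing kmem_realloc() here means the allocation, which may sleep or fail, happens before any state is touched; inside m_peraglock the code only copies and swaps a pointer, and the old array is freed after the lock is dropped. The shape of the pattern:

/* Sketch: grow an array without allocating inside the critical section. */
new_perag = kmem_zalloc(sizeof(xfs_perag_t) * nagcount, KM_MAYFAIL);
if (!new_perag)
	return XFS_ERROR(ENOMEM);	/* fail early; nothing changed yet */

down_write(&mp->m_peraglock);
memcpy(new_perag, mp->m_perag, sizeof(xfs_perag_t) * oagcount);
old_perag = mp->m_perag;
mp->m_perag = new_perag;		/* publish under the lock */
up_write(&mp->m_peraglock);

kmem_free(old_perag);			/* free the old copy outside */
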
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 5fcec6f020a7..ecbf8b4d2e2e 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -64,6 +64,10 @@ xfs_inode_alloc(
64 ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP); 64 ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
65 if (!ip) 65 if (!ip)
66 return NULL; 66 return NULL;
67 if (inode_init_always(mp->m_super, VFS_I(ip))) {
68 kmem_zone_free(xfs_inode_zone, ip);
69 return NULL;
70 }
67 71
68 ASSERT(atomic_read(&ip->i_iocount) == 0); 72 ASSERT(atomic_read(&ip->i_iocount) == 0);
69 ASSERT(atomic_read(&ip->i_pincount) == 0); 73 ASSERT(atomic_read(&ip->i_pincount) == 0);
@@ -105,17 +109,6 @@ xfs_inode_alloc(
105#ifdef XFS_DIR2_TRACE 109#ifdef XFS_DIR2_TRACE
106 ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS); 110 ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
107#endif 111#endif
108 /*
109 * Now initialise the VFS inode. We do this after the xfs_inode
110 * initialisation as internal failures will result in ->destroy_inode
111 * being called and that will pass down through the reclaim path and
112 * free the XFS inode. This path requires the XFS inode to already be
113 * initialised. Hence if this call fails, the xfs_inode has already
114 * been freed and we should not reference it at all in the error
115 * handling.
116 */
117 if (!inode_init_always(mp->m_super, VFS_I(ip)))
118 return NULL;
119 112
120 /* prevent anyone from using this yet */ 113 /* prevent anyone from using this yet */
121 VFS_I(ip)->i_state = I_NEW|I_LOCK; 114 VFS_I(ip)->i_state = I_NEW|I_LOCK;
@@ -123,6 +116,71 @@ xfs_inode_alloc(
123 return ip; 116 return ip;
124} 117}
125 118
119STATIC void
120xfs_inode_free(
121 struct xfs_inode *ip)
122{
123 switch (ip->i_d.di_mode & S_IFMT) {
124 case S_IFREG:
125 case S_IFDIR:
126 case S_IFLNK:
127 xfs_idestroy_fork(ip, XFS_DATA_FORK);
128 break;
129 }
130
131 if (ip->i_afp)
132 xfs_idestroy_fork(ip, XFS_ATTR_FORK);
133
134#ifdef XFS_INODE_TRACE
135 ktrace_free(ip->i_trace);
136#endif
137#ifdef XFS_BMAP_TRACE
138 ktrace_free(ip->i_xtrace);
139#endif
140#ifdef XFS_BTREE_TRACE
141 ktrace_free(ip->i_btrace);
142#endif
143#ifdef XFS_RW_TRACE
144 ktrace_free(ip->i_rwtrace);
145#endif
146#ifdef XFS_ILOCK_TRACE
147 ktrace_free(ip->i_lock_trace);
148#endif
149#ifdef XFS_DIR2_TRACE
150 ktrace_free(ip->i_dir_trace);
151#endif
152
153 if (ip->i_itemp) {
154 /*
155 * Only if we are shutting down the fs will we see an
156 * inode still in the AIL. If it is there, we should remove
157 * it to prevent a use-after-free from occurring.
158 */
159 xfs_log_item_t *lip = &ip->i_itemp->ili_item;
160 struct xfs_ail *ailp = lip->li_ailp;
161
162 ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
163 XFS_FORCED_SHUTDOWN(ip->i_mount));
164 if (lip->li_flags & XFS_LI_IN_AIL) {
165 spin_lock(&ailp->xa_lock);
166 if (lip->li_flags & XFS_LI_IN_AIL)
167 xfs_trans_ail_delete(ailp, lip);
168 else
169 spin_unlock(&ailp->xa_lock);
170 }
171 xfs_inode_item_destroy(ip);
172 ip->i_itemp = NULL;
173 }
174
175 /* asserts to verify all state is correct here */
176 ASSERT(atomic_read(&ip->i_iocount) == 0);
177 ASSERT(atomic_read(&ip->i_pincount) == 0);
178 ASSERT(!spin_is_locked(&ip->i_flags_lock));
179 ASSERT(completion_done(&ip->i_flush));
180
181 kmem_zone_free(xfs_inode_zone, ip);
182}
183
126/* 184/*
 127 * Check the validity of the inode we just found in the cache 185 * Check the validity of the inode we just found in the cache
128 */ 186 */
@@ -133,80 +191,82 @@ xfs_iget_cache_hit(
133 int flags, 191 int flags,
134 int lock_flags) __releases(pag->pag_ici_lock) 192 int lock_flags) __releases(pag->pag_ici_lock)
135{ 193{
194 struct inode *inode = VFS_I(ip);
136 struct xfs_mount *mp = ip->i_mount; 195 struct xfs_mount *mp = ip->i_mount;
137 int error = EAGAIN; 196 int error;
197
198 spin_lock(&ip->i_flags_lock);
138 199
139 /* 200 /*
140 * If INEW is set this inode is being set up 201 * If we are racing with another cache hit that is currently
141 * If IRECLAIM is set this inode is being torn down 202 * instantiating this inode or currently recycling it out of
 142 * Pause and try again. 203 * reclaimable state, wait for the initialisation to complete
204 * before continuing.
205 *
206 * XXX(hch): eventually we should do something equivalent to
207 * wait_on_inode to wait for these flags to be cleared
208 * instead of polling for it.
143 */ 209 */
144 if (xfs_iflags_test(ip, (XFS_INEW|XFS_IRECLAIM))) { 210 if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
145 XFS_STATS_INC(xs_ig_frecycle); 211 XFS_STATS_INC(xs_ig_frecycle);
212 error = EAGAIN;
146 goto out_error; 213 goto out_error;
147 } 214 }
148 215
149 /* If IRECLAIMABLE is set, we've torn down the vfs inode part */ 216 /*
150 if (xfs_iflags_test(ip, XFS_IRECLAIMABLE)) { 217 * If lookup is racing with unlink return an error immediately.
151 218 */
152 /* 219 if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
153 * If lookup is racing with unlink, then we should return an 220 error = ENOENT;
154 * error immediately so we don't remove it from the reclaim 221 goto out_error;
155 * list and potentially leak the inode. 222 }
156 */
157 if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
158 error = ENOENT;
159 goto out_error;
160 }
161 223
224 /*
225 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
226 * Need to carefully get it back into useable state.
227 */
228 if (ip->i_flags & XFS_IRECLAIMABLE) {
162 xfs_itrace_exit_tag(ip, "xfs_iget.alloc"); 229 xfs_itrace_exit_tag(ip, "xfs_iget.alloc");
163 230
164 /* 231 /*
165 * We need to re-initialise the VFS inode as it has been 232 * We need to set XFS_INEW atomically with clearing the
166 * 'freed' by the VFS. Do this here so we can deal with 233 * reclaimable tag so that we do have an indicator of the
167 * errors cleanly, then tag it so it can be set up correctly 234 * inode still being initialized.
168 * later.
169 */ 235 */
170 if (!inode_init_always(mp->m_super, VFS_I(ip))) { 236 ip->i_flags |= XFS_INEW;
171 error = ENOMEM; 237 ip->i_flags &= ~XFS_IRECLAIMABLE;
172 goto out_error; 238 __xfs_inode_clear_reclaim_tag(mp, pag, ip);
173 }
174 239
175 /* 240 spin_unlock(&ip->i_flags_lock);
176 * We must set the XFS_INEW flag before clearing the 241 read_unlock(&pag->pag_ici_lock);
177 * XFS_IRECLAIMABLE flag so that if a racing lookup does
178 * not find the XFS_IRECLAIMABLE above but has the igrab()
179 * below succeed we can safely check XFS_INEW to detect
180 * that this inode is still being initialised.
181 */
182 xfs_iflags_set(ip, XFS_INEW);
183 xfs_iflags_clear(ip, XFS_IRECLAIMABLE);
184 242
185 /* clear the radix tree reclaim flag as well. */ 243 error = -inode_init_always(mp->m_super, inode);
186 __xfs_inode_clear_reclaim_tag(mp, pag, ip); 244 if (error) {
187 } else if (!igrab(VFS_I(ip))) { 245 /*
246 * Re-initializing the inode failed, and we are in deep
247 * trouble. Try to re-add it to the reclaim list.
248 */
249 read_lock(&pag->pag_ici_lock);
250 spin_lock(&ip->i_flags_lock);
251
252 ip->i_flags &= ~XFS_INEW;
253 ip->i_flags |= XFS_IRECLAIMABLE;
254 __xfs_inode_set_reclaim_tag(pag, ip);
255 goto out_error;
256 }
257 inode->i_state = I_LOCK|I_NEW;
258 } else {
188 /* If the VFS inode is being torn down, pause and try again. */ 259 /* If the VFS inode is being torn down, pause and try again. */
189 XFS_STATS_INC(xs_ig_frecycle); 260 if (!igrab(inode)) {
190 goto out_error; 261 error = EAGAIN;
191 } else if (xfs_iflags_test(ip, XFS_INEW)) { 262 goto out_error;
192 /* 263 }
193 * We are racing with another cache hit that is
194 * currently recycling this inode out of the XFS_IRECLAIMABLE
195 * state. Wait for the initialisation to complete before
196 * continuing.
197 */
198 wait_on_inode(VFS_I(ip));
199 }
200 264
201 if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) { 265 /* We've got a live one. */
202 error = ENOENT; 266 spin_unlock(&ip->i_flags_lock);
203 iput(VFS_I(ip)); 267 read_unlock(&pag->pag_ici_lock);
204 goto out_error;
205 } 268 }
206 269
207 /* We've got a live one. */
208 read_unlock(&pag->pag_ici_lock);
209
210 if (lock_flags != 0) 270 if (lock_flags != 0)
211 xfs_ilock(ip, lock_flags); 271 xfs_ilock(ip, lock_flags);
212 272
@@ -216,6 +276,7 @@ xfs_iget_cache_hit(
216 return 0; 276 return 0;
217 277
218out_error: 278out_error:
279 spin_unlock(&ip->i_flags_lock);
219 read_unlock(&pag->pag_ici_lock); 280 read_unlock(&pag->pag_ici_lock);
220 return error; 281 return error;
221} 282}
@@ -299,7 +360,8 @@ out_preload_end:
299 if (lock_flags) 360 if (lock_flags)
300 xfs_iunlock(ip, lock_flags); 361 xfs_iunlock(ip, lock_flags);
301out_destroy: 362out_destroy:
302 xfs_destroy_inode(ip); 363 __destroy_inode(VFS_I(ip));
364 xfs_inode_free(ip);
303 return error; 365 return error;
304} 366}
305 367
@@ -504,62 +566,7 @@ xfs_ireclaim(
504 xfs_qm_dqdetach(ip); 566 xfs_qm_dqdetach(ip);
505 xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); 567 xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
506 568
507 switch (ip->i_d.di_mode & S_IFMT) { 569 xfs_inode_free(ip);
508 case S_IFREG:
509 case S_IFDIR:
510 case S_IFLNK:
511 xfs_idestroy_fork(ip, XFS_DATA_FORK);
512 break;
513 }
514
515 if (ip->i_afp)
516 xfs_idestroy_fork(ip, XFS_ATTR_FORK);
517
518#ifdef XFS_INODE_TRACE
519 ktrace_free(ip->i_trace);
520#endif
521#ifdef XFS_BMAP_TRACE
522 ktrace_free(ip->i_xtrace);
523#endif
524#ifdef XFS_BTREE_TRACE
525 ktrace_free(ip->i_btrace);
526#endif
527#ifdef XFS_RW_TRACE
528 ktrace_free(ip->i_rwtrace);
529#endif
530#ifdef XFS_ILOCK_TRACE
531 ktrace_free(ip->i_lock_trace);
532#endif
533#ifdef XFS_DIR2_TRACE
534 ktrace_free(ip->i_dir_trace);
535#endif
536 if (ip->i_itemp) {
537 /*
538 * Only if we are shutting down the fs will we see an
539 * inode still in the AIL. If it is there, we should remove
540 * it to prevent a use-after-free from occurring.
541 */
542 xfs_log_item_t *lip = &ip->i_itemp->ili_item;
543 struct xfs_ail *ailp = lip->li_ailp;
544
545 ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
546 XFS_FORCED_SHUTDOWN(ip->i_mount));
547 if (lip->li_flags & XFS_LI_IN_AIL) {
548 spin_lock(&ailp->xa_lock);
549 if (lip->li_flags & XFS_LI_IN_AIL)
550 xfs_trans_ail_delete(ailp, lip);
551 else
552 spin_unlock(&ailp->xa_lock);
553 }
554 xfs_inode_item_destroy(ip);
555 ip->i_itemp = NULL;
556 }
557 /* asserts to verify all state is correct here */
558 ASSERT(atomic_read(&ip->i_iocount) == 0);
559 ASSERT(atomic_read(&ip->i_pincount) == 0);
560 ASSERT(!spin_is_locked(&ip->i_flags_lock));
561 ASSERT(completion_done(&ip->i_flush));
562 kmem_zone_free(xfs_inode_zone, ip);
563} 570}
564 571
565/* 572/*
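
Two things happen in the xfs_iget.c rewrite above: the inode teardown code that used to live in xfs_ireclaim() is factored into xfs_inode_free() so the allocation error path can share it, and xfs_iget_cache_hit() now flips XFS_INEW on and XFS_IRECLAIMABLE off in one step under i_flags_lock, so a concurrent lookup always observes the inode as either reclaimable or in setup, never in between. The transition in isolation:

/* Sketch: recycle a reclaimable inode atomically w.r.t. other lookups. */
spin_lock(&ip->i_flags_lock);
ip->i_flags |= XFS_INEW;		/* now visible as "being set up" */
ip->i_flags &= ~XFS_IRECLAIMABLE;	/* no longer reclaim fodder */
__xfs_inode_clear_reclaim_tag(mp, pag, ip);
spin_unlock(&ip->i_flags_lock);

/* The VFS inode is then reinitialised outside the spinlocks; if that
 * fails, the locks are retaken and the inode is moved back to the
 * reclaimable state via __xfs_inode_set_reclaim_tag(). */
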
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 1f22d65fed0a..da428b3fe0f5 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -343,6 +343,16 @@ xfs_iformat(
343 return XFS_ERROR(EFSCORRUPTED); 343 return XFS_ERROR(EFSCORRUPTED);
344 } 344 }
345 345
346 if (unlikely((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) &&
347 !ip->i_mount->m_rtdev_targp)) {
348 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
349 "corrupt dinode %Lu, has realtime flag set.",
350 ip->i_ino);
351 XFS_CORRUPTION_ERROR("xfs_iformat(realtime)",
352 XFS_ERRLEVEL_LOW, ip->i_mount, dip);
353 return XFS_ERROR(EFSCORRUPTED);
354 }
355
346 switch (ip->i_d.di_mode & S_IFMT) { 356 switch (ip->i_d.di_mode & S_IFMT) {
347 case S_IFIFO: 357 case S_IFIFO:
348 case S_IFCHR: 358 case S_IFCHR:
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 1804f866a71d..65f24a3cc992 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -310,23 +310,6 @@ static inline struct inode *VFS_I(struct xfs_inode *ip)
310} 310}
311 311
312/* 312/*
313 * Get rid of a partially initialized inode.
314 *
315 * We have to go through destroy_inode to make sure allocations
316 * from init_inode_always like the security data are undone.
317 *
318 * We mark the inode bad so that it takes the short cut in
319 * the reclaim path instead of going through the flush path
320 * which doesn't make sense for an inode that has never seen the
321 * light of day.
322 */
323static inline void xfs_destroy_inode(struct xfs_inode *ip)
324{
325 make_bad_inode(VFS_I(ip));
326 return destroy_inode(VFS_I(ip));
327}
328
329/*
330 * i_flags helper functions 313 * i_flags helper functions
331 */ 314 */
332static inline void 315static inline void
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 3750f04ede0b..9dbdff3ea484 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -3180,7 +3180,7 @@ try_again:
3180STATIC void 3180STATIC void
3181xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog) 3181xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog)
3182{ 3182{
3183 ASSERT(spin_is_locked(&log->l_icloglock)); 3183 assert_spin_locked(&log->l_icloglock);
3184 3184
3185 if (iclog->ic_state == XLOG_STATE_ACTIVE) { 3185 if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3186 xlog_state_switch_iclogs(log, iclog, 0); 3186 xlog_state_switch_iclogs(log, iclog, 0);
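
ASSERT(spin_is_locked()) misfires on uniprocessor builds without spinlock debugging, where the lock state compiles away and spin_is_locked() evaluates to 0 even while the lock is held; assert_spin_locked() is the interface that behaves correctly across configurations. The change in isolation:

/* Wrong on CONFIG_SMP=n: spin_is_locked() is constant 0 there,
 * so a DEBUG build trips this assert even with the lock held. */
ASSERT(spin_is_locked(&log->l_icloglock));

/* Correct everywhere: */
assert_spin_locked(&log->l_icloglock);
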
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index c4eca5ed5dab..492d75bae2bf 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -538,7 +538,9 @@ xfs_readlink_bmap(
538 d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock); 538 d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
539 byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount); 539 byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
540 540
541 bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt), 0); 541 bp = xfs_buf_read_flags(mp->m_ddev_targp, d, BTOBB(byte_cnt),
542 XBF_LOCK | XBF_MAPPED |
543 XBF_DONT_BLOCK);
542 error = XFS_BUF_GETERROR(bp); 544 error = XFS_BUF_GETERROR(bp);
543 if (error) { 545 if (error) {
544 xfs_ioerror_alert("xfs_readlink", 546 xfs_ioerror_alert("xfs_readlink",